X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fl3fwd-power%2Fmain.c;h=596d64548d1110e9ddcd8a6ae53dd1e9576d69ac;hb=aed545af1b5ed6b7baa2eb41bad63486d6c95226;hp=8a8f34b50b3ade7e2cf36670390ea6bdb0755bb5;hpb=f8244c6399d9fae6afab6770ae367aef38742ea5;p=dpdk.git

diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index 8a8f34b50b..596d64548d 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
  */
 
 #include
@@ -50,7 +21,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -60,7 +30,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -81,8 +50,6 @@
 
 #define MIN_ZERO_POLL_COUNT 10
 
-/* around 100ms at 2 Ghz */
-#define TIMER_RESOLUTION_CYCLES 200000000ULL
 /* 100 ms interval */
 #define TIMER_NUMBER_PER_SECOND 10
 /* 100000 us */
@@ -145,8 +112,8 @@
 /*
  * Configurable number of RX/TX ring descriptors
  */
-#define RTE_TEST_RX_DESC_DEFAULT 512
-#define RTE_TEST_TX_DESC_DEFAULT 512
+#define RTE_TEST_RX_DESC_DEFAULT 1024
+#define RTE_TEST_TX_DESC_DEFAULT 1024
 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
 
@@ -217,11 +184,9 @@ static struct rte_eth_conf port_conf = {
 		.mq_mode = ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = ETHER_MAX_LEN,
 		.split_hdr_size = 0,
-		.header_split = 0, /**< Header Split disabled */
-		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
-		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
-		.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
-		.hw_strip_crc = 1, /**< CRC stripped by hardware */
+		.ignore_offload_bitfield = 1,
+		.offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
+			     DEV_RX_OFFLOAD_CHECKSUM),
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
@@ -233,7 +198,6 @@ static struct rte_eth_conf port_conf = {
 		.mq_mode = ETH_MQ_TX_NONE,
 	},
 	.intr_conf = {
-		.lsc = 1,
 		.rxq = 1,
 	},
 };
@@ -377,7 +341,7 @@ static void
 signal_exit_now(int sigtype)
 {
 	unsigned lcore_id;
-	unsigned int portid, nb_ports;
+	unsigned int portid;
 	int ret;
 
 	if (sigtype == SIGINT) {
@@ -393,8 +357,7 @@ signal_exit_now(int sigtype)
 					"core%u\n", lcore_id);
 		}
 
-		nb_ports = rte_eth_dev_count();
-		for (portid = 0; portid < nb_ports; portid++) {
+		RTE_ETH_FOREACH_DEV(portid) {
 			if ((enabled_port_mask & (1 << portid)) == 0)
 				continue;
 
@@ -451,7 +414,7 @@ power_timer_cb(__attribute__((unused)) struct rte_timer *tim,
 
 /* Enqueue a single packet, and send burst if queue is filled */
 static inline int
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
 {
 	uint32_t lcore_id;
 	struct lcore_conf *qconf;
@@ -659,7 +622,7 @@ add_cb_parse_ptype(uint16_t portid, uint16_t queueid)
 }
 
 static inline void
-l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
+l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid,
 				struct lcore_conf *qconf)
 {
 	struct ether_hdr *eth_hdr;
@@ -752,8 +715,6 @@ power_idle_heuristic(uint32_t zero_rx_packet_count)
 	 */
 	else
 		return SUSPEND_THRESHOLD;
-
-	return 0;
 }
 
 static inline enum freq_scale_hint_t
@@ -761,6 +722,7 @@ power_freq_scaleup_heuristic(unsigned lcore_id,
 				uint16_t port_id,
 				uint16_t queue_id)
 {
+	uint32_t rxq_count = rte_eth_rx_queue_count(port_id, queue_id);
 	/**
 	 * HW Rx queue size is 128 by default, Rx burst read at maximum 32 entries
 	 * per iteration
@@ -772,15 +734,12 @@ power_freq_scaleup_heuristic(unsigned lcore_id,
 #define FREQ_UP_TREND2_ACC 100
 #define FREQ_UP_THRESHOLD 10000
 
-	if (likely(rte_eth_rx_descriptor_done(port_id, queue_id,
-			FREQ_GEAR3_RX_PACKET_THRESHOLD) > 0)) {
+	if (likely(rxq_count > FREQ_GEAR3_RX_PACKET_THRESHOLD)) {
 		stats[lcore_id].trend = 0;
 		return FREQ_HIGHEST;
-	} else if (likely(rte_eth_rx_descriptor_done(port_id, queue_id,
-			FREQ_GEAR2_RX_PACKET_THRESHOLD) > 0))
+	} else if (likely(rxq_count > FREQ_GEAR2_RX_PACKET_THRESHOLD))
 		stats[lcore_id].trend += FREQ_UP_TREND2_ACC;
-	else if (likely(rte_eth_rx_descriptor_done(port_id, queue_id,
-			FREQ_GEAR1_RX_PACKET_THRESHOLD) > 0))
+	else if (likely(rxq_count > FREQ_GEAR1_RX_PACKET_THRESHOLD))
 		stats[lcore_id].trend += FREQ_UP_TREND1_ACC;
 
 	if (likely(stats[lcore_id].trend > FREQ_UP_THRESHOLD)) {
@@ -879,7 +838,7 @@ main_loop(__attribute__((unused)) void *dummy)
 {
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
 	unsigned lcore_id;
-	uint64_t prev_tsc, diff_tsc, cur_tsc;
+	uint64_t prev_tsc, diff_tsc, cur_tsc, tim_res_tsc, hz;
 	uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
 	int i, j, nb_rx;
 	uint8_t queueid;
@@ -894,6 +853,8 @@ main_loop(__attribute__((unused)) void *dummy)
 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
 					US_PER_S * BURST_TX_DRAIN_US;
 
 	prev_tsc = 0;
+	hz = rte_get_timer_hz();
+	tim_res_tsc = hz/TIMER_NUMBER_PER_SECOND;
 	lcore_id = rte_lcore_id();
 	qconf = &lcore_conf[lcore_id];
@@ -939,7 +900,7 @@ main_loop(__attribute__((unused)) void *dummy)
 		}
 
 		diff_tsc_power = cur_tsc_power - prev_tsc_power;
-		if (diff_tsc_power > TIMER_RESOLUTION_CYCLES) {
+		if (diff_tsc_power > tim_res_tsc) {
 			rte_timer_manage();
 			prev_tsc_power = cur_tsc_power;
 		}
@@ -1055,9 +1016,11 @@ start_rx:
 					turn_on_intr(qconf);
 					sleep_until_rx_interrupt(
 						qconf->n_rx_queue);
+					/**
+					 * start receiving packets immediately
+					 */
+					goto start_rx;
 				}
-				/* start receiving packets immediately */
-				goto start_rx;
 			}
 			stats[lcore_id].sleep_time += lcore_idle_hint;
 		}
@@ -1093,7 +1056,7 @@ check_lcore_params(void)
 }
 
 static int
-check_port_config(const unsigned nb_ports)
+check_port_config(void)
 {
 	unsigned portid;
 	uint16_t i;
@@ -1105,7 +1068,7 @@ check_port_config(const unsigned nb_ports)
 				portid);
 			return -1;
 		}
-		if (portid >= nb_ports) {
+		if (!rte_eth_dev_is_valid_port(portid)) {
 			printf("port %u is not present on the board\n",
 				portid);
 			return -1;
@@ -1115,7 +1078,7 @@ check_port_config(const unsigned nb_ports)
 }
 
 static uint8_t
-get_port_n_rx_queues(const uint8_t port)
+get_port_n_rx_queues(const uint16_t port)
 {
 	int queue = -1;
 	uint16_t i;
@@ -1321,7 +1284,10 @@ parse_args(int argc, char **argv)
 						0, 0};
 
 				printf("jumbo frame is enabled \n");
-				port_conf.rxmode.jumbo_frame = 1;
+				port_conf.rxmode.offloads |=
+						DEV_RX_OFFLOAD_JUMBO_FRAME;
+				port_conf.txmode.offloads |=
+						DEV_TX_OFFLOAD_MULTI_SEGS;
 
 				/**
 				 * if no max-pkt-len set, use the default value
@@ -1545,7 +1511,7 @@ init_mem(unsigned nb_mbuf)
 
 /* Check the link status of all ports in up to 9s, and print them finally */
 static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
 {
 #define CHECK_INTERVAL 100 /* 100ms */
 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -1557,7 +1523,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 	fflush(stdout);
 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
 		all_ports_up = 1;
-		for (portid = 0; portid < port_num; portid++) {
+		RTE_ETH_FOREACH_DEV(portid) {
 			if ((port_mask & (1 << portid)) == 0)
 				continue;
 			memset(&link, 0, sizeof(link));
@@ -1599,7 +1565,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 	}
 }
 
-static int check_ptype(uint8_t portid)
+static int check_ptype(uint16_t portid)
 {
 	int i, ret;
 	int ptype_l3_ipv4 = 0;
@@ -1650,7 +1616,7 @@ main(int argc, char **argv)
 	struct rte_eth_dev_info dev_info;
 	struct rte_eth_txconf *txconf;
 	int ret;
-	unsigned nb_ports;
+	uint16_t nb_ports;
 	uint16_t queueid;
 	unsigned lcore_id;
 	uint64_t hz;
@@ -1658,7 +1624,6 @@ main(int argc, char **argv)
 	uint32_t dev_rxq_num, dev_txq_num;
 	uint8_t nb_rx_queue, queue, socketid;
 	uint16_t portid;
-	uint16_t org_rxq_intr = port_conf.intr_conf.rxq;
 
 	/* catch SIGINT and restore cpufreq governor to ondemand */
 	signal(SIGINT, signal_exit_now);
@@ -1685,15 +1650,17 @@ main(int argc, char **argv)
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
 
-	nb_ports = rte_eth_dev_count();
+	nb_ports = rte_eth_dev_count_avail();
 
-	if (check_port_config(nb_ports) < 0)
+	if (check_port_config() < 0)
 		rte_exit(EXIT_FAILURE, "check_port_config failed\n");
 
 	nb_lcores = rte_lcore_count();
 
 	/* initialize all ports */
-	for (portid = 0; portid < nb_ports; portid++) {
+	RTE_ETH_FOREACH_DEV(portid) {
+		struct rte_eth_conf local_port_conf = port_conf;
+
 		/* skip ports that are not enabled */
 		if ((enabled_port_mask & (1 << portid)) == 0) {
 			printf("\nSkipping disabled port %d\n", portid);
@@ -1721,11 +1688,13 @@ main(int argc, char **argv)
 			nb_rx_queue, (unsigned)n_tx_queue );
 		/* If number of Rx queue is 0, no need to enable Rx interrupt */
 		if (nb_rx_queue == 0)
-			port_conf.intr_conf.rxq = 0;
+			local_port_conf.intr_conf.rxq = 0;
+		rte_eth_dev_info_get(portid, &dev_info);
+		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+			local_port_conf.txmode.offloads |=
+				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 		ret = rte_eth_dev_configure(portid, nb_rx_queue,
-					(uint16_t)n_tx_queue, &port_conf);
-		/* Revert to original value */
-		port_conf.intr_conf.rxq = org_rxq_intr;
+					(uint16_t)n_tx_queue, &local_port_conf);
 		if (ret < 0)
 			rte_exit(EXIT_FAILURE, "Cannot configure device: "
 					"err=%d, port=%d\n", ret, portid);
@@ -1780,10 +1749,9 @@ main(int argc, char **argv)
 			printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
 			fflush(stdout);
 
-			rte_eth_dev_info_get(portid, &dev_info);
 			txconf = &dev_info.default_txconf;
-			if (port_conf.rxmode.jumbo_frame)
-				txconf->txq_flags = 0;
+			txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
+			txconf->offloads = local_port_conf.txmode.offloads;
 			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
 						socketid, txconf);
 			if (ret < 0)
@@ -1823,8 +1791,14 @@ main(int argc, char **argv)
 		fflush(stdout);
 		/* init RX queues */
 		for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
+			struct rte_eth_rxconf rxq_conf;
+			struct rte_eth_dev *dev;
+			struct rte_eth_conf *conf;
+
 			portid = qconf->rx_queue_list[queue].port_id;
 			queueid = qconf->rx_queue_list[queue].queue_id;
+			dev = &rte_eth_devices[portid];
+			conf = &dev->data->dev_conf;
 
 			if (numa_on)
 				socketid = \
@@ -1835,8 +1809,11 @@ main(int argc, char **argv)
 			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
 			fflush(stdout);
 
+			rte_eth_dev_info_get(portid, &dev_info);
+			rxq_conf = dev_info.default_rxconf;
+			rxq_conf.offloads = conf->rxmode.offloads;
 			ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
-				socketid, NULL,
+				socketid, &rxq_conf,
 				pktmbuf_pool[socketid]);
 			if (ret < 0)
 				rte_exit(EXIT_FAILURE,
@@ -1856,7 +1833,7 @@ main(int argc, char **argv)
 	printf("\n");
 
 	/* start ports */
-	for (portid = 0; portid < nb_ports; portid++) {
+	RTE_ETH_FOREACH_DEV(portid) {
 		if ((enabled_port_mask & (1 << portid)) == 0) {
 			continue;
 		}
@@ -1877,7 +1854,7 @@ main(int argc, char **argv)
 		rte_spinlock_init(&(locks[portid]));
 	}
 
-	check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+	check_all_ports_link_status(enabled_port_mask);
 
 	/* launch per-lcore init on every lcore */
 	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
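
Notes on the API migration above, for readers porting similar applications: the patch drops the deprecated rte_eth_rxmode bit-fields (header_split, hw_ip_checksum, hw_vlan_filter, jumbo_frame, hw_strip_crc) in favour of the rxmode/txmode offloads bitmasks, copies port_conf into a per-port local_port_conf so that device-specific adjustments (Rx interrupt availability, DEV_TX_OFFLOAD_MBUF_FAST_FREE) do not leak into the next port, and iterates ports with RTE_ETH_FOREACH_DEV() instead of indexing up to rte_eth_dev_count(). The sketch below condenses that configuration pattern for a single port with one Rx and one Tx queue; it is illustrative only (the function name, port id, descriptor counts and mempool are placeholders, not part of the patch) and assumes the same ethdev API generation targeted by the patch (offloads fields plus ETH_TXQ_FLAGS_IGNORE).

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Hypothetical helper: configure one port using the offloads-based API
 * that the patch migrates to. Not part of the patch itself. */
static int
setup_port(uint16_t portid, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			/* Tell the PMD to use .offloads, not the old bit-fields. */
			.ignore_offload_bitfield = 1,
			.offloads = DEV_RX_OFFLOAD_CRC_STRIP |
				    DEV_RX_OFFLOAD_CHECKSUM,
		},
		.txmode = { .mq_mode = ETH_MQ_TX_NONE },
	};
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	int ret;

	/* Request an optional Tx offload only if the device reports it. */
	rte_eth_dev_info_get(portid, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	ret = rte_eth_dev_configure(portid, 1, 1, &conf);
	if (ret < 0)
		return ret;

	/* Queue-level offloads are derived from the port-level configuration. */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = conf.rxmode.offloads;
	ret = rte_eth_rx_queue_setup(portid, 0, 1024,
			rte_eth_dev_socket_id(portid), &rxq_conf, mbuf_pool);
	if (ret < 0)
		return ret;

	txq_conf = dev_info.default_txconf;
	txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;	/* honour .offloads instead */
	txq_conf.offloads = conf.txmode.offloads;
	return rte_eth_tx_queue_setup(portid, 0, 1024,
			rte_eth_dev_socket_id(portid), &txq_conf);
}

The same reasoning applies to the main_loop() changes: the frequency scale-up heuristic now samples the Rx backlog with rte_eth_rx_queue_count() instead of probing fixed descriptor offsets with the removed rte_eth_rx_descriptor_done(), and the timer period is derived from rte_get_timer_hz() rather than the hard-coded TIMER_RESOLUTION_CYCLES, so it no longer assumes the "around 100ms at 2 Ghz" calibration noted in the deleted comment.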