diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index 6057059919..15b47c7ddd 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -47,6 +47,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -65,7 +66,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -74,12 +74,14 @@
 #include
 #include
 #include
+#include
+#include
 
 #define RTE_LOGTYPE_L3FWD_POWER RTE_LOGTYPE_USER1
 
 #define MAX_PKT_BURST 32
 
-#define MIN_ZERO_POLL_COUNT 5
+#define MIN_ZERO_POLL_COUNT 10 /* around 100ms at 2 GHz */
 
 #define TIMER_RESOLUTION_CYCLES 200000000ULL
@@ -153,12 +155,17 @@ static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
 
 /* ethernet addresses of ports */
 static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
 
+/* spinlock per port, serializing rx interrupt enable/disable */
+static rte_spinlock_t locks[RTE_MAX_ETHPORTS];
+
 /* mask of enabled ports */
 static uint32_t enabled_port_mask = 0;
 /* Ports set in promiscuous mode off by default. */
 static int promiscuous_on = 0;
 /* NUMA is enabled by default. */
 static int numa_on = 1;
+static int parse_ptype; /**< Parse packet type via rx callback; */
+			/**< disabled by default. */
 
 enum freq_scale_hint_t
@@ -168,11 +175,6 @@ enum freq_scale_hint_t
 	FREQ_HIGHEST  =       2
 };
 
-struct mbuf_table {
-	uint16_t len;
-	struct rte_mbuf *m_table[MAX_PKT_BURST];
-};
-
 struct lcore_rx_queue {
 	uint8_t port_id;
 	uint8_t queue_id;
@@ -185,6 +187,9 @@ struct lcore_rx_queue {
 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
 #define MAX_RX_QUEUE_PER_PORT 128
 
+#define MAX_RX_QUEUE_INTERRUPT_PER_PORT 16
+
+
 #define MAX_LCORE_PARAMS 1024
 struct lcore_params {
 	uint8_t port_id;
@@ -211,7 +216,7 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
 
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
-		.mq_mode	= ETH_MQ_RX_RSS,
+		.mq_mode        = ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 		.header_split   = 0, /**< Header Split disabled */
@@ -223,11 +228,15 @@ static struct rte_eth_conf port_conf = {
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IP,
+			.rss_hf = ETH_RSS_UDP,
 		},
 	},
 	.txmode = {
-		.mq_mode = ETH_DCB_NONE,
+		.mq_mode = ETH_MQ_TX_NONE,
+	},
+	.intr_conf = {
+		.lsc = 1,
+		.rxq = 1,
 	},
 };
@@ -335,8 +344,10 @@ static lookup_struct_t *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
 struct lcore_conf {
 	uint16_t n_rx_queue;
 	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
+	uint16_t n_tx_port;
+	uint16_t tx_port_id[RTE_MAX_ETHPORTS];
 	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
-	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
+	struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
 	lookup_struct_t * ipv4_lookup_struct;
 	lookup_struct_t * ipv6_lookup_struct;
 } __rte_cache_aligned;
@@ -368,6 +379,7 @@ static void
 signal_exit_now(int sigtype)
 {
 	unsigned lcore_id;
+	unsigned int portid, nb_ports;
 	int ret;
 
 	if (sigtype == SIGINT) {
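The lcore_conf hunk above drops the hand-rolled mbuf_table staging array in favour of the rte_eth_dev_tx_buffer API that ethdev gained in DPDK 16.04. For orientation, a minimal sketch of that API's lifecycle; the helper names and the BURST size stand in for this file's MAX_PKT_BURST and are illustrative only:

    /* Sketch: buffered TX, assuming DPDK >= 16.04. */
    #include <rte_ethdev.h>
    #include <rte_malloc.h>
    #include <rte_mbuf.h>

    #define BURST 32	/* stands in for MAX_PKT_BURST */

    static struct rte_eth_dev_tx_buffer *
    make_tx_buffer(uint8_t port)
    {
    	/* allocate on the port's NUMA socket, sized for one burst */
    	struct rte_eth_dev_tx_buffer *buf = rte_zmalloc_socket("tx_buffer",
    			RTE_ETH_TX_BUFFER_SIZE(BURST), 0,
    			rte_eth_dev_socket_id(port));

    	if (buf != NULL)
    		rte_eth_tx_buffer_init(buf, BURST);
    	return buf;
    }

    static void
    queue_pkt(uint8_t port, uint16_t queue,
    		struct rte_eth_dev_tx_buffer *buf, struct rte_mbuf *m)
    {
    	/* enqueue m; the library transmits once BURST mbufs gather */
    	rte_eth_tx_buffer(port, queue, buf, m);
    }

The periodic rte_eth_tx_buffer_flush() that pushes out partial bursts appears further down in this patch, in main_loop()'s drain block.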
@@ -382,6 +394,15 @@ signal_exit_now(int sigtype)
 				"library de-initialization failed on "
 				"core%u\n", lcore_id);
 		}
+
+		nb_ports = rte_eth_dev_count();
+		for (portid = 0; portid < nb_ports; portid++) {
+			if ((enabled_port_mask & (1 << portid)) == 0)
+				continue;
+
+			rte_eth_dev_stop(portid);
+			rte_eth_dev_close(portid);
+		}
 	}
 
 	rte_exit(EXIT_SUCCESS, "User forced exit\n");
 }
@@ -399,19 +420,22 @@ power_timer_cb(__attribute__((unused)) struct rte_timer *tim,
 	/* accumulate total execution time in us when callback is invoked */
 	sleep_time_ratio = (float)(stats[lcore_id].sleep_time) /
 			(float)SCALING_PERIOD;
-
 	/**
 	 * check whether need to scale down frequency a step if it sleep a lot.
 	 */
-	if (sleep_time_ratio >= SCALING_DOWN_TIME_RATIO_THRESHOLD)
-		rte_power_freq_down(lcore_id);
+	if (sleep_time_ratio >= SCALING_DOWN_TIME_RATIO_THRESHOLD) {
+		if (rte_power_freq_down)
+			rte_power_freq_down(lcore_id);
+	}
 	else if ( (unsigned)(stats[lcore_id].nb_rx_processed /
-		stats[lcore_id].nb_iteration_looped) < MAX_PKT_BURST)
+		stats[lcore_id].nb_iteration_looped) < MAX_PKT_BURST) {
 		/**
 		 * scale down a step if average packet per iteration less
 		 * than expectation.
 		 */
-		rte_power_freq_down(lcore_id);
+		if (rte_power_freq_down)
+			rte_power_freq_down(lcore_id);
+	}
 
 	/**
 	 * initialize another timer according to current frequency to ensure
@@ -427,49 +451,19 @@ power_timer_cb(__attribute__((unused)) struct rte_timer *tim,
 	stats[lcore_id].sleep_time = 0;
 }
 
-/* Send burst of packets on an output interface */
-static inline int
-send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
-{
-	struct rte_mbuf **m_table;
-	int ret;
-	uint16_t queueid;
-
-	queueid = qconf->tx_queue_id[port];
-	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
-
-	ret = rte_eth_tx_burst(port, queueid, m_table, n);
-	if (unlikely(ret < n)) {
-		do {
-			rte_pktmbuf_free(m_table[ret]);
-		} while (++ret < n);
-	}
-
-	return 0;
-}
-
 /* Enqueue a single packet, and send burst if queue is filled */
 static inline int
 send_single_packet(struct rte_mbuf *m, uint8_t port)
 {
 	uint32_t lcore_id;
-	uint16_t len;
 	struct lcore_conf *qconf;
 
 	lcore_id = rte_lcore_id();
-
 	qconf = &lcore_conf[lcore_id];
-	len = qconf->tx_mbufs[port].len;
-	qconf->tx_mbufs[port].m_table[len] = m;
-	len++;
-
-	/* enough pkts to be sent */
-	if (unlikely(len == MAX_PKT_BURST)) {
-		send_burst(qconf, MAX_PKT_BURST, port);
-		len = 0;
-	}
-	qconf->tx_mbufs[port].len = len;
+	rte_eth_tx_buffer(port, qconf->tx_queue_id[port],
+			qconf->tx_buffer[port], m);
+
 	return 0;
 }
@@ -616,7 +610,7 @@ static inline uint8_t
 get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
 		lookup_struct_t *ipv4_l3fwd_lookup_struct)
 {
-	uint8_t next_hop;
+	uint32_t next_hop;
 
 	return (uint8_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
 		rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)?
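The next_hop widening just above tracks the rte_lpm API change in DPDK 16.04: rte_lpm_lookup() now writes a 32-bit next hop. A minimal sketch of the updated call; lpm is assumed to be an already-populated table, and fallback the packet's input port:

    /* Sketch: LPM lookup with the 32-bit next-hop out-parameter. */
    #include <rte_lpm.h>

    static uint8_t
    route(struct rte_lpm *lpm, uint32_t dst_ip_host_order, uint8_t fallback)
    {
    	uint32_t next_hop;	/* was uint8_t before DPDK 16.04 */

    	if (rte_lpm_lookup(lpm, dst_ip_host_order, &next_hop) == 0)
    		return (uint8_t)next_hop;	/* hit: forward */
    	return fallback;			/* miss: keep input port */
    }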
@@ -624,6 +618,48 @@ get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
 }
 #endif
 
+static inline void
+parse_ptype_one(struct rte_mbuf *m)
+{
+	struct ether_hdr *eth_hdr;
+	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+	uint16_t ether_type;
+
+	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+	ether_type = eth_hdr->ether_type;
+	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+		packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+	else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))
+		packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+
+	m->packet_type = packet_type;
+}
+
+static uint16_t
+cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+		struct rte_mbuf *pkts[], uint16_t nb_pkts,
+		uint16_t max_pkts __rte_unused,
+		void *user_param __rte_unused)
+{
+	unsigned int i;
+
+	for (i = 0; i < nb_pkts; ++i)
+		parse_ptype_one(pkts[i]);
+
+	return nb_pkts;
+}
+
+static int
+add_cb_parse_ptype(uint8_t portid, uint16_t queueid)
+{
+	printf("Port %d: software packet type parsing enabled\n", portid);
+	if (rte_eth_add_rx_callback(portid, queueid, cb_parse_ptype, NULL))
+		return 0;
+
+	printf("Failed to add rx callback: port=%d\n", portid);
+	return -1;
+}
+
 static inline void
 l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
 				struct lcore_conf *qconf)
@@ -635,11 +671,11 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
 
 	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
 
-	if (m->ol_flags & PKT_RX_IPV4_HDR) {
+	if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
 		/* Handle IPv4 headers.*/
 		ipv4_hdr =
-			(struct ipv4_hdr *)(rte_pktmbuf_mtod(m, unsigned char*)
-				+ sizeof(struct ether_hdr));
+			rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+						sizeof(struct ether_hdr));
 
 #ifdef DO_RFC_1812_CHECKS
 		/* Check to make sure the packet is valid (RFC1812) */
@@ -670,15 +706,14 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
 		ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);
 
 		send_single_packet(m, dst_port);
-	}
-	else {
+	} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
 		/* Handle IPv6 headers.*/
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
 		struct ipv6_hdr *ipv6_hdr;
 
 		ipv6_hdr =
-			(struct ipv6_hdr *)(rte_pktmbuf_mtod(m, unsigned char*)
-				+ sizeof(struct ether_hdr));
+			rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+						sizeof(struct ether_hdr));
 
 		dst_port = get_ipv6_dst_port(ipv6_hdr, portid,
 					qconf->ipv6_lookup_struct);
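add_cb_parse_ptype() above leans on the generic ethdev rx-callback hook: any function registered with rte_eth_add_rx_callback() runs on each burst inside rte_eth_rx_burst(), before the application sees the packets. A stripped-down sketch of the same mechanism; the packet counter is purely illustrative:

    /* Sketch: a pass-through rx callback (DPDK 16.x signature). */
    #include <rte_common.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    static uint64_t pkts_seen;	/* illustrative per-process state */

    static uint16_t
    count_cb(uint8_t port __rte_unused, uint16_t queue __rte_unused,
    		struct rte_mbuf *pkts[] __rte_unused, uint16_t nb_pkts,
    		uint16_t max_pkts __rte_unused, void *user_param __rte_unused)
    {
    	pkts_seen += nb_pkts;	/* runs inside rte_eth_rx_burst() */
    	return nb_pkts;		/* how many packets the app receives */
    }

    static int
    install_count_cb(uint8_t port, uint16_t queue)
    {
    	/* returns an opaque handle on success, NULL on failure */
    	if (rte_eth_add_rx_callback(port, queue, count_cb, NULL) == NULL)
    		return -1;
    	return 0;
    }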
@@ -700,26 +735,25 @@
 	/* We don't currently handle IPv6 packets in LPM mode. */
 	rte_pktmbuf_free(m);
 #endif
-	}
+	} else
+		rte_pktmbuf_free(m);
 }
 
-#define SLEEP_GEAR1_THRESHOLD	100
-#define SLEEP_GEAR2_THRESHOLD	1000
+#define MINIMUM_SLEEP_TIME	1
+#define SUSPEND_THRESHOLD	300
 
 static inline uint32_t
 power_idle_heuristic(uint32_t zero_rx_packet_count)
 {
-	/* If zero count is less than 100, use it as the sleep time in us */
-	if (zero_rx_packet_count < SLEEP_GEAR1_THRESHOLD)
-		return zero_rx_packet_count;
-	/* If zero count is less than 1000, sleep time should be 100 us */
-	else if ((zero_rx_packet_count >= SLEEP_GEAR1_THRESHOLD) &&
-			(zero_rx_packet_count < SLEEP_GEAR2_THRESHOLD))
-		return SLEEP_GEAR1_THRESHOLD;
-	/* If zero count is greater than 1000, sleep time should be 1000 us */
-	else if (zero_rx_packet_count >= SLEEP_GEAR2_THRESHOLD)
-		return SLEEP_GEAR2_THRESHOLD;
+	/* Below the suspend threshold, just sleep the 1 us minimum */
+	if (zero_rx_packet_count < SUSPEND_THRESHOLD)
+		return MINIMUM_SLEEP_TIME;
+	/* Otherwise return the suspend threshold, which tells the
+	 * caller to suspend on an rx interrupt instead of polling
+	 */
+	else
+		return SUSPEND_THRESHOLD;
 
 	return 0;
 }
@@ -759,6 +793,85 @@ power_freq_scaleup_heuristic(unsigned lcore_id,
 	return FREQ_CURRENT;
 }
 
+/**
+ * force polling thread to sleep until one-shot rx interrupt triggers
+ * @param num
+ *   number of rx queues this lcore waits on
+ * @return
+ *   0 on success
+ */
+static int
+sleep_until_rx_interrupt(int num)
+{
+	struct rte_epoll_event event[num];
+	int n, i;
+	uint8_t port_id, queue_id;
+	void *data;
+
+	RTE_LOG(INFO, L3FWD_POWER,
+		"lcore %u sleeps until interrupt triggers\n",
+		rte_lcore_id());
+
+	n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, num, -1);
+	for (i = 0; i < n; i++) {
+		data = event[i].epdata.data;
+		port_id = ((uintptr_t)data) >> CHAR_BIT;
+		queue_id = ((uintptr_t)data) &
+			RTE_LEN2MASK(CHAR_BIT, uint8_t);
+		rte_eth_dev_rx_intr_disable(port_id, queue_id);
+		RTE_LOG(INFO, L3FWD_POWER,
+			"lcore %u is woken up from rx interrupt on"
+			" port %d queue %d\n",
+			rte_lcore_id(), port_id, queue_id);
+	}
+
+	return 0;
+}
+
+static void turn_on_intr(struct lcore_conf *qconf)
+{
+	int i;
+	struct lcore_rx_queue *rx_queue;
+	uint8_t port_id, queue_id;
+
+	for (i = 0; i < qconf->n_rx_queue; ++i) {
+		rx_queue = &(qconf->rx_queue_list[i]);
+		port_id = rx_queue->port_id;
+		queue_id = rx_queue->queue_id;
+
+		rte_spinlock_lock(&(locks[port_id]));
+		rte_eth_dev_rx_intr_enable(port_id, queue_id);
+		rte_spinlock_unlock(&(locks[port_id]));
+	}
+}
+
+static int event_register(struct lcore_conf *qconf)
+{
+	struct lcore_rx_queue *rx_queue;
+	uint8_t portid, queueid;
+	uint32_t data;
+	int ret;
+	int i;
+
+	for (i = 0; i < qconf->n_rx_queue; ++i) {
+		rx_queue = &(qconf->rx_queue_list[i]);
+		portid = rx_queue->port_id;
+		queueid = rx_queue->queue_id;
+		data = portid << CHAR_BIT | queueid;
+
+		ret = rte_eth_dev_rx_intr_ctl_q(portid, queueid,
+						RTE_EPOLL_PER_THREAD,
+						RTE_INTR_EVENT_ADD,
+						(void *)((uintptr_t)data));
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 /* main processing loop */
 static int
 main_loop(__attribute__((unused)) void *dummy)
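event_register() packs each (port, queue) pair into the epoll user data as port << CHAR_BIT | queue; sleep_until_rx_interrupt() reverses it with the matching shift and RTE_LEN2MASK(CHAR_BIT, uint8_t), i.e. 0xff. A worked round trip for port 3, queue 5:

    /* Sketch: the (port, queue) <-> epoll-data encoding used above. */
    #include <assert.h>
    #include <limits.h>	/* CHAR_BIT == 8 */
    #include <stdint.h>

    static void
    encode_decode(void)
    {
    	uint32_t data = (uint32_t)3 << CHAR_BIT | 5;	/* 0x0305 */
    	uint8_t port = data >> CHAR_BIT;		/* 3 */
    	uint8_t queue = data & 0xff;			/* 5 */

    	assert(port == 3 && queue == 5);
    }

One byte per field caps port and queue ids at 255, matching the uint8_t ids used throughout this application.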
@@ -772,9 +885,9 @@ main_loop(__attribute__((unused)) void *dummy)
 	struct lcore_conf *qconf;
 	struct lcore_rx_queue *rx_queue;
 	enum freq_scale_hint_t lcore_scaleup_hint;
-
 	uint32_t lcore_rx_idle_count = 0;
 	uint32_t lcore_idle_hint = 0;
+	int intr_en = 0;
 
 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
 				   US_PER_S * BURST_TX_DRAIN_US;
@@ -791,13 +904,18 @@ main_loop(__attribute__((unused)) void *dummy)
 	RTE_LOG(INFO, L3FWD_POWER, "entering main loop on lcore %u\n",
 		lcore_id);
 
 	for (i = 0; i < qconf->n_rx_queue; i++) {
-
 		portid = qconf->rx_queue_list[i].port_id;
 		queueid = qconf->rx_queue_list[i].queue_id;
 		RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%hhu "
 			"rxqueueid=%hhu\n", lcore_id, portid, queueid);
 	}
 
+	/* add into event wait list */
+	if (event_register(qconf) == 0)
+		intr_en = 1;
+	else
+		RTE_LOG(INFO, L3FWD_POWER, "RX interrupt won't be enabled.\n");
+
 	while (1) {
 		stats[lcore_id].nb_iteration_looped++;
 
@@ -809,20 +927,12 @@ main_loop(__attribute__((unused)) void *dummy)
 		 */
 		diff_tsc = cur_tsc - prev_tsc;
 		if (unlikely(diff_tsc > drain_tsc)) {
-
-			/*
-			 * This could be optimized (use queueid instead of
-			 * portid), but it is not called so often
-			 */
-			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
-				if (qconf->tx_mbufs[portid].len == 0)
-					continue;
-				send_burst(&lcore_conf[lcore_id],
-					qconf->tx_mbufs[portid].len,
-					portid);
-				qconf->tx_mbufs[portid].len = 0;
+			for (i = 0; i < qconf->n_tx_port; ++i) {
+				portid = qconf->tx_port_id[i];
+				rte_eth_tx_buffer_flush(portid,
+						qconf->tx_queue_id[portid],
+						qconf->tx_buffer[portid]);
 			}
-
 			prev_tsc = cur_tsc;
 		}
 
@@ -832,6 +942,7 @@ main_loop(__attribute__((unused)) void *dummy)
 			prev_tsc_power = cur_tsc_power;
 		}
 
+start_rx:
 		/*
 		 * Read packet from RX queues
 		 */
@@ -845,6 +956,7 @@ main_loop(__attribute__((unused)) void *dummy)
 
 			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
 								MAX_PKT_BURST);
+			stats[lcore_id].nb_rx_processed += nb_rx;
 			if (unlikely(nb_rx == 0)) {
 				/**
@@ -907,10 +1019,13 @@ main_loop(__attribute__((unused)) void *dummy)
 					rx_queue->freq_up_hint;
 			}
 
-			if (lcore_scaleup_hint == FREQ_HIGHEST)
-				rte_power_freq_max(lcore_id);
-			else if (lcore_scaleup_hint == FREQ_HIGHER)
-				rte_power_freq_up(lcore_id);
+			if (lcore_scaleup_hint == FREQ_HIGHEST) {
+				if (rte_power_freq_max)
+					rte_power_freq_max(lcore_id);
+			} else if (lcore_scaleup_hint == FREQ_HIGHER) {
+				if (rte_power_freq_up)
+					rte_power_freq_up(lcore_id);
+			}
 		} else {
 			/**
 			 * All Rx queues empty in recent consecutive polls,
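For reference, the drain interval used by the flush block above: drain_tsc converts BURST_TX_DRAIN_US into TSC cycles, rounding the cycles-per-microsecond figure up. With an assumed 2 GHz TSC and this file's 100 us drain setting:

    /* Worked example; the TSC frequency is illustrative. */
    #include <rte_cycles.h>	/* rte_get_tsc_hz(), US_PER_S */

    static uint64_t
    drain_cycles(void)
    {
    	/* at 2 GHz: (2e9 + 1e6 - 1) / 1e6 = 2000 cycles per us;
    	 * 2000 * 100 us = 200,000 cycles between forced flushes
    	 */
    	const uint64_t burst_tx_drain_us = 100;	/* BURST_TX_DRAIN_US */

    	return (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S *
    			burst_tx_drain_us;
    }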
@@ -925,16 +1040,23 @@ main_loop(__attribute__((unused)) void *dummy)
 				lcore_idle_hint = rx_queue->idle_hint;
 			}
 
-			if ( lcore_idle_hint < SLEEP_GEAR1_THRESHOLD)
+			if (lcore_idle_hint < SUSPEND_THRESHOLD)
 				/**
 				 * execute "pause" instruction to avoid context
-				 * switch for short sleep.
+				 * switch, which generally takes hundreds of
+				 * microseconds, for short sleeps.
 				 */
 				rte_delay_us(lcore_idle_hint);
-			else
-				/* long sleep force runing thread to suspend */
-				usleep(lcore_idle_hint);
-
+			else {
+				/* suspend until rx interrupt triggers */
+				if (intr_en) {
+					turn_on_intr(qconf);
+					sleep_until_rx_interrupt(
+						qconf->n_rx_queue);
+				}
+				/* start receiving packets immediately */
+				goto start_rx;
+			}
 			stats[lcore_id].sleep_time += lcore_idle_hint;
 		}
 	}
@@ -1040,7 +1162,8 @@ print_usage(const char *prgname)
 		"  --config (port,queue,lcore): rx queues configuration\n"
 		"  --no-numa: optional, disable numa awareness\n"
 		"  --enable-jumbo: enable jumbo frame"
-		" which max packet len is PKTLEN in decimal (64-9600)\n",
+		" which max packet len is PKTLEN in decimal (64-9600)\n"
+		"  --parse-ptype: parse packet type by software\n",
 		prgname);
 }
 
@@ -1134,6 +1257,8 @@ parse_config(const char *q_arg)
 	return 0;
 }
 
+#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
+
 /* Parse the argument given in the command line of the application */
 static int
 parse_args(int argc, char **argv)
@@ -1146,6 +1271,7 @@ parse_args(int argc, char **argv)
 		{"config", 1, 0, 0},
 		{"no-numa", 0, 0, 0},
 		{"enable-jumbo", 0, 0, 0},
+		{CMD_LINE_OPT_PARSE_PTYPE, 0, 0, 0},
 		{NULL, 0, 0, 0}
 	};
 
@@ -1216,6 +1342,13 @@ parse_args(int argc, char **argv)
 				(unsigned int)port_conf.rxmode.max_rx_pkt_len);
 			}
 
+			if (!strncmp(lgopts[option_index].name,
+					CMD_LINE_OPT_PARSE_PTYPE,
+					sizeof(CMD_LINE_OPT_PARSE_PTYPE))) {
+				printf("soft parse-ptype is enabled\n");
+				parse_ptype = 1;
+			}
+
 			break;
 
 		default:
@@ -1247,7 +1380,6 @@ setup_hash(int socketid)
 	struct rte_hash_parameters ipv4_l3fwd_hash_params = {
 		.name = NULL,
 		.entries = L3FWD_HASH_ENTRIES,
-		.bucket_entries = 4,
 		.key_len = sizeof(struct ipv4_5tuple),
 		.hash_func = DEFAULT_HASH_FUNC,
 		.hash_func_init_val = 0,
@@ -1256,7 +1388,6 @@ setup_hash(int socketid)
 	struct rte_hash_parameters ipv6_l3fwd_hash_params = {
 		.name = NULL,
 		.entries = L3FWD_HASH_ENTRIES,
-		.bucket_entries = 4,
 		.key_len = sizeof(struct ipv6_5tuple),
 		.hash_func = DEFAULT_HASH_FUNC,
 		.hash_func_init_val = 0,
@@ -1324,9 +1455,15 @@ setup_lpm(int socketid)
 	char s[64];
 
 	/* create the LPM table */
+	struct rte_lpm_config lpm_ipv4_config;
+
+	lpm_ipv4_config.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
+	lpm_ipv4_config.number_tbl8s = 256;
+	lpm_ipv4_config.flags = 0;
+
 	snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
-	ipv4_l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid,
-				IPV4_L3FWD_LPM_MAX_RULES, 0);
+	ipv4_l3fwd_lookup_struct[socketid] =
+			rte_lpm_create(s, socketid, &lpm_ipv4_config);
 	if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
 		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
 				" on socket %d\n", socketid);
@@ -1436,7 +1573,7 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == 0) {
+			if (link.link_status == ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
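check_ptype(), added in the next hunk, uses the usual two-call pattern of rte_eth_dev_get_supported_ptypes(): call once with a NULL buffer to learn how many entries the PMD reports, then again with an array of that size. Condensed here to an IPv4-only probe, with portid assumed valid:

    /* Sketch: query a PMD's supported packet types (DPDK >= 16.04). */
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    static int
    port_can_parse_ipv4(uint8_t portid)
    {
    	int i, n;

    	/* first call: NULL buffer, only the count is returned */
    	n = rte_eth_dev_get_supported_ptypes(portid, RTE_PTYPE_L3_MASK,
    			NULL, 0);
    	if (n <= 0)
    		return 0;

    	uint32_t ptypes[n];

    	/* second call: fill the array */
    	n = rte_eth_dev_get_supported_ptypes(portid, RTE_PTYPE_L3_MASK,
    			ptypes, n);
    	for (i = 0; i < n; i++)
    		if (ptypes[i] & RTE_PTYPE_L3_IPV4)
    			return 1;
    	return 0;
    }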
@@ -1459,6 +1596,50 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
 	}
 }
 
+static int check_ptype(uint8_t portid)
+{
+	int i, ret;
+	int ptype_l3_ipv4 = 0;
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+	int ptype_l3_ipv6 = 0;
+#endif
+	uint32_t ptype_mask = RTE_PTYPE_L3_MASK;
+
+	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
+	if (ret <= 0)
+		return 0;
+
+	uint32_t ptypes[ret];
+
+	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
+	for (i = 0; i < ret; ++i) {
+		if (ptypes[i] & RTE_PTYPE_L3_IPV4)
+			ptype_l3_ipv4 = 1;
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+		if (ptypes[i] & RTE_PTYPE_L3_IPV6)
+			ptype_l3_ipv6 = 1;
+#endif
+	}
+
+	if (ptype_l3_ipv4 == 0)
+		printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+	if (ptype_l3_ipv6 == 0)
+		printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);
+#endif
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
+	if (ptype_l3_ipv4)
+#else /* APP_LOOKUP_EXACT_MATCH */
+	if (ptype_l3_ipv4 && ptype_l3_ipv6)
+#endif
+		return 1;
+
+	return 0;
+
+}
+
 int
 main(int argc, char **argv)
 {
@@ -1471,6 +1652,7 @@ main(int argc, char **argv)
 	unsigned lcore_id;
 	uint64_t hz;
 	uint32_t n_tx_queue, nb_lcores;
+	uint32_t dev_rxq_num, dev_txq_num;
 	uint8_t portid, nb_rx_queue, queue, socketid;
 
 	/* catch SIGINT and restore cpufreq governor to ondemand */
@@ -1498,10 +1680,7 @@ main(int argc, char **argv)
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
 
 	nb_ports = rte_eth_dev_count();
-	if (nb_ports > RTE_MAX_ETHPORTS)
-		nb_ports = RTE_MAX_ETHPORTS;
-
 	if (check_port_config(nb_ports) < 0)
 		rte_exit(EXIT_FAILURE, "check_port_config failed\n");
@@ -1520,10 +1699,19 @@ main(int argc, char **argv)
 		printf("Initializing port %d ... ", portid );
 		fflush(stdout);
 
+		rte_eth_dev_info_get(portid, &dev_info);
+		dev_rxq_num = dev_info.max_rx_queues;
+		dev_txq_num = dev_info.max_tx_queues;
+
 		nb_rx_queue = get_port_n_rx_queues(portid);
+		if (nb_rx_queue > dev_rxq_num)
+			rte_exit(EXIT_FAILURE,
+				"Cannot configure non-existent rxq: "
+				"port=%d\n", portid);
+
 		n_tx_queue = nb_lcores;
-		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
-			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
+		if (n_tx_queue > dev_txq_num)
+			n_tx_queue = dev_txq_num;
 		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
 			nb_rx_queue, (unsigned)n_tx_queue );
 		ret = rte_eth_dev_configure(portid, nb_rx_queue,
@@ -1541,12 +1729,31 @@ main(int argc, char **argv)
 		if (ret < 0)
 			rte_exit(EXIT_FAILURE, "init_mem failed\n");
 
+		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+			if (rte_lcore_is_enabled(lcore_id) == 0)
+				continue;
+
+			/* Initialize TX buffers */
+			qconf = &lcore_conf[lcore_id];
+			qconf->tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
+				RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
+				rte_eth_dev_socket_id(portid));
+			if (qconf->tx_buffer[portid] == NULL)
+				rte_exit(EXIT_FAILURE, "Can't allocate tx buffer for port %u\n",
+						(unsigned) portid);
+
+			rte_eth_tx_buffer_init(qconf->tx_buffer[portid], MAX_PKT_BURST);
+		}
+
 		/* init one TX queue per couple (lcore,port) */
 		queueid = 0;
 		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
 			if (rte_lcore_is_enabled(lcore_id) == 0)
 				continue;
 
+			if (queueid >= dev_txq_num)
+				continue;
+
 			if (numa_on)
 				socketid = \
 				(uint8_t)rte_lcore_to_socket_id(lcore_id);
@@ -1570,6 +1777,9 @@ main(int argc, char **argv)
 			qconf = &lcore_conf[lcore_id];
 			qconf->tx_queue_id[portid] = queueid;
 			queueid++;
+
+			qconf->tx_port_id[qconf->n_tx_port] = portid;
+			qconf->n_tx_port++;
 		}
 		printf("\n");
 	}
@@ -1581,8 +1791,8 @@ main(int argc, char **argv)
 		/* init power management library */
 		ret = rte_power_init(lcore_id);
 		if (ret)
-			rte_exit(EXIT_FAILURE, "Power management library "
-					"initialization failed on core%u\n", lcore_id);
+			RTE_LOG(ERR, POWER,
+				"Library initialization failed on core %u\n", lcore_id);
 
 		/* init timer structures for each enabled lcore */
 		rte_timer_init(&power_timers[lcore_id]);
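The rte_power_init() hunk above also explains the rte_power_freq_* NULL checks added earlier in this patch: librte_power exposes those entry points as function pointers that rte_power_init() fills in, so when initialization fails (e.g. no acpi-cpufreq governor on the host) they stay NULL and the application degrades to interrupt-based sleeping without frequency scaling. A sketch of that degradation path:

    /* Sketch: tolerate missing cpufreq support. */
    #include <rte_lcore.h>
    #include <rte_log.h>
    #include <rte_power.h>

    static void
    init_power(unsigned int lcore_id)
    {
    	if (rte_power_init(lcore_id) != 0)
    		/* not fatal: rx-interrupt sleep still works */
    		RTE_LOG(ERR, POWER,
    			"no frequency scaling on lcore %u\n", lcore_id);
    }

    static void
    maybe_scale_down(unsigned int lcore_id)
    {
    	if (rte_power_freq_down)	/* NULL when init failed */
    		rte_power_freq_down(lcore_id);
    }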
@@ -1615,6 +1825,14 @@ main(int argc, char **argv)
 				rte_exit(EXIT_FAILURE,
 					"rte_eth_rx_queue_setup: err=%d, "
 						"port=%d\n", ret, portid);
+
+			if (parse_ptype) {
+				if (add_cb_parse_ptype(portid, queueid) < 0)
+					rte_exit(EXIT_FAILURE,
+						"Failed to add ptype callback\n");
+			} else if (!check_ptype(portid))
+				rte_exit(EXIT_FAILURE,
+					"PMD cannot provide needed ptypes\n");
 		}
 	}
 
@@ -1630,7 +1848,6 @@ main(int argc, char **argv)
 		if (ret < 0)
 			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, "
 						"port=%d\n", ret, portid);
-
 		/*
 		 * If enabled, put device in promiscuous mode.
 		 * This allows IO forwarding mode to forward packets
@@ -1639,6 +1856,8 @@ main(int argc, char **argv)
 		 */
 		if (promiscuous_on)
 			rte_eth_promiscuous_enable(portid);
+		/* initialize spinlock for each port */
+		rte_spinlock_init(&(locks[portid]));
 	}
 
 	check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
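The per-port spinlocks initialized just above pair with turn_on_intr() earlier in the patch: multiple lcores may poll different queues of one port, and re-arming rx interrupts touches shared per-port state, so the enable call is serialized per port. Reduced to its core, with locks[] as declared at the top of the patch:

    /* Sketch: per-port serialization of rx interrupt re-arming. */
    #include <rte_ethdev.h>
    #include <rte_spinlock.h>

    /* run rte_spinlock_init() once per port before use */
    static rte_spinlock_t locks[RTE_MAX_ETHPORTS];

    static void
    enable_rx_intr(uint8_t port, uint8_t queue)
    {
    	rte_spinlock_lock(&locks[port]);
    	rte_eth_dev_rx_intr_enable(port, queue);	/* shared port state */
    	rte_spinlock_unlock(&locks[port]);
    }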