X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fl2fwd-keepalive%2Fmain.c;h=60cccdb1084b297bc82330356dc77e038d5dfa74;hb=f03723017a2a5ea421df821eb0ff9a0bfcacff4f;hp=8d7b09ee230bdcee8fd82e4dce339bcfe8893a84;hpb=e64833f2273ac67becbca10d9f2f1598872dc99e;p=dpdk.git

diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c
index 8d7b09ee23..60cccdb108 100644
--- a/examples/l2fwd-keepalive/main.c
+++ b/examples/l2fwd-keepalive/main.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -47,6 +47,7 @@
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -65,12 +66,13 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
 #include 
 
+#include "shm.h"
+
 #define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
 
 #define NB_MBUF 8192
@@ -97,21 +99,16 @@ static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
 
 static unsigned int l2fwd_rx_queue_per_lcore = 1;
 
-struct mbuf_table {
-	unsigned len;
-	struct rte_mbuf *m_table[MAX_PKT_BURST];
-};
-
 #define MAX_RX_QUEUE_PER_LCORE 16
 #define MAX_TX_QUEUE_PER_PORT 16
 struct lcore_queue_conf {
 	unsigned n_rx_port;
 	unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
-	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
-
 } __rte_cache_aligned;
 struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 
+struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
+
 static const struct rte_eth_conf port_conf = {
 	.rxmode = {
 		.split_hdr_size = 0,
@@ -192,58 +189,14 @@ print_stats(__attribute__((unused)) struct rte_timer *ptr_timer,
 	printf("\n====================================================\n");
 }
 
-/* Send the burst of packets on an output interface */
-static int
-l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
-{
-	struct rte_mbuf **m_table;
-	unsigned ret;
-	unsigned queueid = 0;
-
-	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
-
-	ret = rte_eth_tx_burst(port, (uint16_t) queueid, m_table, (uint16_t) n);
-	port_statistics[port].tx += ret;
-	if (unlikely(ret < n)) {
-		port_statistics[port].dropped += (n - ret);
-		do {
-			rte_pktmbuf_free(m_table[ret]);
-		} while (++ret < n);
-	}
-
-	return 0;
-}
-
-/* Enqueue packets for TX and prepare them to be sent */
-static int
-l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
-{
-	unsigned lcore_id, len;
-	struct lcore_queue_conf *qconf;
-
-	lcore_id = rte_lcore_id();
-
-	qconf = &lcore_queue_conf[lcore_id];
-	len = qconf->tx_mbufs[port].len;
-	qconf->tx_mbufs[port].m_table[len] = m;
-	len++;
-
-	/* enough pkts to be sent */
-	if (unlikely(len == MAX_PKT_BURST)) {
-		l2fwd_send_burst(qconf, MAX_PKT_BURST, port);
-		len = 0;
-	}
-
-	qconf->tx_mbufs[port].len = len;
-	return 0;
-}
-
 static void
 l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
 {
 	struct ether_hdr *eth;
 	void *tmp;
+	int sent;
 	unsigned dst_port;
+	struct rte_eth_dev_tx_buffer *buffer;
 
 	dst_port = l2fwd_dst_ports[portid];
 	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
@@ -255,7 +208,10 @@ l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
 	/* src addr */
 	ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr);
 
-	l2fwd_send_packet(m, (uint8_t) dst_port);
+	buffer = tx_buffer[dst_port];
+	sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
+	if (sent)
+		port_statistics[dst_port].tx += sent;
 }
 
 /* main processing loop */
@@ -264,12 +220,14 @@ l2fwd_main_loop(void)
 {
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
 	struct rte_mbuf *m;
+	int sent;
 	unsigned lcore_id;
 	uint64_t prev_tsc, diff_tsc, cur_tsc;
 	unsigned i, j, portid, nb_rx;
 	struct lcore_queue_conf *qconf;
 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S *
 		BURST_TX_DRAIN_US;
+	struct rte_eth_dev_tx_buffer *buffer;
 
 	prev_tsc = 0;
 
@@ -312,13 +270,15 @@ l2fwd_main_loop(void)
 		diff_tsc = cur_tsc - prev_tsc;
 		if (unlikely(diff_tsc > drain_tsc)) {
 
-			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
-				if (qconf->tx_mbufs[portid].len == 0)
-					continue;
-				l2fwd_send_burst(&lcore_queue_conf[lcore_id],
-					qconf->tx_mbufs[portid].len,
-					(uint8_t) portid);
-				qconf->tx_mbufs[portid].len = 0;
+			for (i = 0; i < qconf->n_rx_port; i++) {
+
+				portid = l2fwd_dst_ports[qconf->rx_port_list[i]];
+				buffer = tx_buffer[portid];
+
+				sent = rte_eth_tx_buffer_flush(portid, 0, buffer);
+				if (sent)
+					port_statistics[portid].tx += sent;
+
 			}
 
 			prev_tsc = cur_tsc;
@@ -471,7 +431,7 @@ l2fwd_parse_args(int argc, char **argv)
 		/* timer period */
 		case 'T':
 			timer_period = l2fwd_parse_timer_period(optarg)
-				* 1000 * TIMER_MILLISECOND;
+				* (int64_t)(1000 * TIMER_MILLISECOND);
 			if (timer_period < 0) {
 				printf("invalid timer period\n");
 				l2fwd_usage(prgname);
@@ -540,7 +500,7 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
 				continue;
 			}
 			/* clear all_ports_up flag if any link down */
-			if (link.link_status == 0) {
+			if (link.link_status == ETH_LINK_DOWN) {
 				all_ports_up = 0;
 				break;
 			}
@@ -564,7 +524,7 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
 }
 
 static void
-dead_core(__attribute__((unused)) void *ptr_data, const int id_core)
+dead_core(__rte_unused void *ptr_data, const int id_core)
 {
 	printf("Dead core %i - restarting..\n", id_core);
 	if (rte_eal_get_lcore_state(id_core) == FINISHED) {
@@ -575,6 +535,14 @@ dead_core(__attribute__((unused)) void *ptr_data, const int id_core)
 	}
 }
 
+static void
+relay_core_state(void *ptr_data, const int id_core,
+	const enum rte_keepalive_state core_state, uint64_t last_alive)
+{
+	rte_keepalive_relayed_state((struct rte_keepalive_shm *)ptr_data,
+		id_core, core_state, last_alive);
+}
+
 int
 main(int argc, char **argv)
 {
@@ -611,9 +579,6 @@ main(int argc, char **argv)
 	if (nb_ports == 0)
 		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
 
-	if (nb_ports > RTE_MAX_ETHPORTS)
-		nb_ports = RTE_MAX_ETHPORTS;
-
 	/* reset l2fwd_dst_ports */
 	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
 		l2fwd_dst_ports[portid] = 0;
@@ -713,6 +678,23 @@ main(int argc, char **argv)
 				"rte_eth_tx_queue_setup:err=%d, port=%u\n",
 				ret, (unsigned) portid);
 
+		/* Initialize TX buffers */
+		tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
+				RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
+				rte_eth_dev_socket_id(portid));
+		if (tx_buffer[portid] == NULL)
+			rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
+					(unsigned) portid);
+
+		rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
+
+		ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
+				rte_eth_tx_buffer_count_callback,
+				&port_statistics[portid].dropped);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE, "Cannot set error callback for "
+					"tx buffer on port %u\n", (unsigned) portid);
+
 		/* Start device */
 		ret = rte_eth_dev_start(portid);
 		if (ret < 0)
@@ -749,10 +731,18 @@ main(int argc, char **argv)
 	rte_timer_init(&stats_timer);
 
 	if (check_period > 0) {
+		struct rte_keepalive_shm *ka_shm;
+
+		ka_shm = rte_keepalive_shm_create();
+		if (ka_shm == NULL)
+			rte_exit(EXIT_FAILURE,
+				"rte_keepalive_shm_create() failed");
 		rte_global_keepalive_info =
-			rte_keepalive_create(&dead_core, NULL);
+			rte_keepalive_create(&dead_core, ka_shm);
 		if (rte_global_keepalive_info == NULL)
 			rte_exit(EXIT_FAILURE, "init_keep_alive() failed");
+		rte_keepalive_register_relay_callback(rte_global_keepalive_info,
+			relay_core_state, ka_shm);
 		rte_timer_init(&hb_timer);
 		if (rte_timer_reset(&hb_timer,
 			(check_period * rte_get_timer_hz()) / 1000,
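
The hunks above replace the example's hand-rolled mbuf_table/l2fwd_send_burst()/l2fwd_send_packet() buffering with the ethdev TX buffering helpers (rte_eth_tx_buffer_init, rte_eth_tx_buffer, rte_eth_tx_buffer_flush, rte_eth_tx_buffer_set_err_callback), and packets the flush cannot send are now counted through rte_eth_tx_buffer_count_callback instead of being freed silently. A minimal, self-contained sketch of that pattern follows; setup_tx_buffer(), forward_one() and drain_port() are illustrative names rather than part of the patch, which does the same work inline in main(), l2fwd_simple_forward() and l2fwd_main_loop(), and the uint8_t port type mirrors the DPDK era this file targets.

/*
 * Condensed sketch of the buffered-TX pattern used by the patch above.
 * Helper names are illustrative only.
 */
#include <stdint.h>

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>

#define MAX_PKT_BURST 32	/* burst size used by the example */

static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];

/* One buffer per destination port, sized for a full burst and allocated on
 * the port's NUMA socket; packets dropped on flush bump *dropped. */
static int
setup_tx_buffer(uint8_t port_id, uint64_t *dropped)
{
	tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
			RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
			rte_eth_dev_socket_id(port_id));
	if (tx_buffer[port_id] == NULL)
		return -1;

	rte_eth_tx_buffer_init(tx_buffer[port_id], MAX_PKT_BURST);

	/* Count unsent packets instead of discarding them without trace. */
	return rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
			rte_eth_tx_buffer_count_callback, dropped);
}

/* Fast path: queue one mbuf; the buffer transmits on its own once full.
 * Returns how many packets this call actually pushed to the NIC (often 0). */
static uint16_t
forward_one(uint8_t dst_port, struct rte_mbuf *m)
{
	return rte_eth_tx_buffer(dst_port, 0, tx_buffer[dst_port], m);
}

/* Drain path: run from the lcore loop when the drain timer expires, so a
 * partially filled buffer does not hold packets back indefinitely. */
static uint16_t
drain_port(uint8_t dst_port)
{
	return rte_eth_tx_buffer_flush(dst_port, 0, tx_buffer[dst_port]);
}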
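The keepalive changes at the end of the patch publish per-lcore liveness outside the process: rte_keepalive_shm_create() and rte_keepalive_relayed_state() come from the example's new shm.h (they belong to the example, not to librte_eal), and relay_core_state() is registered with rte_keepalive_register_relay_callback() so every core-state change is mirrored into a shared-memory block that an external monitor can poll. A condensed sketch of that wiring, with setup_keepalive() and ka_info as illustrative names standing in for the patch's inline code and rte_global_keepalive_info:

/*
 * Sketch of the keepalive relay wiring introduced by the patch. It only
 * builds inside the l2fwd-keepalive example tree, since shm.h/shm.c provide
 * rte_keepalive_shm_create() and rte_keepalive_relayed_state().
 */
#include <stdint.h>

#include <rte_keepalive.h>

#include "shm.h"

static struct rte_keepalive *ka_info;	/* the patch uses rte_global_keepalive_info */

/* Relay callback: copy each core's state change into shared memory so a
 * process outside this application can observe lcore health. */
static void
relay_core_state(void *ptr_data, const int id_core,
	const enum rte_keepalive_state core_state, uint64_t last_alive)
{
	rte_keepalive_relayed_state((struct rte_keepalive_shm *)ptr_data,
		id_core, core_state, last_alive);
}

/* dead_cb is the failure handler (dead_core() in the patch), invoked when a
 * monitored lcore stops checking in. */
static int
setup_keepalive(void (*dead_cb)(void *, const int))
{
	struct rte_keepalive_shm *ka_shm = rte_keepalive_shm_create();

	if (ka_shm == NULL)
		return -1;

	ka_info = rte_keepalive_create(dead_cb, ka_shm);
	if (ka_info == NULL)
		return -1;

	rte_keepalive_register_relay_callback(ka_info, relay_core_state, ka_shm);
	return 0;
}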