diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index 5d1053f1e8..0e3daadf94 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -1,7 +1,7 @@
 /*-
  * BSD LICENSE
  *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -64,7 +63,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -73,8 +71,9 @@
 #include
 #include
 
-#include "main.h"
-
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
+#define L3FWDACL_DEBUG
+#endif
 #define DO_RFC_1812_CHECKS
 
 #define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1
@@ -83,8 +82,6 @@
 
 #define MEMPOOL_CACHE_SIZE 256
 
-#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
-
 /*
  * This expression is used to calculate the number of mbufs needed
  * depending on user input, taking into account memory for rx and tx hardware
@@ -100,25 +97,6 @@
 				nb_lcores * MEMPOOL_CACHE_SIZE),	\
 		(unsigned)8192)
 
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
-
 #define MAX_PKT_BURST 32
 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
 
@@ -143,11 +121,6 @@ static uint32_t enabled_port_mask;
 static int promiscuous_on; /**< Ports set in promiscuous mode off by default. */
 static int numa_on = 1; /**< NUMA is enabled by default. */
 
-struct mbuf_table {
-	uint16_t len;
-	struct rte_mbuf *m_table[MAX_PKT_BURST];
-};
-
 struct lcore_rx_queue {
 	uint8_t port_id;
 	uint8_t queue_id;
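A note on the NB_MBUF expression kept above: the pool is sized so that every RX descriptor, one TX burst per (port, lcore) pair, every TX descriptor, and every per-lcore mempool cache can hold an mbuf at the same time, with a floor of 8192. A minimal standalone sketch of that sizing rule follows; the variable names mirror the macro's inputs, but all values are invented for illustration:

#include <stdio.h>

int main(void)
{
	/* illustrative values; the example derives these from its CLI args */
	unsigned nb_ports = 2, nb_rx_queue = 1, n_tx_queue = 4, nb_lcores = 4;
	unsigned nb_rxd = 128, nb_txd = 512;
	unsigned max_pkt_burst = 32, mempool_cache_size = 256;

	unsigned nb_mbuf = nb_ports * nb_rx_queue * nb_rxd +
		nb_ports * nb_lcores * max_pkt_burst +
		nb_ports * n_tx_queue * nb_txd +
		nb_lcores * mempool_cache_size;
	if (nb_mbuf < 8192)	/* the macro takes RTE_MAX(sum, 8192) */
		nb_mbuf = 8192;
	printf("mbuf pool size: %u\n", nb_mbuf);
	return 0;
}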
@@ -195,11 +168,8 @@ static struct rte_eth_conf port_conf = {
 	.rx_adv_conf = {
 		.rss_conf = {
 			.rss_key = NULL,
-			.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP
-				| ETH_RSS_IPV4_UDP
-				| ETH_RSS_IPV6 | ETH_RSS_IPV6_EX
-				| ETH_RSS_IPV6_TCP | ETH_RSS_IPV6_TCP_EX
-				| ETH_RSS_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
+			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
+				ETH_RSS_TCP | ETH_RSS_SCTP,
 		},
 	},
 	.txmode = {
@@ -207,26 +177,6 @@ static struct rte_eth_conf port_conf = {
 	},
 };
 
-static const struct rte_eth_rxconf rx_conf = {
-	.rx_thresh = {
-		.pthresh = RX_PTHRESH,
-		.hthresh = RX_HTHRESH,
-		.wthresh = RX_WTHRESH,
-	},
-	.rx_free_thresh = 32,
-};
-
-static const struct rte_eth_txconf tx_conf = {
-	.tx_thresh = {
-		.pthresh = TX_PTHRESH,
-		.hthresh = TX_HTHRESH,
-		.wthresh = TX_WTHRESH,
-	},
-	.tx_free_thresh = 0, /* Use PMD default values */
-	.tx_rs_thresh = 0, /* Use PMD default values */
-	.txq_flags = 0x0,
-};
-
 static struct rte_mempool *pktmbuf_pool[NB_SOCKETS];
 
 /***********************start of ACL part******************************/
@@ -234,7 +184,7 @@ static struct rte_mempool *pktmbuf_pool[NB_SOCKETS];
 static inline int
 is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len);
 #endif
-static inline int
+static inline void
 send_single_packet(struct rte_mbuf *m, uint8_t port);
 
 #define MAX_ACL_RULE_NUM 100000
@@ -263,9 +213,9 @@ send_single_packet(struct rte_mbuf *m, uint8_t port);
 #define OFF_IPV42PROTO (offsetof(struct ipv4_hdr, next_proto_id))
 #define OFF_IPV62PROTO (offsetof(struct ipv6_hdr, proto))
 #define MBUF_IPV4_2PROTO(m)	\
-	(rte_pktmbuf_mtod((m), uint8_t *) + OFF_ETHHEAD + OFF_IPV42PROTO)
+	rte_pktmbuf_mtod_offset((m), uint8_t *, OFF_ETHHEAD + OFF_IPV42PROTO)
 #define MBUF_IPV6_2PROTO(m)	\
-	(rte_pktmbuf_mtod((m), uint8_t *) + OFF_ETHHEAD + OFF_IPV62PROTO)
+	rte_pktmbuf_mtod_offset((m), uint8_t *, OFF_ETHHEAD + OFF_IPV62PROTO)
 
 #define GET_CB_FIELD(in, fd, base, lim, dlm) do {		\
 	unsigned long val;					\
@@ -278,15 +228,6 @@ send_single_packet(struct rte_mbuf *m, uint8_t port);
 	(in) = end + 1;						\
 } while (0)
 
-#define CLASSIFY(context, data, res, num, cat) do {		\
-	if (scalar)						\
-		rte_acl_classify_scalar((context), (data),	\
-		(res), (num), (cat));				\
-	else							\
-		rte_acl_classify((context), (data),		\
-		(res), (num), (cat));				\
-} while (0)
-
 /*
  * ACL rules should have higher priorities than route ones to ensure ACL rule
  * always be found when input packets have multi-matches in the database.
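The MBUF_IPV4_2PROTO/MBUF_IPV6_2PROTO hunk above is a mechanical conversion: rte_pktmbuf_mtod_offset() folds the old cast-plus-pointer-arithmetic into a single macro. A minimal sketch of the equivalence, assuming a DPDK build environment of this era (the helper names are illustrative, not part of the patch):

#include <stddef.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>

/* pre-patch style: cast the mtod pointer, then add the byte offset */
static uint8_t *
ipv4_proto_old(struct rte_mbuf *m)
{
	return rte_pktmbuf_mtod(m, uint8_t *) +
		sizeof(struct ether_hdr) +
		offsetof(struct ipv4_hdr, next_proto_id);
}

/* post-patch style: one macro performs both the cast and the offset */
static uint8_t *
ipv4_proto_new(struct rte_mbuf *m)
{
	return rte_pktmbuf_mtod_offset(m, uint8_t *,
		sizeof(struct ether_hdr) +
		offsetof(struct ipv4_hdr, next_proto_id));
}

Both functions return the same address; the new form avoids the extra parentheses and makes the offset explicit.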
@@ -317,6 +258,23 @@ enum {
 	NUM_FIELDS_IPV4
 };
 
+/*
+ * That effectively defines order of IPV4VLAN classifications:
+ *  - PROTO
+ *  - VLAN (TAG and DOMAIN)
+ *  - SRC IP ADDRESS
+ *  - DST IP ADDRESS
+ *  - PORTS (SRC and DST)
+ */
+enum {
+	RTE_ACL_IPV4VLAN_PROTO,
+	RTE_ACL_IPV4VLAN_VLAN,
+	RTE_ACL_IPV4VLAN_SRC,
+	RTE_ACL_IPV4VLAN_DST,
+	RTE_ACL_IPV4VLAN_PORTS,
+	RTE_ACL_IPV4VLAN_NUM
+};
+
 struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
 	{
 		.type = RTE_ACL_FIELD_TYPE_BITMASK,
@@ -620,9 +578,9 @@ dump_acl4_rule(struct rte_mbuf *m, uint32_t sig)
 {
 	uint32_t offset = sig & ~ACL_DENY_SIGNATURE;
 	unsigned char a, b, c, d;
-	struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
-		(rte_pktmbuf_mtod(m, unsigned char *) +
-		sizeof(struct ether_hdr));
+	struct ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(m,
+						struct ipv4_hdr *,
+						sizeof(struct ether_hdr));
 
 	uint32_t_to_char(rte_bswap32(ipv4_hdr->src_addr), &a, &b, &c, &d);
 	printf("Packet Src:%hhu.%hhu.%hhu.%hhu ", a, b, c, d);
@@ -644,9 +602,9 @@ dump_acl6_rule(struct rte_mbuf *m, uint32_t sig)
 {
 	unsigned i;
 	uint32_t offset = sig & ~ACL_DENY_SIGNATURE;
-	struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
-		(rte_pktmbuf_mtod(m, unsigned char *) +
-		sizeof(struct ether_hdr));
+	struct ipv6_hdr *ipv6_hdr = rte_pktmbuf_mtod_offset(m,
+						struct ipv6_hdr *,
+						sizeof(struct ether_hdr));
 
 	printf("Packet Src");
 	for (i = 0; i < RTE_DIM(ipv6_hdr->src_addr); i += sizeof(uint16_t))
@@ -701,15 +659,12 @@ prepare_one_packet(struct rte_mbuf **pkts_in, struct acl_search_t *acl,
 	struct ipv4_hdr *ipv4_hdr;
 	struct rte_mbuf *pkt = pkts_in[index];
 
-	int type = pkt->ol_flags & (PKT_RX_IPV4_HDR | PKT_RX_IPV6_HDR);
-
-	if (type == PKT_RX_IPV4_HDR) {
-
-		ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt,
-			unsigned char *) + sizeof(struct ether_hdr));
+	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
+		ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
+						sizeof(struct ether_hdr));
 
 		/* Check to make sure the packet is valid (RFC1812) */
-		if (is_valid_ipv4_pkt(ipv4_hdr, pkt->pkt.pkt_len) >= 0) {
+		if (is_valid_ipv4_pkt(ipv4_hdr, pkt->pkt_len) >= 0) {
 
 			/* Update time to live and header checksum */
 			--(ipv4_hdr->time_to_live);
@@ -723,9 +678,7 @@ prepare_one_packet(struct rte_mbuf **pkts_in, struct acl_search_t *acl,
 			/* Not a valid IPv4 packet */
 			rte_pktmbuf_free(pkt);
 		}
-
-	} else if (type == PKT_RX_IPV6_HDR) {
-
+	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
 		/* Fill acl structure */
 		acl->data_ipv6[acl->num_ipv6] = MBUF_IPV6_2PROTO(pkt);
 		acl->m_ipv6[(acl->num_ipv6)++] = pkt;
@@ -743,17 +696,12 @@ prepare_one_packet(struct rte_mbuf **pkts_in, struct acl_search_t *acl,
 {
 	struct rte_mbuf *pkt = pkts_in[index];
 
-	int type = pkt->ol_flags & (PKT_RX_IPV4_HDR | PKT_RX_IPV6_HDR);
-
-	if (type == PKT_RX_IPV4_HDR) {
-
+	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
 		/* Fill acl structure */
 		acl->data_ipv4[acl->num_ipv4] = MBUF_IPV4_2PROTO(pkt);
 		acl->m_ipv4[(acl->num_ipv4)++] = pkt;
-
-	} else if (type == PKT_RX_IPV6_HDR) {
-
+	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
 		/* Fill acl structure */
 		acl->data_ipv6[acl->num_ipv6] = MBUF_IPV6_2PROTO(pkt);
 		acl->m_ipv6[(acl->num_ipv6)++] = pkt;
@@ -801,9 +749,9 @@ send_one_packet(struct rte_mbuf *m, uint32_t res)
 		/* in the ACL list, drop it */
 #ifdef L3FWDACL_DEBUG
 		if ((res & ACL_DENY_SIGNATURE) != 0) {
-			if (m->ol_flags & PKT_RX_IPV4_HDR)
+			if (RTE_ETH_IS_IPV4_HDR(m->packet_type))
 				dump_acl4_rule(m, res);
-			else
+			else if (RTE_ETH_IS_IPV6_HDR(m->packet_type))
 				dump_acl6_rule(m, res);
 		}
 #endif
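The prepare_one_packet() hunks above switch the L3 dispatch from the retired PKT_RX_IPV4_HDR/PKT_RX_IPV6_HDR offload flags to the mbuf's packet_type field. A hedged sketch of the same dispatch in isolation (classify_l3 is a hypothetical helper, not part of the patch; the macros live in the ethdev header in this era):

#include <rte_mbuf.h>
#include <rte_ethdev.h>	/* RTE_ETH_IS_IPV4_HDR / RTE_ETH_IS_IPV6_HDR */

enum l3_kind { L3_IPV4, L3_IPV6, L3_UNKNOWN };

static enum l3_kind
classify_l3(const struct rte_mbuf *pkt)
{
	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type))
		return L3_IPV4;
	else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type))
		return L3_IPV6;
	/* the example frees packets of unknown type */
	return L3_UNKNOWN;
}

One behavioral nuance of the send_one_packet() hunk: the debug path now dumps a rule only when the packet is positively IPv4 or IPv6, instead of treating every non-IPv4 packet as IPv6.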
@@ -1097,13 +1045,13 @@ add_rules(const char *rule_path,
 
 	fseek(fh, 0, SEEK_SET);
 
-	acl_rules = (uint8_t *)calloc(acl_num, rule_size);
+	acl_rules = calloc(acl_num, rule_size);
 
 	if (NULL == acl_rules)
 		rte_exit(EXIT_FAILURE, "%s: failed to malloc memory\n",
 			__func__);
 
-	route_rules = (uint8_t *)calloc(route_num, rule_size);
+	route_rules = calloc(route_num, rule_size);
 
 	if (NULL == route_rules)
 		rte_exit(EXIT_FAILURE, "%s: failed to malloc memory\n",
@@ -1204,7 +1152,7 @@ setup_acl(struct rte_acl_rule *route_base,
 	int dim = ipv6 ? RTE_DIM(ipv6_defs) : RTE_DIM(ipv4_defs);
 
 	/* Create ACL contexts */
-	rte_snprintf(name, sizeof(name), "%s%d",
+	snprintf(name, sizeof(name), "%s%d",
 			ipv6 ? L3FWD_ACL_IPV6_NAME : L3FWD_ACL_IPV4_NAME,
 			socketid);
 
@@ -1216,6 +1164,11 @@ setup_acl(struct rte_acl_rule *route_base,
 	if ((context = rte_acl_create(&acl_param)) == NULL)
 		rte_exit(EXIT_FAILURE, "Failed to create ACL context\n");
 
+	if (parm_config.scalar && rte_acl_set_ctx_classify(context,
+			RTE_ACL_CLASSIFY_SCALAR) != 0)
+		rte_exit(EXIT_FAILURE,
+			"Failed to setup classify method for ACL context\n");
+
 	if (rte_acl_add_rules(context, route_base, route_num) < 0)
 		rte_exit(EXIT_FAILURE, "add rules failed\n");
 
@@ -1223,8 +1176,9 @@ setup_acl(struct rte_acl_rule *route_base,
 		rte_exit(EXIT_FAILURE, "add rules failed\n");
 
 	/* Perform builds */
-	acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES;
+	memset(&acl_build_param, 0, sizeof(acl_build_param));
 
+	acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES;
 	acl_build_param.num_fields = dim;
 	memcpy(&acl_build_param.defs, ipv6 ? ipv6_defs : ipv4_defs,
 		ipv6 ? sizeof(ipv6_defs) : sizeof(ipv4_defs));
@@ -1292,6 +1246,10 @@ app_acl_init(void)
 			acl_log("Socket %d of lcore %u is out "
 				"of range %d\n",
 				socketid, lcore_id, NB_SOCKETS);
+			free(route_base_ipv4);
+			free(route_base_ipv6);
+			free(acl_base_ipv4);
+			free(acl_base_ipv6);
 			return -1;
 		}
 
@@ -1330,56 +1288,26 @@ app_acl_init(void)
 struct lcore_conf {
 	uint16_t n_rx_queue;
 	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
+	uint16_t n_tx_port;
+	uint16_t tx_port_id[RTE_MAX_ETHPORTS];
 	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
-	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
+	struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
 } __rte_cache_aligned;
 
 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
 
-/* Send burst of packets on an output interface */
-static inline int
-send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
-{
-	struct rte_mbuf **m_table;
-	int ret;
-	uint16_t queueid;
-
-	queueid = qconf->tx_queue_id[port];
-	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
-
-	ret = rte_eth_tx_burst(port, queueid, m_table, n);
-	if (unlikely(ret < n)) {
-		do {
-			rte_pktmbuf_free(m_table[ret]);
-		} while (++ret < n);
-	}
-
-	return 0;
-}
-
 /* Enqueue a single packet, and send burst if queue is filled */
-static inline int
+static inline void
 send_single_packet(struct rte_mbuf *m, uint8_t port)
 {
 	uint32_t lcore_id;
-	uint16_t len;
 	struct lcore_conf *qconf;
 
 	lcore_id = rte_lcore_id();
 
 	qconf = &lcore_conf[lcore_id];
-	len = qconf->tx_mbufs[port].len;
-	qconf->tx_mbufs[port].m_table[len] = m;
-	len++;
-
-	/* enough pkts to be sent */
-	if (unlikely(len == MAX_PKT_BURST)) {
-		send_burst(qconf, MAX_PKT_BURST, port);
-		len = 0;
-	}
-
-	qconf->tx_mbufs[port].len = len;
-	return 0;
+	rte_eth_tx_buffer(port, qconf->tx_queue_id[port],
+			qconf->tx_buffer[port], m);
 }
 
 #ifdef DO_RFC_1812_CHECKS
@@ -1436,10 +1364,8 @@ main_loop(__attribute__((unused)) void *dummy)
 	int socketid;
 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S
 			* BURST_TX_DRAIN_US;
-	int scalar = parm_config.scalar;
 
 	prev_tsc = 0;
-
 	lcore_id = rte_lcore_id();
 	qconf = &lcore_conf[lcore_id];
 	socketid = rte_lcore_to_socket_id(lcore_id);
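setup_acl() above now selects the scalar classifier through the public rte_acl_set_ctx_classify() hook rather than the removed compile-time CLASSIFY macro, and it zeroes acl_build_param before use so no field is left uninitialized. A condensed sketch of that create/classify/build sequence under the same assumptions (build_ctx and its parameters are illustrative stand-ins for the example's ipv4_defs and parsed rule arrays):

#include <string.h>
#include <rte_acl.h>

static struct rte_acl_ctx *
build_ctx(const char *name, int socketid, int use_scalar,
	struct rte_acl_field_def *defs, uint32_t num_fields, size_t defs_size,
	const struct rte_acl_rule *rules, uint32_t num_rules)
{
	struct rte_acl_param param;
	struct rte_acl_config cfg;
	struct rte_acl_ctx *ctx;

	memset(&param, 0, sizeof(param));
	param.name = name;
	param.socket_id = socketid;
	param.rule_size = RTE_ACL_RULE_SZ(num_fields);
	param.max_rule_num = num_rules;

	ctx = rte_acl_create(&param);
	if (ctx == NULL)
		return NULL;

	/* force the scalar method, e.g. on CPUs without vector support */
	if (use_scalar &&
	    rte_acl_set_ctx_classify(ctx, RTE_ACL_CLASSIFY_SCALAR) != 0)
		goto error;

	if (rte_acl_add_rules(ctx, rules, num_rules) < 0)
		goto error;

	memset(&cfg, 0, sizeof(cfg));	/* as the patch above now does */
	cfg.num_categories = 1;
	cfg.num_fields = num_fields;
	memcpy(&cfg.defs, defs, defs_size);
	if (rte_acl_build(ctx, &cfg) != 0)
		goto error;

	return ctx;
error:
	rte_acl_free(ctx);
	return NULL;
}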
@@ -1469,20 +1395,12 @@
 		 */
 		diff_tsc = cur_tsc - prev_tsc;
 		if (unlikely(diff_tsc > drain_tsc)) {
-
-			/*
-			 * This could be optimized (use queueid instead of
-			 * portid), but it is not called so often
-			 */
-			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
-				if (qconf->tx_mbufs[portid].len == 0)
-					continue;
-				send_burst(&lcore_conf[lcore_id],
-					qconf->tx_mbufs[portid].len,
-					portid);
-				qconf->tx_mbufs[portid].len = 0;
+			for (i = 0; i < qconf->n_tx_port; ++i) {
+				portid = qconf->tx_port_id[i];
+				rte_eth_tx_buffer_flush(portid,
+					qconf->tx_queue_id[portid],
+					qconf->tx_buffer[portid]);
 			}
-
 			prev_tsc = cur_tsc;
 		}
 
@@ -1503,7 +1421,8 @@
 					nb_rx);
 
 				if (acl_search.num_ipv4) {
-					CLASSIFY(acl_config.acx_ipv4[socketid],
+					rte_acl_classify(
+						acl_config.acx_ipv4[socketid],
 						acl_search.data_ipv4,
 						acl_search.res_ipv4,
 						acl_search.num_ipv4,
@@ -1515,7 +1434,8 @@
 				}
 
 				if (acl_search.num_ipv6) {
-					CLASSIFY(acl_config.acx_ipv6[socketid],
+					rte_acl_classify(
+						acl_config.acx_ipv6[socketid],
 						acl_search.data_ipv6,
 						acl_search.res_ipv6,
 						acl_search.num_ipv6,
@@ -1709,7 +1629,7 @@ parse_config(const char *q_arg)
 		if (size >= sizeof(s))
 			return -1;
 
-		rte_snprintf(s, sizeof(s), "%.*s", size, p);
+		snprintf(s, sizeof(s), "%.*s", size, p);
 		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
 				_NUM_FLD)
 			return -1;
@@ -1856,20 +1776,16 @@ parse_args(int argc, char **argv)
 	argv[optind-1] = prgname;
 
 	ret = optind-1;
-	optind = 0; /* reset getopt lib */
+	optind = 1; /* reset getopt lib */
 	return ret;
 }
 
 static void
 print_ethaddr(const char *name, const struct ether_addr *eth_addr)
 {
-	printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
-		eth_addr->addr_bytes[0],
-		eth_addr->addr_bytes[1],
-		eth_addr->addr_bytes[2],
-		eth_addr->addr_bytes[3],
-		eth_addr->addr_bytes[4],
-		eth_addr->addr_bytes[5]);
+	char buf[ETHER_ADDR_FMT_SIZE];
+	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+	printf("%s%s", name, buf);
 }
 
 static int
@@ -1894,14 +1810,12 @@ init_mem(unsigned nb_mbuf)
 				socketid, lcore_id, NB_SOCKETS);
 		}
 		if (pktmbuf_pool[socketid] == NULL) {
-			rte_snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
+			snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
 			pktmbuf_pool[socketid] =
-				rte_mempool_create(s, nb_mbuf, MBUF_SIZE,
-					MEMPOOL_CACHE_SIZE,
-					sizeof(struct rte_pktmbuf_pool_private),
-					rte_pktmbuf_pool_init, NULL,
-					rte_pktmbuf_init, NULL,
-					socketid, 0);
+				rte_pktmbuf_pool_create(s, nb_mbuf,
+					MEMPOOL_CACHE_SIZE, 0,
+					RTE_MBUF_DEFAULT_BUF_SIZE,
+					socketid);
 			if (pktmbuf_pool[socketid] == NULL)
 				rte_exit(EXIT_FAILURE,
 					"Cannot init mbuf pool on socket %d\n",
@@ -1946,7 +1860,7 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
 					continue;
 				}
 				/* clear all_ports_up flag if any link down */
-				if (link.link_status == 0) {
+				if (link.link_status == ETH_LINK_DOWN) {
 					all_ports_up = 0;
 					break;
 				}
@@ -1970,9 +1884,11 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
 }
 
 int
-MAIN(int argc, char **argv)
+main(int argc, char **argv)
 {
 	struct lcore_conf *qconf;
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_txconf *txconf;
 	int ret;
 	unsigned nb_ports;
 	uint16_t queueid;
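The main_loop() drain hunk above replaces the hand-rolled mbuf_table with the rte_eth_tx_buffer API introduced in DPDK 16.04: packets are staged per (port, queue) and transmitted either when the buffer fills or when the ~100 us drain timer fires. A minimal sketch of the pattern in isolation (single port/queue; the sketch_* names are illustrative):

#include <rte_ethdev.h>
#include <rte_malloc.h>

#define SKETCH_BURST 32

static struct rte_eth_dev_tx_buffer *tx_buf;

static int
sketch_tx_setup(uint8_t port)
{
	tx_buf = rte_zmalloc_socket("tx_buffer",
			RTE_ETH_TX_BUFFER_SIZE(SKETCH_BURST), 0,
			rte_eth_dev_socket_id(port));
	if (tx_buf == NULL)
		return -1;
	rte_eth_tx_buffer_init(tx_buf, SKETCH_BURST);
	return 0;
}

static void
sketch_tx_stage(uint8_t port, uint16_t queue, struct rte_mbuf *m)
{
	/* enqueue; a burst goes out automatically once SKETCH_BURST queue up */
	rte_eth_tx_buffer(port, queue, tx_buf, m);
}

static void
sketch_tx_drain(uint8_t port, uint16_t queue)
{
	/* called from the timer branch, exactly like the flush loop above */
	rte_eth_tx_buffer_flush(port, queue, tx_buf);
}

By default the buffer silently frees packets it cannot send (rte_eth_tx_buffer_set_err_callback() can override that), which is why the old manual rte_pktmbuf_free() loop in send_burst() could be deleted.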
@@ -1999,12 +1915,7 @@
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
 
-	if (rte_eal_pci_probe() < 0)
-		rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");
-
 	nb_ports = rte_eth_dev_count();
-	if (nb_ports > RTE_MAX_ETHPORTS)
-		nb_ports = RTE_MAX_ETHPORTS;
 
 	if (check_port_config(nb_ports) < 0)
 		rte_exit(EXIT_FAILURE, "check_port_config failed\n");
@@ -2049,6 +1960,22 @@
 		if (ret < 0)
 			rte_exit(EXIT_FAILURE, "init_mem failed\n");
 
+		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+			if (rte_lcore_is_enabled(lcore_id) == 0)
+				continue;
+
+			/* Initialize TX buffers */
+			qconf = &lcore_conf[lcore_id];
+			qconf->tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
+					RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
+					rte_eth_dev_socket_id(portid));
+			if (qconf->tx_buffer[portid] == NULL)
+				rte_exit(EXIT_FAILURE, "Can't allocate tx buffer for port %u\n",
+						(unsigned) portid);
+
+			rte_eth_tx_buffer_init(qconf->tx_buffer[portid], MAX_PKT_BURST);
+		}
+
 		/* init one TX queue per couple (lcore,port) */
 		queueid = 0;
 		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
@@ -2063,8 +1990,13 @@
 			printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
 			fflush(stdout);
+
+			rte_eth_dev_info_get(portid, &dev_info);
+			txconf = &dev_info.default_txconf;
+			if (port_conf.rxmode.jumbo_frame)
+				txconf->txq_flags = 0;
 			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
-						     socketid, &tx_conf);
+						     socketid, txconf);
 			if (ret < 0)
 				rte_exit(EXIT_FAILURE,
 					"rte_eth_tx_queue_setup: err=%d, "
@@ -2073,6 +2005,9 @@
 			qconf = &lcore_conf[lcore_id];
 			qconf->tx_queue_id[portid] = queueid;
 			queueid++;
+
+			qconf->tx_port_id[qconf->n_tx_port] = portid;
+			qconf->n_tx_port++;
 		}
 		printf("\n");
 	}
@@ -2098,7 +2033,7 @@
 		fflush(stdout);
 
 		ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
-				socketid, &rx_conf,
+				socketid, NULL,
 				pktmbuf_pool[socketid]);
 		if (ret < 0)
 			rte_exit(EXIT_FAILURE,
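The diff is truncated above inside the RX queue setup hunk, but the visible change is the point: passing NULL as the rxconf argument lets the PMD apply its own defaults, which is what made the hand-tuned rx_conf/tx_conf tables at the top of the patch removable. A hedged sketch of queue setup with driver defaults (port number and descriptor counts are illustrative):

#include <rte_ethdev.h>

static int
sketch_setup_queues(uint8_t port, struct rte_mempool *pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	int ret;

	rte_eth_dev_info_get(port, &dev_info);
	txconf = dev_info.default_txconf;	/* PMD-recommended TX settings */

	ret = rte_eth_rx_queue_setup(port, 0, 128,
			rte_eth_dev_socket_id(port),
			NULL /* NULL == PMD default rxconf */, pool);
	if (ret < 0)
		return ret;

	return rte_eth_tx_queue_setup(port, 0, 512,
			rte_eth_dev_socket_id(port), &txconf);
}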