X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fflow_classify%2Fflow_classify.c;h=433e64d3f901be23f0bc1bbf2396a16759a5b084;hb=32440cdf2af9ad38fd32a533f51a32da92345007;hp=45b3c1baaead3f67f48eaa00c7ddef81d2476719;hpb=3998e2a07220844d3f3c17f76a781ced3efe0de0;p=dpdk.git

diff --git a/examples/flow_classify/flow_classify.c b/examples/flow_classify/flow_classify.c
index 45b3c1baae..433e64d3f9 100644
--- a/examples/flow_classify/flow_classify.c
+++ b/examples/flow_classify/flow_classify.c
@@ -15,8 +15,8 @@
 #include
 #include
 
-#define RX_RING_SIZE 128
-#define TX_RING_SIZE 512
+#define RX_RING_SIZE 1024
+#define TX_RING_SIZE 1024
 
 #define NUM_MBUFS 8191
 #define MBUF_CACHE_SIZE 250
@@ -60,12 +60,13 @@ static struct{
 const char cb_port_delim[] = ":";
 
 static const struct rte_eth_conf port_conf_default = {
-	.rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }
+	.rxmode = {
+		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
+	},
 };
 
 struct flow_classifier {
 	struct rte_flow_classifier *cls;
-	uint32_t table_id[RTE_FLOW_CLASSIFY_TABLE_MAX];
 };
 
 struct flow_classifier_acl {
@@ -97,8 +98,8 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
 		.size = sizeof(uint8_t),
 		.field_index = PROTO_FIELD_IPV4,
 		.input_index = PROTO_INPUT_IPV4,
-		.offset = sizeof(struct ether_hdr) +
-			offsetof(struct ipv4_hdr, next_proto_id),
+		.offset = sizeof(struct rte_ether_hdr) +
+			offsetof(struct rte_ipv4_hdr, next_proto_id),
 	},
 	/* next input field (IPv4 source address) - 4 consecutive bytes. */
 	{
@@ -107,8 +108,8 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
 		.size = sizeof(uint32_t),
 		.field_index = SRC_FIELD_IPV4,
 		.input_index = SRC_INPUT_IPV4,
-		.offset = sizeof(struct ether_hdr) +
-			offsetof(struct ipv4_hdr, src_addr),
+		.offset = sizeof(struct rte_ether_hdr) +
+			offsetof(struct rte_ipv4_hdr, src_addr),
 	},
 	/* next input field (IPv4 destination address) - 4 consecutive bytes. */
 	{
@@ -117,8 +118,8 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
 		.size = sizeof(uint32_t),
 		.field_index = DST_FIELD_IPV4,
 		.input_index = DST_INPUT_IPV4,
-		.offset = sizeof(struct ether_hdr) +
-			offsetof(struct ipv4_hdr, dst_addr),
+		.offset = sizeof(struct rte_ether_hdr) +
+			offsetof(struct rte_ipv4_hdr, dst_addr),
 	},
 	/*
	 * Next 2 fields (src & dst ports) form 4 consecutive bytes.
@@ -130,9 +131,9 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
 		.size = sizeof(uint16_t),
 		.field_index = SRCP_FIELD_IPV4,
 		.input_index = SRCP_DESTP_INPUT_IPV4,
-		.offset = sizeof(struct ether_hdr) +
-			sizeof(struct ipv4_hdr) +
-			offsetof(struct tcp_hdr, src_port),
+		.offset = sizeof(struct rte_ether_hdr) +
+			sizeof(struct rte_ipv4_hdr) +
+			offsetof(struct rte_tcp_hdr, src_port),
 	},
 	{
 		/* rte_flow uses a bit mask for protocol ports */
@@ -140,9 +141,9 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
 		.size = sizeof(uint16_t),
 		.field_index = DSTP_FIELD_IPV4,
 		.input_index = SRCP_DESTP_INPUT_IPV4,
-		.offset = sizeof(struct ether_hdr) +
-			sizeof(struct ipv4_hdr) +
-			offsetof(struct tcp_hdr, dst_port),
+		.offset = sizeof(struct rte_ether_hdr) +
+			sizeof(struct rte_ipv4_hdr) +
+			offsetof(struct rte_tcp_hdr, dst_port),
 	},
 };
 
@@ -166,7 +167,15 @@ static struct rte_flow_item end_item = { RTE_FLOW_ITEM_TYPE_END,
 /* sample actions:
  * "actions count / end"
  */
-static struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT, 0};
+struct rte_flow_query_count count = {
+	.reset = 1,
+	.hits_set = 1,
+	.bytes_set = 1,
+	.hits = 0,
+	.bytes = 0,
+};
+static struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT,
+	&count};
 static struct rte_flow_action end_action = { RTE_FLOW_ACTION_TYPE_END, 0};
 
 static struct rte_flow_action actions[2];
@@ -183,14 +192,27 @@ static inline int
 port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 {
 	struct rte_eth_conf port_conf = port_conf_default;
-	struct ether_addr addr;
+	struct rte_ether_addr addr;
 	const uint16_t rx_rings = 1, tx_rings = 1;
 	int retval;
 	uint16_t q;
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_txconf txconf;
 
-	if (port >= rte_eth_dev_count())
+	if (!rte_eth_dev_is_valid_port(port))
 		return -1;
 
+	retval = rte_eth_dev_info_get(port, &dev_info);
+	if (retval != 0) {
+		printf("Error during getting device (port %u) info: %s\n",
+				port, strerror(-retval));
+		return retval;
+	}
+
+	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		port_conf.txmode.offloads |=
+			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
 	/* Configure the Ethernet device. */
 	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
 	if (retval != 0)
@@ -204,10 +226,12 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 			return retval;
 	}
 
+	txconf = dev_info.default_txconf;
+	txconf.offloads = port_conf.txmode.offloads;
 	/* Allocate and set up 1 TX queue per Ethernet port. */
 	for (q = 0; q < tx_rings; q++) {
 		retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
-				rte_eth_dev_socket_id(port), NULL);
+				rte_eth_dev_socket_id(port), &txconf);
 		if (retval < 0)
 			return retval;
 	}
@@ -218,7 +242,10 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		return retval;
 
 	/* Display the port MAC address. */
-	rte_eth_macaddr_get(port, &addr);
+	retval = rte_eth_macaddr_get(port, &addr);
+	if (retval != 0)
+		return retval;
+
 	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
 			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
 			port,
@@ -227,7 +254,9 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 			addr.addr_bytes[4], addr.addr_bytes[5]);
 
 	/* Enable RX in promiscuous mode for the Ethernet device. */
-	rte_eth_promiscuous_enable(port);
+	retval = rte_eth_promiscuous_enable(port);
+	if (retval != 0)
+		return retval;
 
 	return 0;
 }
@@ -236,16 +265,15 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
  * The lcore main. This is the main thread that does the work, reading from
  * an input port classifying the packets and writing to an output port.
 */
-static __attribute__((noreturn)) void
+static __rte_noreturn void
 lcore_main(struct flow_classifier *cls_app)
 {
-	const uint8_t nb_ports = rte_eth_dev_count();
-	uint8_t port;
+	uint16_t port;
 	int ret;
 	int i = 0;
 
 	ret = rte_flow_classify_table_entry_delete(cls_app->cls,
-			cls_app->table_id[0], rules[7]);
+			rules[7]);
 	if (ret)
 		printf("table_entry_delete failed [7] %d\n\n", ret);
 	else
@@ -255,7 +283,7 @@ lcore_main(struct flow_classifier *cls_app)
 	 * Check that the port is on the same NUMA node as the polling thread
 	 * for best performance.
 	 */
-	for (port = 0; port < nb_ports; port++)
+	RTE_ETH_FOREACH_DEV(port)
 		if (rte_eth_dev_socket_id(port) > 0 &&
 			rte_eth_dev_socket_id(port) != (int)rte_socket_id()) {
 			printf("\n\n");
@@ -263,11 +291,10 @@ lcore_main(struct flow_classifier *cls_app)
 				port);
 			printf("to polling thread.\n");
 			printf("Performance will not be optimal.\n");
-
-			printf("\nCore %u forwarding packets. ",
-					rte_lcore_id());
-			printf("[Ctrl+C to quit]\n");
 		}
+	printf("\nCore %u forwarding packets. ", rte_lcore_id());
+	printf("[Ctrl+C to quit]\n");
+
 	/* Run until the application is quit or killed. */
 	for (;;) {
 		/*
@@ -275,7 +302,7 @@ lcore_main(struct flow_classifier *cls_app)
 		 * on the paired port.
 		 * The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
 		 */
-		for (port = 0; port < nb_ports; port++) {
+		RTE_ETH_FOREACH_DEV(port) {
 			/* Get burst of RX packets, from first port of pair. */
 			struct rte_mbuf *bufs[BURST_SIZE];
 			const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
@@ -288,7 +315,6 @@ lcore_main(struct flow_classifier *cls_app)
 				if (rules[i]) {
 					ret = rte_flow_classifier_query(
 						cls_app->cls,
-						cls_app->table_id[0],
 						bufs, nb_rx, rules[i],
 						&classify_stats);
 					if (ret)
@@ -364,7 +390,7 @@ parse_ipv4_net(char *in, uint32_t *addr, uint32_t *mask_len)
 	if (get_cb_field(&in, &m, 0, sizeof(uint32_t) * CHAR_BIT, 0))
 		return -EINVAL;
 
-	addr[0] = IPv4(a, b, c, d);
+	addr[0] = RTE_IPV4(a, b, c, d);
 	mask_len[0] = m;
 	return 0;
 }
@@ -605,9 +631,18 @@ add_classify_rule(struct rte_eth_ntuple_filter *ntuple_filter,
 	actions[0] = count_action;
 	actions[1] = end_action;
 
+	/* Validate and add rule */
+	ret = rte_flow_classify_validate(cls_app->cls, &attr,
+			pattern_ipv4_5tuple, actions, &error);
+	if (ret) {
+		printf("table entry validate failed ipv4_proto = %u\n",
+			ipv4_proto);
+		return ret;
+	}
+
 	rule = rte_flow_classify_table_entry_add(
-			cls_app->cls, cls_app->table_id[0], &key_found,
-			&attr, pattern_ipv4_5tuple, actions, &error);
+			cls_app->cls, &attr, pattern_ipv4_5tuple,
+			actions, &key_found, &error);
 	if (rule == NULL) {
 		printf("table entry add failed ipv4_proto = %u\n",
 			ipv4_proto);
@@ -726,8 +761,8 @@ int
 main(int argc, char *argv[])
 {
 	struct rte_mempool *mbuf_pool;
-	uint8_t nb_ports;
-	uint8_t portid;
+	uint16_t nb_ports;
+	uint16_t portid;
 	int ret;
 	int socket_id;
 	struct rte_table_acl_params table_acl_params;
@@ -750,7 +785,7 @@ main(int argc, char *argv[])
 		rte_exit(EXIT_FAILURE, "Invalid flow_classify parameters\n");
 
 	/* Check that there is an even number of ports to send/receive on. */
-	nb_ports = rte_eth_dev_count();
+	nb_ports = rte_eth_dev_count_avail();
 	if (nb_ports < 2 || (nb_ports & 1))
 		rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");
 
@@ -762,7 +797,7 @@ main(int argc, char *argv[])
 		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
 
 	/* Initialize all ports. */
-	for (portid = 0; portid < nb_ports; portid++)
+	RTE_ETH_FOREACH_DEV(portid)
 		if (port_init(portid, mbuf_pool) != 0)
 			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
 					portid);
@@ -780,7 +815,6 @@ main(int argc, char *argv[])
 
 	cls_params.name = "flow_classifier";
 	cls_params.socket_id = socket_id;
-	cls_params.type = RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL;
 
 	cls_app->cls = rte_flow_classifier_create(&cls_params);
 	if (cls_app->cls == NULL) {
@@ -795,11 +829,11 @@ main(int argc, char *argv[])
 	memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));
 
 	/* initialise table create params */
-	cls_table_params.ops = &rte_table_acl_ops,
-	cls_table_params.arg_create = &table_acl_params,
+	cls_table_params.ops = &rte_table_acl_ops;
+	cls_table_params.arg_create = &table_acl_params;
+	cls_table_params.type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;
 
-	ret = rte_flow_classify_table_create(cls_app->cls, &cls_table_params,
-			&cls_app->table_id[0]);
+	ret = rte_flow_classify_table_create(cls_app->cls, &cls_table_params);
 	if (ret) {
 		rte_flow_classifier_free(cls_app->cls);
 		rte_free(cls_app);
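
Note (not part of the patch): the hunks in add_classify_rule() above drop the per-call table_id argument and split rule insertion into an explicit validate step followed by table_entry_add, whose argument order also changes. A minimal sketch of the new call sequence, using only the signatures visible in this diff; the helper name add_rule_sketch and its parameters are hypothetical, and it assumes a classifier that already has its ACL table attached:

#include <rte_flow.h>
#include <rte_flow_classify.h>

/* Hypothetical helper mirroring the reworked calls in add_classify_rule(). */
static struct rte_flow_classify_rule *
add_rule_sketch(struct rte_flow_classifier *cls,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	int key_found = 0;

	/* New flow: validate first; no table_id argument anywhere. */
	if (rte_flow_classify_validate(cls, attr, pattern, actions, error) != 0)
		return NULL;

	/* key_found reports whether an identical key was already present. */
	return rte_flow_classify_table_entry_add(cls, attr, pattern,
			actions, &key_found, error);
}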