X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-pmd%2Ftestpmd.h;h=6b901a894f0314c45c34dea33ae6b4baf3605f08;hb=39831d49d3e81146b1afa2d15de548d21a32d305;hp=c528db45006a72b6177c2144afb25f4af37c3067;hpb=6c3c22969563b06aa82714bb2a4e416e941bd065;p=dpdk.git

diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index c528db4500..6b901a894f 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -12,6 +12,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define RTE_PORT_ALL (~(portid_t)0x0)
 
@@ -42,6 +43,16 @@
  */
 #define RTE_MAX_SEGS_PER_PKT 255 /**< nb_segs is a 8-bit unsigned char. */
 
+/*
+ * The maximum number of segments per packet is used to configure
+ * buffer split feature, also specifies the maximum amount of
+ * optional Rx pools to allocate mbufs to split.
+ */
+#define MAX_SEGS_BUFFER_SPLIT 8 /**< nb_segs is a 8-bit unsigned char. */
+
+/* The prefix of the mbuf pool names created by the application. */
+#define MBUF_POOL_NAME_PFX "mb_pool"
+
 #define MAX_PKT_BURST 512
 #define DEF_PKT_BURST 32
 
@@ -76,7 +87,6 @@ enum {
 	/**< allocate mempool natively, use rte_pktmbuf_pool_create_extbuf */
 };
 
-#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
 /**
  * The data structure associated with RX and TX packet burst statistics
  * that are recorded for each forwarding stream.
@@ -84,7 +94,6 @@ enum {
 struct pkt_burst_stats {
 	unsigned int pkt_burst_spread[MAX_PKT_BURST];
 };
-#endif
 
 /** Information for a given RSS type. */
 struct rss_type_info {
@@ -129,13 +138,18 @@ struct fwd_stream {
 	uint64_t rx_bad_outer_l4_csum;
 	/**< received packets has bad outer l4 checksum */
 	unsigned int gro_times; /**< GRO operation times */
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
 	uint64_t core_cycles; /**< used for RX and TX processing */
-#endif
-#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
 	struct pkt_burst_stats rx_burst_stats;
 	struct pkt_burst_stats tx_burst_stats;
-#endif
+};
+
+/**
+ * Age action context types, must be included inside the age action
+ * context structure.
+ */
+enum age_action_context_type {
+	ACTION_AGE_CONTEXT_TYPE_FLOW,
+	ACTION_AGE_CONTEXT_TYPE_SHARED_ACTION,
 };
 
 /** Descriptor for a single flow. */
@@ -144,10 +158,40 @@ struct port_flow {
 	struct port_flow *tmp; /**< Temporary linking. */
 	uint32_t id; /**< Flow rule ID. */
 	struct rte_flow *flow; /**< Opaque flow object returned by PMD. */
-	struct rte_flow_conv_rule rule; /* Saved flow rule description. */
+	struct rte_flow_conv_rule rule; /**< Saved flow rule description. */
+	enum age_action_context_type age_type; /**< Age action context type. */
 	uint8_t data[]; /**< Storage for flow rule description */
 };
 
+/* Descriptor for shared action */
+struct port_shared_action {
+	struct port_shared_action *next; /**< Next flow in list. */
+	uint32_t id; /**< Shared action ID. */
+	enum rte_flow_action_type type; /**< Action type. */
+	struct rte_flow_shared_action *action; /**< Shared action handle. */
+	enum age_action_context_type age_type; /**< Age action context type. */
+};
+
+struct port_flow_tunnel {
+	LIST_ENTRY(port_flow_tunnel) chain;
+	struct rte_flow_action *pmd_actions;
+	struct rte_flow_item *pmd_items;
+	uint32_t id;
+	uint32_t num_pmd_actions;
+	uint32_t num_pmd_items;
+	struct rte_flow_tunnel tunnel;
+	struct rte_flow_action *actions;
+	struct rte_flow_item *items;
+};
+
+struct tunnel_ops {
+	uint32_t id;
+	char type[16];
+	uint32_t enabled:1;
+	uint32_t actions:1;
+	uint32_t items:1;
+};
+
 /**
  * The data structure associated with each port.
  */
@@ -178,6 +222,9 @@ struct rte_port {
 	uint32_t mc_addr_nb; /**< nb. of addr. in mc_addr_pool */
 	uint8_t slave_flag; /**< bonding slave port */
 	struct port_flow *flow_list; /**< Associated flows. */
+	struct port_shared_action *actions_list;
+	/**< Associated shared actions. */
+	LIST_HEAD(, port_flow_tunnel) flow_tunnel_list;
 	const struct rte_eth_rxtx_callback *rx_dump_cb[RTE_MAX_QUEUES_PER_PORT+1];
 	const struct rte_eth_rxtx_callback *tx_dump_cb[RTE_MAX_QUEUES_PER_PORT+1];
 	/**< metadata value to insert in Tx packets. */
@@ -247,6 +294,7 @@ extern struct fwd_engine tx_only_engine;
 extern struct fwd_engine csum_fwd_engine;
 extern struct fwd_engine icmp_echo_engine;
 extern struct fwd_engine noisy_vnf_engine;
+extern struct fwd_engine five_tuple_swap_fwd_engine;
 #ifdef RTE_LIBRTE_IEEE1588
 extern struct fwd_engine ieee1588_fwd_engine;
 #endif
@@ -300,6 +348,8 @@ extern uint16_t nb_rx_queue_stats_mappings;
 extern uint8_t xstats_hide_zero; /**< Hide zero values for xstats display */
 
 /* globals used for configuration */
+extern uint8_t record_core_cycles; /**< Enables measurement of CPU cycles */
+extern uint8_t record_burst_stats; /**< Enables display of RX and TX bursts */
 extern uint16_t verbose_level; /**< Drives messages being displayed, if any. */
 extern int testpmd_logtype; /**< Log type for testpmd logs */
 extern uint8_t interactive;
@@ -396,23 +446,36 @@ extern uint64_t noisy_lkup_num_reads_writes;
 extern uint8_t dcb_config;
 extern uint8_t dcb_test;
 
-extern uint16_t mbuf_data_size; /**< Mbuf data space size. */
+extern uint32_t mbuf_data_size_n;
+extern uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT];
+/**< Mbuf data space size. */
 extern uint32_t param_total_num_mbufs;
 
 extern uint16_t stats_period;
 
-#ifdef RTE_LIBRTE_LATENCY_STATS
+extern uint16_t hairpin_mode;
+
+#ifdef RTE_LIB_LATENCYSTATS
 extern uint8_t latencystats_enabled;
 extern lcoreid_t latencystats_lcore_id;
 #endif
 
-#ifdef RTE_LIBRTE_BITRATE
+#ifdef RTE_LIB_BITRATESTATS
 extern lcoreid_t bitrate_lcore_id;
 extern uint8_t bitrate_enabled;
 #endif
 
 extern struct rte_fdir_conf fdir_conf;
 
+/*
+ * Configuration of packet segments used to scatter received packets
+ * if some of split features is configured.
+ */
+extern uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
+extern uint8_t rx_pkt_nb_segs; /**< Number of segments to split */
+extern uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
+extern uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
+
 /*
  * Configuration of packet segments used by the "txonly" processing engine.
  */
@@ -420,6 +483,8 @@ extern struct rte_fdir_conf fdir_conf;
 extern uint16_t tx_pkt_length; /**< Length of TXONLY packet */
 extern uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT]; /**< Seg. lengths */
 extern uint8_t tx_pkt_nb_segs; /**< Number of segments in TX packets */
+extern uint32_t tx_pkt_times_intra;
+extern uint32_t tx_pkt_times_inter;
 
 enum tx_pkt_split {
 	TX_PKT_SPLIT_OFF,
@@ -453,6 +518,7 @@ extern struct fwd_lcore **fwd_lcores;
 extern struct fwd_stream **fwd_streams;
 
 extern uint16_t vxlan_gpe_udp_port; /**< UDP port of tunnel VXLAN-GPE. */
+extern uint16_t geneve_udp_port; /**< UDP port of tunnel GENEVE. */
 
 extern portid_t nb_peer_eth_addrs; /**< Number of peer ethernet addresses. */
 extern struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
@@ -605,17 +671,23 @@ current_fwd_lcore(void)
 
 /* Mbuf Pools */
 static inline void
-mbuf_poolname_build(unsigned int sock_id, char* mp_name, int name_size)
+mbuf_poolname_build(unsigned int sock_id, char *mp_name,
+		    int name_size, uint16_t idx)
 {
-	snprintf(mp_name, name_size, "mbuf_pool_socket_%u", sock_id);
+	if (!idx)
+		snprintf(mp_name, name_size,
+			 MBUF_POOL_NAME_PFX "_%u", sock_id);
+	else
+		snprintf(mp_name, name_size,
+			 MBUF_POOL_NAME_PFX "_%hu_%hu", (uint16_t)sock_id, idx);
}
 
 static inline struct rte_mempool *
-mbuf_pool_find(unsigned int sock_id)
+mbuf_pool_find(unsigned int sock_id, uint16_t idx)
 {
 	char pool_name[RTE_MEMPOOL_NAMESIZE];
 
-	mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name));
+	mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx);
 	return rte_mempool_lookup((const char *)pool_name);
 }
 
@@ -678,6 +750,34 @@ port_pci_reg_write(struct rte_port *port, uint32_t reg_off, uint32_t reg_v)
 #define port_id_pci_reg_write(pt_id, reg_off, reg_value) \
 	port_pci_reg_write(&ports[(pt_id)], (reg_off), (reg_value))
 
+static inline void
+get_start_cycles(uint64_t *start_tsc)
+{
+	if (record_core_cycles)
+		*start_tsc = rte_rdtsc();
+}
+
+static inline void
+get_end_cycles(struct fwd_stream *fs, uint64_t start_tsc)
+{
+	if (record_core_cycles)
+		fs->core_cycles += rte_rdtsc() - start_tsc;
+}
+
+static inline void
+inc_rx_burst_stats(struct fwd_stream *fs, uint16_t nb_rx)
+{
+	if (record_burst_stats)
+		fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
+}
+
+static inline void
+inc_tx_burst_stats(struct fwd_stream *fs, uint16_t nb_tx)
+{
+	if (record_burst_stats)
+		fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
+}
+
 /* Prototypes */
 unsigned int parse_item_list(char* str, const char* item_name,
 			unsigned int max_items,
@@ -694,6 +794,8 @@ void nic_stats_mapping_display(portid_t port_id);
 void device_infos_display(const char *identifier);
 void port_infos_display(portid_t port_id);
 void port_summary_display(portid_t port_id);
+void port_eeprom_display(portid_t port_id);
+void port_module_eeprom_display(portid_t port_id);
 void port_summary_header_display(void);
 void port_offload_cap_display(portid_t port_id);
 void rx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
@@ -719,14 +821,26 @@ void port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
 			uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value);
 void port_reg_display(portid_t port_id, uint32_t reg_off);
 void port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t value);
+int port_shared_action_create(portid_t port_id, uint32_t id,
+			const struct rte_flow_shared_action_conf *conf,
+			const struct rte_flow_action *action);
+int port_shared_action_destroy(portid_t port_id,
+			uint32_t n, const uint32_t *action);
+struct rte_flow_shared_action *port_shared_action_get_by_id(portid_t port_id,
+			uint32_t id);
+int port_shared_action_update(portid_t port_id, uint32_t id,
+			const struct rte_flow_action *action);
 int port_flow_validate(portid_t port_id,
 			const struct rte_flow_attr *attr,
 			const struct rte_flow_item *pattern,
-			const struct rte_flow_action *actions);
+			const struct rte_flow_action *actions,
+			const struct tunnel_ops *tunnel_ops);
 int port_flow_create(portid_t port_id,
 			const struct rte_flow_attr *attr,
 			const struct rte_flow_item *pattern,
-			const struct rte_flow_action *actions);
+			const struct rte_flow_action *actions,
+			const struct tunnel_ops *tunnel_ops);
+int port_shared_action_query(portid_t port_id, uint32_t id);
 void update_age_action_context(const struct rte_flow_action *actions,
 			struct port_flow *pf);
 int port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule);
@@ -736,6 +850,12 @@ int port_flow_query(portid_t port_id, uint32_t rule,
 			const struct rte_flow_action *action);
 void port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group);
 void port_flow_aged(portid_t port_id, uint8_t destroy);
+const char *port_flow_tunnel_type(struct rte_flow_tunnel *tunnel);
+struct port_flow_tunnel *
+port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun);
+void port_flow_tunnel_list(portid_t port_id);
+void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id);
+void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops);
 int port_flow_isolate(portid_t port_id, int set);
 
 void rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id);
@@ -769,10 +889,20 @@ void set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_va
 
 void set_xstats_hide_zero(uint8_t on_off);
 
+void set_record_core_cycles(uint8_t on_off);
+void set_record_burst_stats(uint8_t on_off);
 void set_verbose_level(uint16_t vb_level);
-void set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs);
+void set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs);
+void show_rx_pkt_segments(void);
+void set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs);
+void show_rx_pkt_offsets(void);
+void set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs);
 void show_tx_pkt_segments(void);
+void set_tx_pkt_times(unsigned int *tx_times);
+void show_tx_pkt_times(void);
 void set_tx_pkt_split(const char *name);
+int parse_fec_mode(const char *name, enum rte_eth_fec_mode *mode);
+void show_fec_capability(uint32_t num, struct rte_eth_fec_capa *speed_fec_capa);
 void set_nb_pkt_per_burst(uint16_t pkt_burst);
 char *list_pkt_forwarding_modes(void);
 char *list_pkt_forwarding_retry_modes(void);
@@ -802,7 +932,9 @@ int all_ports_stopped(void);
 int port_is_stopped(portid_t port_id);
 int port_is_started(portid_t port_id);
 void pmd_test_exit(void);
+#if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)
 void fdir_get_infos(portid_t port_id);
+#endif
 void fdir_set_flex_mask(portid_t port_id,
 	struct rte_eth_fdir_flex_mask *cfg);
 void fdir_set_flex_payload(portid_t port_id,
@@ -813,6 +945,11 @@ void port_rss_reta_info(portid_t port_id,
 
 void set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on);
 
+int
+rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
+	       uint16_t nb_rx_desc, unsigned int socket_id,
+	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp);
+
 int set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate);
 int set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate,
 				uint64_t q_msk);
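
Reviewer note, not part of the patch: with the mbuf_poolname_build() change above, pool names move from the fixed "mbuf_pool_socket_%u" format to the MBUF_POOL_NAME_PFX prefix, with an extra index appended for the additional buffer-split pools. A small stand-alone sketch of the resulting names follows; the socket/index values and the buffer size are arbitrary examples, only the two format strings come from the diff.

/* Illustrative sketch only: prints the two pool-name formats used above. */
#include <stdint.h>
#include <stdio.h>

#define MBUF_POOL_NAME_PFX "mb_pool"	/* same prefix the patch defines */

int
main(void)
{
	char name[32];	/* testpmd itself sizes this with RTE_MEMPOOL_NAMESIZE */

	/* idx == 0: the single per-socket pool, e.g. "mb_pool_0" */
	snprintf(name, sizeof(name), MBUF_POOL_NAME_PFX "_%u", 0u);
	printf("%s\n", name);

	/* idx != 0: an extra pool used for buffer split, e.g. "mb_pool_0_1" */
	snprintf(name, sizeof(name), MBUF_POOL_NAME_PFX "_%hu_%hu",
		 (uint16_t)0, (uint16_t)1);
	printf("%s\n", name);
	return 0;
}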
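
Also a reviewer note, not part of the patch: the get_start_cycles()/get_end_cycles() and inc_rx_burst_stats()/inc_tx_burst_stats() helpers added above replace the old RTE_TEST_PMD_RECORD_CORE_CYCLES and RTE_TEST_PMD_RECORD_BURST_STATS build-time options with the runtime flags record_core_cycles and record_burst_stats, so forwarding engines can call them unconditionally. Below is a minimal sketch of the intended call pattern; the loop body, the fwd_stream fields used (rx_port, rx_queue, tx_port, tx_queue) and the nb_pkt_per_burst global are assumptions modelled on the existing testpmd engines, not code from this diff.

/* Illustrative sketch only: how a forwarding callback would use the helpers. */
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#include "testpmd.h"

static void
example_pkt_burst_fwd(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint64_t start_tsc = 0;
	uint16_t nb_rx, nb_tx;

	get_start_cycles(&start_tsc);		/* no-op unless record_core_cycles */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
				 pkts_burst, nb_pkt_per_burst);
	inc_rx_burst_stats(fs, nb_rx);		/* no-op unless record_burst_stats */
	if (nb_rx == 0) {
		get_end_cycles(fs, start_tsc);
		return;
	}
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
	inc_tx_burst_stats(fs, nb_tx);
	while (nb_tx < nb_rx)	/* free the packets the Tx queue did not accept */
		rte_pktmbuf_free(pkts_burst[nb_tx++]);
	get_end_cycles(fs, start_tsc);
}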