#include <rte_pci.h>
#include <rte_bus_pci.h>
+#ifdef RTE_LIB_GRO
#include <rte_gro.h>
+#endif
+#ifdef RTE_LIB_GSO
#include <rte_gso.h>
+#endif
#include <rte_os_shim.h>
#include <cmdline.h>
#include <sys/queue.h>
+#ifdef RTE_HAS_JANSSON
+#include <jansson.h>
+#endif
#define RTE_PORT_ALL (~(portid_t)0x0)
* that are recorded for each forwarding stream.
*/
struct pkt_burst_stats {
- unsigned int pkt_burst_spread[MAX_PKT_BURST];
+ /* +1 slot so a burst of exactly MAX_PKT_BURST packets can be counted
+  * at its own index (presumably indexed by burst size — confirm at use sites). */
+ unsigned int pkt_burst_spread[MAX_PKT_BURST + 1];
};
/** Information for a given RSS type. */
/**< received packets has bad outer l4 checksum */
uint64_t rx_bad_outer_ip_csum;
/**< received packets having bad outer ip checksum */
+ uint64_t ts_skew; /**< TX scheduling timestamp */
+#ifdef RTE_LIB_GRO
unsigned int gro_times; /**< GRO operation times */
+#endif
uint64_t core_cycles; /**< used for RX and TX processing */
struct pkt_burst_stats rx_burst_stats;
struct pkt_burst_stats tx_burst_stats;
+ struct fwd_lcore *lcore; /**< Lcore being scheduled. */
};
/**
ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION,
};
+/**
+ * Descriptor for a flow template (pattern or actions).
+ *
+ * One node type serves both template kinds; the union holds whichever
+ * PMD object this entry wraps.
+ */
+struct port_template {
+ struct port_template *next; /**< Next template in list. */
+ struct port_template *tmp; /**< Temporary linking. */
+ uint32_t id; /**< Template ID. */
+ union {
+ struct rte_flow_pattern_template *pattern_template;
+ struct rte_flow_actions_template *actions_template;
+ } template; /**< PMD opaque template object */
+};
+
/** Descriptor for a single flow. */
struct port_flow {
struct port_flow *next; /**< Next flow in list. */
enum rte_flow_action_type type; /**< Action type. */
struct rte_flow_action_handle *handle; /**< Indirect action handle. */
enum age_action_context_type age_type; /**< Age action context type. */
- /** If true, the action applies to "transfer" flows, and vice versa */
- bool transfer;
};
struct port_flow_tunnel {
struct rte_eth_txconf tx_conf[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue tx configuration */
struct rte_ether_addr *mc_addr_pool; /**< pool of multicast addrs */
uint32_t mc_addr_nb; /**< nb. of addr. in mc_addr_pool */
+ queueid_t queue_nb; /**< nb. of queues for flow rules */
+ uint32_t queue_sz; /**< size of a queue for flow rules */
uint8_t slave_flag; /**< bonding slave port */
+ struct port_template *pattern_templ_list; /**< Pattern templates. */
+ struct port_template *actions_templ_list; /**< Actions templates. */
struct port_flow *flow_list; /**< Associated flows. */
struct port_indirect_action *actions_list;
/**< Associated indirect actions. */
/**< dynamic flags. */
uint64_t mbuf_dynf;
const struct rte_eth_rxtx_callback *tx_set_dynf_cb[RTE_MAX_QUEUES_PER_PORT+1];
- /** Associated port which is supposed to handle "transfer" flows */
- portid_t flow_transfer_proxy;
struct xstat_display_info xstats_info;
};
* CPU id. configuration table.
*/
struct fwd_lcore {
+/* GSO/GRO members are compiled out when the respective DPDK library is disabled. */
+#ifdef RTE_LIB_GSO
struct rte_gso_ctx gso_ctx; /**< GSO context */
+#endif
struct rte_mempool *mbp; /**< The mbuf pool to use by this core */
+#ifdef RTE_LIB_GRO
void *gro_ctx; /**< GRO context */
+#endif
streamid_t stream_idx; /**< index of 1st stream in "fwd_streams" */
streamid_t stream_nb; /**< number of streams in "fwd_streams" */
lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */
packet_fwd_t packet_fwd; /**< Mandatory. */
};
+/* Size limits for rte_flow flex item support in testpmd. */
+#define FLEX_ITEM_MAX_SAMPLES_NUM 16
+#define FLEX_ITEM_MAX_LINKS_NUM 16
+#define FLEX_MAX_FLOW_PATTERN_LENGTH 64
+#define FLEX_MAX_PARSERS_NUM 8
+#define FLEX_MAX_PATTERNS_NUM 64
+/* Sentinel pointer value distinguishing "creation failed" from NULL ("unset"). */
+#define FLEX_PARSER_ERR ((struct flex_item *)-1)
+
+/** Descriptor of one created flex item parser. */
+struct flex_item {
+ struct rte_flow_item_flex_conf flex_conf; /**< Parser configuration. */
+ struct rte_flow_item_flex_handle *flex_handle; /**< PMD parser handle. */
+ uint32_t flex_id; /**< Parser ID. */
+};
+
+/** Flex flow pattern descriptor: spec/mask items plus their backing storage. */
+struct flex_pattern {
+ struct rte_flow_item_flex spec, mask; /**< Flex items for spec and mask. */
+ /* NOTE(review): these buffers presumably back the spec/mask pattern
+  * pointers — confirm where the pattern is parsed/filled. */
+ uint8_t spec_pattern[FLEX_MAX_FLOW_PATTERN_LENGTH];
+ uint8_t mask_pattern[FLEX_MAX_FLOW_PATTERN_LENGTH];
+};
+extern struct flex_item *flex_items[RTE_MAX_ETHPORTS][FLEX_MAX_PARSERS_NUM];
+extern struct flex_pattern flex_patterns[FLEX_MAX_PATTERNS_NUM];
+
#define BURST_TX_WAIT_US 1
#define BURST_TX_RETRIES 64
#ifdef RTE_LIBRTE_IEEE1588
extern struct fwd_engine ieee1588_fwd_engine;
#endif
+extern struct fwd_engine shared_rxq_engine;
extern struct fwd_engine * fwd_engines[]; /**< NULL terminated array. */
extern cmdline_parse_inst_t cmd_set_raw;
extern cmdline_parse_inst_t cmd_show_set_raw;
extern cmdline_parse_inst_t cmd_show_set_raw_all;
+extern cmdline_parse_inst_t cmd_set_flex_is_pattern;
+extern cmdline_parse_inst_t cmd_set_flex_spec_pattern;
extern uint16_t mempool_flags;
extern uint8_t bitrate_enabled;
#endif
-extern struct rte_fdir_conf fdir_conf;
+extern struct rte_eth_fdir_conf fdir_conf;
+
+extern uint32_t max_rx_pkt_len;
/*
* Configuration of packet segments used to scatter received packets
extern uint8_t txonly_multi_flow;
+extern uint32_t rxq_share;
+
extern uint16_t nb_pkt_per_burst;
extern uint16_t nb_pkt_flowgen_clones;
extern int nb_flows_flowgen;
extern uint32_t burst_tx_delay_time; /**< Burst tx delay time(us) for mac-retry. */
extern uint32_t burst_tx_retry_num; /**< Burst tx retry number for mac-retry. */
+#ifdef RTE_LIB_GRO
#define GRO_DEFAULT_ITEM_NUM_PER_FLOW 32
#define GRO_DEFAULT_FLOW_NUM (RTE_GRO_MAX_BURST_ITEM_NUM / \
GRO_DEFAULT_ITEM_NUM_PER_FLOW)
};
extern struct gro_status gro_ports[RTE_MAX_ETHPORTS];
extern uint8_t gro_flush_cycles;
+#endif /* RTE_LIB_GRO */
+#ifdef RTE_LIB_GSO
#define GSO_MAX_PKT_BURST 2048
struct gso_status {
uint8_t enable;
};
extern struct gso_status gso_ports[RTE_MAX_ETHPORTS];
extern uint16_t gso_max_segment_size;
+#endif /* RTE_LIB_GSO */
/* VXLAN encap/decap parameters. */
struct vxlan_encap_conf {
void rx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
void tx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
void fwd_lcores_config_display(void);
+bool pkt_fwd_shared_rxq_check(void);
void pkt_fwd_config_display(struct fwd_config *cfg);
void rxtx_config_display(void);
void fwd_config_setup(void);
uint32_t id);
int port_action_handle_update(portid_t port_id, uint32_t id,
const struct rte_flow_action *action);
+int port_flow_get_info(portid_t port_id);
+int port_flow_configure(portid_t port_id,
+ const struct rte_flow_port_attr *port_attr,
+ uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr);
+int port_flow_pattern_template_create(portid_t port_id, uint32_t id,
+ const struct rte_flow_pattern_template_attr *attr,
+ const struct rte_flow_item *pattern);
+int port_flow_pattern_template_destroy(portid_t port_id, uint32_t n,
+ const uint32_t *template);
+int port_flow_actions_template_create(portid_t port_id, uint32_t id,
+ const struct rte_flow_actions_template_attr *attr,
+ const struct rte_flow_action *actions,
+ const struct rte_flow_action *masks);
+int port_flow_actions_template_destroy(portid_t port_id, uint32_t n,
+ const uint32_t *template);
int port_flow_validate(portid_t port_id,
const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
uint8_t *hash_key, uint8_t hash_key_len);
int rx_queue_id_is_invalid(queueid_t rxq_id);
int tx_queue_id_is_invalid(queueid_t txq_id);
+#ifdef RTE_LIB_GRO
void setup_gro(const char *onoff, portid_t port_id);
void setup_gro_flush_cycles(uint8_t cycles);
void show_gro(portid_t port_id);
+#endif
+#ifdef RTE_LIB_GSO
void setup_gso(const char *mode, portid_t port_id);
+#endif
int eth_dev_info_get_print_err(uint16_t port_id,
struct rte_eth_dev_info *dev_info);
int eth_dev_conf_get_print_err(uint16_t port_id,
__rte_unused void *user_param);
void add_tx_dynf_callback(portid_t portid);
void remove_tx_dynf_callback(portid_t portid);
+int update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen);
int update_jumbo_frame_offload(portid_t portid);
+void flex_item_create(portid_t port_id, uint16_t flex_id, const char *filename);
+void flex_item_destroy(portid_t port_id, uint16_t flex_id);
+void port_flex_item_flush(portid_t port_id);
+
+extern int flow_parse(const char *src, void *result, unsigned int size,
+ struct rte_flow_attr **attr,
+ struct rte_flow_item **pattern,
+ struct rte_flow_action **actions);
/*
* Work-around of a compilation error with ICC on invocations of the