From: Jiayu Hu
Date: Sat, 7 Oct 2017 14:56:43 +0000 (+0800)
Subject: app/testpmd: enable TCP/IPv4 VxLAN and GRE GSO
X-Git-Tag: spdx-start~1586
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=52f38a2055ede3cd171658588df9ff2a84875227;p=dpdk.git

app/testpmd: enable TCP/IPv4 VxLAN and GRE GSO

This patch adds GSO support to the csum forwarding engine. Oversized
packets transmitted over a GSO-enabled port will undergo segmentation
(with the exception of packet-types unsupported by the GSO library).

GSO support is disabled by default.

GSO support may be toggled on a per-port basis, using the command:

        "set port <port_id> gso on|off"

The maximum packet length (including the packet header and payload)
for GSO segments may be set with the command:

        "set gso segsz <length>"

Show GSO configuration for a given port with the command:

        "show port <port_id> gso"

Signed-off-by: Jiayu Hu
Signed-off-by: Mark Kavanagh
Acked-by: Konstantin Ananyev
---

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 516fc89d3c..b2d5284dfa 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -438,6 +438,17 @@ static void cmd_help_long_parsed(void *parsed_result,
 			"    Set the cycle to flush GROed packets from"
 			" reassembly tables.\n\n"
 
+			"set port (port_id) gso (on|off)"
+			"    Enable or disable Generic Segmentation Offload in"
+			" csum forwarding engine.\n\n"
+
+			"set gso segsz (length)\n"
+			"    Set max packet length for output GSO segments,"
+			" including packet header and payload.\n\n"
+
+			"show port (port_id) gso\n"
+			"    Show GSO configuration.\n\n"
+
 			"set fwd (%s)\n"
 			"    Set packet forwarding mode.\n\n"
 
@@ -4014,6 +4025,172 @@ cmdline_parse_inst_t cmd_gro_flush = {
 	},
 };
 
+/* *** ENABLE/DISABLE GSO *** */
+struct cmd_gso_enable_result {
+	cmdline_fixed_string_t cmd_set;
+	cmdline_fixed_string_t cmd_port;
+	cmdline_fixed_string_t cmd_keyword;
+	cmdline_fixed_string_t cmd_mode;
+	portid_t cmd_pid;
+};
+
+static void
+cmd_gso_enable_parsed(void *parsed_result,
+		__attribute__((unused)) struct cmdline *cl,
+		__attribute__((unused)) void *data)
+{
+	struct cmd_gso_enable_result *res;
+
+	res = parsed_result;
+	if (!strcmp(res->cmd_keyword, "gso"))
+		setup_gso(res->cmd_mode, res->cmd_pid);
+}
+
+cmdline_parse_token_string_t cmd_gso_enable_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_gso_enable_result,
+			cmd_set, "set");
+cmdline_parse_token_string_t cmd_gso_enable_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_gso_enable_result,
+			cmd_port, "port");
+cmdline_parse_token_string_t cmd_gso_enable_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_gso_enable_result,
+			cmd_keyword, "gso");
+cmdline_parse_token_string_t cmd_gso_enable_mode =
+	TOKEN_STRING_INITIALIZER(struct cmd_gso_enable_result,
+			cmd_mode, "on#off");
+cmdline_parse_token_num_t cmd_gso_enable_pid =
+	TOKEN_NUM_INITIALIZER(struct cmd_gso_enable_result,
+			cmd_pid, UINT16);
+
+cmdline_parse_inst_t cmd_gso_enable = {
+	.f = cmd_gso_enable_parsed,
+	.data = NULL,
+	.help_str = "set port <port_id> gso on|off",
+	.tokens = {
+		(void *)&cmd_gso_enable_set,
+		(void *)&cmd_gso_enable_port,
+		(void *)&cmd_gso_enable_pid,
+		(void *)&cmd_gso_enable_keyword,
+		(void *)&cmd_gso_enable_mode,
+		NULL,
+	},
+};
+
+/* *** SET MAX PACKET LENGTH FOR GSO SEGMENTS *** */
+struct cmd_gso_size_result {
+	cmdline_fixed_string_t cmd_set;
+	cmdline_fixed_string_t cmd_keyword;
+	cmdline_fixed_string_t cmd_segsz;
+	uint16_t cmd_size;
+};
+
+static void
+cmd_gso_size_parsed(void *parsed_result,
+		__attribute__((unused)) struct cmdline *cl,
+		__attribute__((unused)) void *data)
+{
+	struct cmd_gso_size_result 
*res = parsed_result; + + if (test_done == 0) { + printf("Before setting GSO segsz, please first" + " stop fowarding\n"); + return; + } + + if (!strcmp(res->cmd_keyword, "gso") && + !strcmp(res->cmd_segsz, "segsz")) { + if (res->cmd_size < RTE_GSO_SEG_SIZE_MIN) + printf("gso_size should be larger than %zu." + " Please input a legal value\n", + RTE_GSO_SEG_SIZE_MIN); + else + gso_max_segment_size = res->cmd_size; + } +} + +cmdline_parse_token_string_t cmd_gso_size_set = + TOKEN_STRING_INITIALIZER(struct cmd_gso_size_result, + cmd_set, "set"); +cmdline_parse_token_string_t cmd_gso_size_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_gso_size_result, + cmd_keyword, "gso"); +cmdline_parse_token_string_t cmd_gso_size_segsz = + TOKEN_STRING_INITIALIZER(struct cmd_gso_size_result, + cmd_segsz, "segsz"); +cmdline_parse_token_num_t cmd_gso_size_size = + TOKEN_NUM_INITIALIZER(struct cmd_gso_size_result, + cmd_size, UINT16); + +cmdline_parse_inst_t cmd_gso_size = { + .f = cmd_gso_size_parsed, + .data = NULL, + .help_str = "set gso segsz ", + .tokens = { + (void *)&cmd_gso_size_set, + (void *)&cmd_gso_size_keyword, + (void *)&cmd_gso_size_segsz, + (void *)&cmd_gso_size_size, + NULL, + }, +}; + +/* *** SHOW GSO CONFIGURATION *** */ +struct cmd_gso_show_result { + cmdline_fixed_string_t cmd_show; + cmdline_fixed_string_t cmd_port; + cmdline_fixed_string_t cmd_keyword; + portid_t cmd_pid; +}; + +static void +cmd_gso_show_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_gso_show_result *res = parsed_result; + + if (!rte_eth_dev_is_valid_port(res->cmd_pid)) { + printf("invalid port id %u\n", res->cmd_pid); + return; + } + if (!strcmp(res->cmd_keyword, "gso")) { + if (gso_ports[res->cmd_pid].enable) { + printf("Max GSO'd packet size: %uB\n" + "Supported GSO types: TCP/IPv4, " + "VxLAN with inner TCP/IPv4 packet, " + "GRE with inner TCP/IPv4 packet\n", + gso_max_segment_size); + } else + printf("GSO is not enabled on Port %u\n", res->cmd_pid); + } +} + +cmdline_parse_token_string_t cmd_gso_show_show = +TOKEN_STRING_INITIALIZER(struct cmd_gso_show_result, + cmd_show, "show"); +cmdline_parse_token_string_t cmd_gso_show_port = +TOKEN_STRING_INITIALIZER(struct cmd_gso_show_result, + cmd_port, "port"); +cmdline_parse_token_string_t cmd_gso_show_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_gso_show_result, + cmd_keyword, "gso"); +cmdline_parse_token_num_t cmd_gso_show_pid = + TOKEN_NUM_INITIALIZER(struct cmd_gso_show_result, + cmd_pid, UINT16); + +cmdline_parse_inst_t cmd_gso_show = { + .f = cmd_gso_show_parsed, + .data = NULL, + .help_str = "show port gso", + .tokens = { + (void *)&cmd_gso_show_show, + (void *)&cmd_gso_show_port, + (void *)&cmd_gso_show_pid, + (void *)&cmd_gso_show_keyword, + NULL, + }, +}; + /* *** ENABLE/DISABLE FLUSH ON RX STREAMS *** */ struct cmd_set_flush_rx { cmdline_fixed_string_t set; @@ -14723,6 +14900,9 @@ cmdline_parse_ctx_t main_ctx[] = { (cmdline_parse_inst_t *)&cmd_gro_enable, (cmdline_parse_inst_t *)&cmd_gro_flush, (cmdline_parse_inst_t *)&cmd_gro_show, + (cmdline_parse_inst_t *)&cmd_gso_enable, + (cmdline_parse_inst_t *)&cmd_gso_size, + (cmdline_parse_inst_t *)&cmd_gso_show, (cmdline_parse_inst_t *)&cmd_link_flow_control_set, (cmdline_parse_inst_t *)&cmd_link_flow_control_set_rx, (cmdline_parse_inst_t *)&cmd_link_flow_control_set_tx, diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index 90e4f19080..d04940c3d1 100644 --- a/app/test-pmd/config.c +++ b/app/test-pmd/config.c @@ -2510,6 
+2510,30 @@ show_gro(portid_t port_id) printf("Port %u doesn't enable GRO.\n", port_id); } +void +setup_gso(const char *mode, portid_t port_id) +{ + if (!rte_eth_dev_is_valid_port(port_id)) { + printf("invalid port id %u\n", port_id); + return; + } + if (strcmp(mode, "on") == 0) { + if (test_done == 0) { + printf("before enabling GSO," + " please stop forwarding first\n"); + return; + } + gso_ports[port_id].enable = 1; + } else if (strcmp(mode, "off") == 0) { + if (test_done == 0) { + printf("before disabling GSO," + " please stop forwarding first\n"); + return; + } + gso_ports[port_id].enable = 0; + } +} + char* list_pkt_forwarding_modes(void) { diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c index ca50ab7099..34fe8cc7ff 100644 --- a/app/test-pmd/csumonly.c +++ b/app/test-pmd/csumonly.c @@ -70,6 +70,8 @@ #include #include #include +#include + #include "testpmd.h" #define IP_DEFTTL 64 /* from RFC 1340. */ @@ -91,6 +93,7 @@ /* structure that caches offload info for the current packet */ struct testpmd_offload_info { uint16_t ethertype; + uint8_t gso_enable; uint16_t l2_len; uint16_t l3_len; uint16_t l4_len; @@ -381,6 +384,8 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info, get_udptcp_checksum(l3_hdr, tcp_hdr, info->ethertype); } + if (info->gso_enable) + ol_flags |= PKT_TX_TCP_SEG; } else if (info->l4_proto == IPPROTO_SCTP) { sctp_hdr = (struct sctp_hdr *)((char *)l3_hdr + info->l3_len); sctp_hdr->cksum = 0; @@ -627,6 +632,9 @@ static void pkt_burst_checksum_forward(struct fwd_stream *fs) { struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + struct rte_mbuf *gso_segments[GSO_MAX_PKT_BURST]; + struct rte_gso_ctx *gso_ctx; + struct rte_mbuf **tx_pkts_burst; struct rte_port *txp; struct rte_mbuf *m, *p; struct ether_hdr *eth_hdr; @@ -644,6 +652,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) uint32_t rx_bad_ip_csum; uint32_t rx_bad_l4_csum; struct testpmd_offload_info info; + uint16_t nb_segments = 0; + int ret; #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES uint64_t start_tsc; @@ -673,6 +683,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) memset(&info, 0, sizeof(info)); info.tso_segsz = txp->tso_segsz; info.tunnel_tso_segsz = txp->tunnel_tso_segsz; + if (gso_ports[fs->tx_port].enable) + info.gso_enable = 1; for (i = 0; i < nb_rx; i++) { if (likely(i < nb_rx - 1)) @@ -872,13 +884,35 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) } } + if (gso_ports[fs->tx_port].enable == 0) + tx_pkts_burst = pkts_burst; + else { + gso_ctx = &(current_fwd_lcore()->gso_ctx); + gso_ctx->gso_size = gso_max_segment_size; + for (i = 0; i < nb_rx; i++) { + ret = rte_gso_segment(pkts_burst[i], gso_ctx, + &gso_segments[nb_segments], + GSO_MAX_PKT_BURST - nb_segments); + if (ret >= 0) + nb_segments += ret; + else { + RTE_LOG(DEBUG, USER1, + "Unable to segment packet"); + rte_pktmbuf_free(pkts_burst[i]); + } + } + + tx_pkts_burst = gso_segments; + nb_rx = nb_segments; + } + nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue, - pkts_burst, nb_rx); + tx_pkts_burst, nb_rx); if (nb_prep != nb_rx) printf("Preparing packet burst to transmit failed: %s\n", rte_strerror(rte_errno)); - nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst, nb_prep); /* @@ -889,7 +923,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) { rte_delay_us(burst_tx_delay_time); nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue, - &pkts_burst[nb_tx], nb_rx - nb_tx); 
+ &tx_pkts_burst[nb_tx], nb_rx - nb_tx); } } fs->tx_packets += nb_tx; @@ -902,9 +936,10 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) if (unlikely(nb_tx < nb_rx)) { fs->fwd_dropped += (nb_rx - nb_tx); do { - rte_pktmbuf_free(pkts_burst[nb_tx]); + rte_pktmbuf_free(tx_pkts_burst[nb_tx]); } while (++nb_tx < nb_rx); } + #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES end_tsc = rte_rdtsc(); core_cycles = (end_tsc - start_tsc); diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 4317a177b2..57a6f895c2 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -398,6 +398,9 @@ static int eth_event_callback(portid_t port_id, */ static int all_ports_started(void); +struct gso_status gso_ports[RTE_MAX_ETHPORTS]; +uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN; + /* * Helper function to check if socket is already discovered. * If yes, return positive value. If not, return zero. @@ -557,6 +560,7 @@ init_config(void) lcoreid_t lc_id; uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; struct rte_gro_param gro_param; + uint32_t gso_types; memset(port_per_socket,0,RTE_MAX_NUMA_NODES); @@ -641,6 +645,8 @@ init_config(void) init_port_config(); + gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO; /* * Records which Mbuf pool to use by each logical core, if needed. */ @@ -651,6 +657,13 @@ init_config(void) if (mbp == NULL) mbp = mbuf_pool_find(0); fwd_lcores[lc_id]->mbp = mbp; + /* initialize GSO context */ + fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; + fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; + fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; + fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN - + ETHER_CRC_LEN; + fwd_lcores[lc_id]->gso_ctx.flag = 0; } /* Configuration of packet forwarding streams. */ diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index 2dc3b74682..e2d9e34e1d 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -36,6 +36,7 @@ #include #include +#include #define RTE_PORT_ALL (~(portid_t)0x0) @@ -206,6 +207,7 @@ struct rte_port { * CPU id. configuration table. */ struct fwd_lcore { + struct rte_gso_ctx gso_ctx; /**< GSO context */ struct rte_mempool *mbp; /**< The mbuf pool to use by this core */ void *gro_ctx; /**< GRO context */ streamid_t stream_idx; /**< index of 1st stream in "fwd_streams" */ @@ -450,6 +452,13 @@ struct gro_status { extern struct gro_status gro_ports[RTE_MAX_ETHPORTS]; extern uint8_t gro_flush_cycles; +#define GSO_MAX_PKT_BURST 2048 +struct gso_status { + uint8_t enable; +}; +extern struct gso_status gso_ports[RTE_MAX_ETHPORTS]; +extern uint16_t gso_max_segment_size; + static inline unsigned int lcore_num(void) { @@ -652,6 +661,7 @@ int tx_queue_id_is_invalid(queueid_t txq_id); void setup_gro(const char *onoff, portid_t port_id); void setup_gro_flush_cycles(uint8_t cycles); void show_gro(portid_t port_id); +void setup_gso(const char *mode, portid_t port_id); /* Functions to manage the set of filtered Multicast MAC addresses */ void mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr); diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst index 0f45344484..eb3cc66eb0 100644 --- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst +++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst @@ -960,6 +960,52 @@ Please note that the large value of ``cycles`` may cause the poor TCP/IP stack performance. Because the GROed packets are delayed to arrive the stack, thus causing more duplicated ACKs and TCP retransmissions. 
+set port - gso
+~~~~~~~~~~~~~~
+
+Toggle per-port GSO support in the ``csum`` forwarding engine::
+
+   testpmd> set port <port_id> gso on|off
+
+If enabled, the csum forwarding engine will perform GSO on supported IPv4
+packets transmitted on the given port.
+
+If disabled, packets transmitted on the given port will not undergo GSO.
+By default, GSO is disabled for all ports.
+
+.. note::
+
+   When GSO is enabled on a port, supported IPv4 packets transmitted on that
+   port undergo GSO. Afterwards, the segmented packets are represented by
+   multi-segment mbufs; however, the csum forwarding engine doesn't calculate
+   checksums for GSO'd segments in SW. As a result, if users want correct
+   checksums in GSO segments, they should enable HW checksum calculation for
+   GSO-enabled ports.
+
+   For example, HW checksum calculation for VxLAN GSO'd packets may be enabled
+   by setting the following options in the csum forwarding engine::
+
+      testpmd> csum set outer_ip hw <port_id>
+
+      testpmd> csum set ip hw <port_id>
+
+      testpmd> csum set tcp hw <port_id>
+
+set gso segsz
+~~~~~~~~~~~~~
+
+Set the maximum GSO segment size (measured in bytes), which includes the
+packet header and the packet payload. This setting applies globally to all
+GSO-enabled ports::
+
+   testpmd> set gso segsz <length>
+
+show port - gso
+~~~~~~~~~~~~~~~
+
+Display the status of Generic Segmentation Offload for a given port::
+
+   testpmd> show port <port_id> gso
+
 mac_addr add
 ~~~~~~~~~~~~
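
For reference, a minimal csum-engine session using the commands introduced by
this patch could look as follows. The port number (0) and the 1400-byte
segment size are illustrative choices, not values taken from the patch; note
that forwarding must be stopped before the GSO settings are changed, and that
HW checksum offload is enabled here so the produced segments carry correct
checksums::

   testpmd> stop
   testpmd> set fwd csum
   testpmd> csum set ip hw 0
   testpmd> csum set tcp hw 0
   testpmd> set port 0 gso on
   testpmd> set gso segsz 1400
   testpmd> show port 0 gso
   testpmd> start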
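
Outside of testpmd, the per-lcore GSO context that init_config() fills in can
be prepared by a small helper such as the sketch below. The helper name
gso_ctx_init and the caller-supplied mempool are assumptions for illustration;
the field names and the offload flags come from the diff above, and, as in
testpmd, the same pool serves as both the direct and the indirect mbuf pool::

   #include <rte_ethdev.h>
   #include <rte_gso.h>
   #include <rte_mempool.h>

   /*
    * Sketch: fill a GSO context for one forwarding core.
    * "pool" is an existing mbuf mempool supplied by the caller.
    */
   static void
   gso_ctx_init(struct rte_gso_ctx *ctx, struct rte_mempool *pool,
                uint16_t seg_size)
   {
           ctx->direct_pool = pool;
           ctx->indirect_pool = pool;
           ctx->gso_types = DEV_TX_OFFLOAD_TCP_TSO |
                            DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                            DEV_TX_OFFLOAD_GRE_TNL_TSO;
           ctx->gso_size = seg_size; /* max length of each output segment */
           ctx->flag = 0;
   }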
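
The transmit-side flow added to pkt_burst_checksum_forward() reduces to
"segment each packet into a local array, then hand the array to
rte_eth_tx_burst()". A condensed, hypothetical version of that loop is
sketched below: the function name gso_segment_and_send, the GSO_SEG_BURST
bound and the simplified error handling (free the failing packet, free
segments the driver rejects) are assumptions, while the rte_gso_segment()
call mirrors the one in the patch. Packets flagged with PKT_TX_TCP_SEG and
longer than ctx->gso_size are split by the library; other packets are handed
back in the output array unchanged::

   #include <rte_ethdev.h>
   #include <rte_gso.h>
   #include <rte_mbuf.h>

   #define GSO_SEG_BURST 2048 /* assumed bound, mirrors GSO_MAX_PKT_BURST */

   /* Sketch: segment a burst and transmit the resulting segments. */
   static uint16_t
   gso_segment_and_send(uint16_t port, uint16_t queue,
                        struct rte_gso_ctx *ctx,
                        struct rte_mbuf **pkts, uint16_t nb_pkts)
   {
           struct rte_mbuf *segs[GSO_SEG_BURST];
           uint16_t nb_segs = 0;
           uint16_t nb_tx;
           uint16_t i;
           int ret;

           for (i = 0; i < nb_pkts; i++) {
                   ret = rte_gso_segment(pkts[i], ctx, &segs[nb_segs],
                                         GSO_SEG_BURST - nb_segs);
                   if (ret >= 0)
                           nb_segs += ret;
                   else
                           /* segmentation failed; drop, as the csum
                            * engine does */
                           rte_pktmbuf_free(pkts[i]);
           }

           nb_tx = rte_eth_tx_burst(port, queue, segs, nb_segs);
           for (i = nb_tx; i < nb_segs; i++)
                   rte_pktmbuf_free(segs[i]); /* not accepted by the driver */
           return nb_tx;
   }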