app/testpmd: enable TCP/IPv4 VxLAN and GRE GSO
[dpdk.git] / app / test-pmd / config.c
index 10b98b1..d04940c 100644 (file)
@@ -36,7 +36,6 @@
 #include <errno.h>
 #include <stdio.h>
 #include <string.h>
-#include <stdarg.h>
 #include <stdint.h>
 #include <inttypes.h>
 
@@ -71,6 +70,7 @@
 #ifdef RTE_LIBRTE_BNXT_PMD
 #include <rte_pmd_bnxt.h>
 #endif
+#include <rte_gro.h>
 
 #include "testpmd.h"
 
@@ -203,8 +203,10 @@ nic_stats_display(portid_t port_id)
        if (diff_cycles > 0)
                diff_cycles = prev_cycles[port_id] - diff_cycles;
 
-       diff_pkts_rx = stats.ipackets - prev_pkts_rx[port_id];
-       diff_pkts_tx = stats.opackets - prev_pkts_tx[port_id];
+       diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
+               (stats.ipackets - prev_pkts_rx[port_id]) : 0;
+       diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
+               (stats.opackets - prev_pkts_tx[port_id]) : 0;
        prev_pkts_rx[port_id] = stats.ipackets;
        prev_pkts_tx[port_id] = stats.opackets;
        mpps_rx = diff_cycles > 0 ?
@@ -358,7 +360,7 @@ rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
 
        rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
        if (rc != 0) {
-               printf("Failed to retrieve information for port: %hhu, "
+               printf("Failed to retrieve information for port: %u, "
                        "RX queue: %hu\nerror desc: %s(%d)\n",
                        port_id, queue_id, strerror(-rc), rc);
                return;
@@ -391,7 +393,7 @@ tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
 
        rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
        if (rc != 0) {
-               printf("Failed to retrieve information for port: %hhu, "
+               printf("Failed to retrieve information for port: %u, "
                        "TX queue: %hu\nerror desc: %s(%d)\n",
                        port_id, queue_id, strerror(-rc), rc);
                return;
@@ -498,12 +500,15 @@ port_infos_display(portid_t port_id)
                char *p;
 
                printf("Supported flow types:\n");
-               for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX;
-                                                               i++) {
+               for (i = RTE_ETH_FLOW_UNKNOWN + 1;
+                    i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
                        if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
                                continue;
                        p = flowtype_to_str(i);
-                       printf("  %s\n", (p ? p : "unknown"));
+                       if (p)
+                               printf("  %s\n", p);
+                       else
+                               printf("  user defined %d\n", i);
                }
        }
 
@@ -946,6 +951,10 @@ static const struct {
        MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
        MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
        MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
+       MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
+       MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
+       MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+       MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
 };
 
 /** Compute storage space needed by item specification. */
@@ -953,8 +962,10 @@ static void
 flow_item_spec_size(const struct rte_flow_item *item,
                    size_t *size, size_t *pad)
 {
-       if (!item->spec)
+       if (!item->spec) {
+               *size = 0;
                goto empty;
+       }
        switch (item->type) {
                union {
                        const struct rte_flow_item_raw *raw;
@@ -966,10 +977,10 @@ flow_item_spec_size(const struct rte_flow_item *item,
                        spec.raw->length * sizeof(*spec.raw->pattern);
                break;
        default:
-empty:
-               *size = 0;
+               *size = flow_item[item->type].size;
                break;
        }
+empty:
        *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
 }
 
@@ -1004,8 +1015,10 @@ static void
 flow_action_conf_size(const struct rte_flow_action *action,
                      size_t *size, size_t *pad)
 {
-       if (!action->conf)
+       if (!action->conf) {
+               *size = 0;
                goto empty;
+       }
        switch (action->type) {
                union {
                        const struct rte_flow_action_rss *rss;
@@ -1017,10 +1030,10 @@ flow_action_conf_size(const struct rte_flow_action *action,
                        conf.rss->num * sizeof(*conf.rss->queue);
                break;
        default:
-empty:
-               *size = 0;
+               *size = flow_action[action->type].size;
                break;
        }
+empty:
        *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
 }
 
@@ -1411,6 +1424,22 @@ port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
        }
 }
 
+/** Restrict ingress traffic to the defined flow rules. */
+int
+port_flow_isolate(portid_t port_id, int set)
+{
+       struct rte_flow_error error;
+
+       /* Poisoning to make sure PMDs update it in case of error. */
+       memset(&error, 0x66, sizeof(error));
+       if (rte_flow_isolate(port_id, set, &error))
+               return port_flow_complain(&error);
+       printf("Ingress traffic on port %u is %s to the defined flow rules\n",
+              port_id,
+              set ? "now restricted" : "not restricted anymore");
+       return 0;
+}
+
 /*
  * RX/TX ring descriptors display functions.
  */
@@ -2398,6 +2427,113 @@ set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
        tx_pkt_nb_segs = (uint8_t) nb_segs;
 }
 
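+/*
+ * Enable or disable TCP/IPv4 GRO on the given port. Forwarding must be
+ * stopped first; if the flush cycle is still at its default value, the
+ * default GRO flow and item limits are applied as well.
+ */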
+void
+setup_gro(const char *onoff, portid_t port_id)
+{
+       if (!rte_eth_dev_is_valid_port(port_id)) {
+               printf("invalid port id %u\n", port_id);
+               return;
+       }
+       if (test_done == 0) {
+               printf("Before enable/disable GRO,"
+                               " please stop forwarding first\n");
+               return;
+       }
+       if (strcmp(onoff, "on") == 0) {
+               if (gro_ports[port_id].enable != 0) {
+                       printf("Port %u has enabled GRO. Please"
+                                       " disable GRO first\n", port_id);
+                       return;
+               }
+               if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
+                       gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
+                       gro_ports[port_id].param.max_flow_num =
+                               GRO_DEFAULT_FLOW_NUM;
+                       gro_ports[port_id].param.max_item_per_flow =
+                               GRO_DEFAULT_ITEM_NUM_PER_FLOW;
+               }
+               gro_ports[port_id].enable = 1;
+       } else {
+               if (gro_ports[port_id].enable == 0) {
+                       printf("Port %u has disabled GRO\n", port_id);
+                       return;
+               }
+               gro_ports[port_id].enable = 0;
+       }
+}
+
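+/*
+ * Set the number of forwarding cycles between two GRO flush operations.
+ * Values outside GRO_DEFAULT_FLUSH_CYCLES..GRO_MAX_FLUSH_CYCLES revert
+ * to the default.
+ */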
+void
+setup_gro_flush_cycles(uint8_t cycles)
+{
+       if (test_done == 0) {
+               printf("Before change flush interval for GRO,"
+                               " please stop forwarding first.\n");
+               return;
+       }
+
+       if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
+                       GRO_DEFAULT_FLUSH_CYCLES) {
+               printf("The flushing cycle be in the range"
+                               " of 1 to %u. Revert to the default"
+                               " value %u.\n",
+                               GRO_MAX_FLUSH_CYCLES,
+                               GRO_DEFAULT_FLUSH_CYCLES);
+               cycles = GRO_DEFAULT_FLUSH_CYCLES;
+       }
+
+       gro_flush_cycles = cycles;
+}
+
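+/*
+ * Display the GRO configuration of a port: the GRO type, the maximum
+ * number of packets GRO handles at a time, and the flush cycle count.
+ */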
+void
+show_gro(portid_t port_id)
+{
+       struct rte_gro_param *param;
+       uint32_t max_pkts_num;
+
+       param = &gro_ports[port_id].param;
+
+       if (!rte_eth_dev_is_valid_port(port_id)) {
+               printf("Invalid port id %u.\n", port_id);
+               return;
+       }
+       if (gro_ports[port_id].enable) {
+               printf("GRO type: TCP/IPv4\n");
+               if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
+                       max_pkts_num = param->max_flow_num *
+                               param->max_item_per_flow;
+               } else
+                       max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
+               printf("Max number of packets to perform GRO: %u\n",
+                               max_pkts_num);
+               printf("Flushing cycles: %u\n", gro_flush_cycles);
+       } else
+               printf("Port %u doesn't enable GRO.\n", port_id);
+}
+
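+/*
+ * Enable or disable GSO on the given port; forwarding must be stopped
+ * first. Presumably driven by a "set port <port_id> gso on|off" testpmd
+ * command (assumption: the cmdline binding lives outside this file).
+ */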
+void
+setup_gso(const char *mode, portid_t port_id)
+{
+       if (!rte_eth_dev_is_valid_port(port_id)) {
+               printf("invalid port id %u\n", port_id);
+               return;
+       }
+       if (strcmp(mode, "on") == 0) {
+               if (test_done == 0) {
+                       printf("before enabling GSO,"
+                                       " please stop forwarding first\n");
+                       return;
+               }
+               gso_ports[port_id].enable = 1;
+       } else if (strcmp(mode, "off") == 0) {
+               if (test_done == 0) {
+                       printf("before disabling GSO,"
+                                       " please stop forwarding first\n");
+                       return;
+               }
+               gso_ports[port_id].enable = 0;
+       }
+}
+
 char*
 list_pkt_forwarding_modes(void)
 {
@@ -2948,7 +3084,7 @@ fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
                        return;
                }
        }
-       (void)rte_memcpy(&flex_conf->flex_mask[idx],
+       rte_memcpy(&flex_conf->flex_mask[idx],
                         cfg,
                         sizeof(struct rte_eth_fdir_flex_mask));
 }
@@ -2978,16 +3114,16 @@ fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
                        return;
                }
        }
-       (void)rte_memcpy(&flex_conf->flex_set[idx],
+       rte_memcpy(&flex_conf->flex_set[idx],
                         cfg,
                         sizeof(struct rte_eth_flex_payload_cfg));
 
 }
 
-#ifdef RTE_LIBRTE_IXGBE_PMD
 void
 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
 {
+#ifdef RTE_LIBRTE_IXGBE_PMD
        int diag;
 
        if (is_rx)
@@ -2997,15 +3133,15 @@ set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
 
        if (diag == 0)
                return;
-       if(is_rx)
-               printf("rte_pmd_ixgbe_set_vf_rx for port_id=%d failed "
-                       "diag=%d\n", port_id, diag);
-       else
-               printf("rte_pmd_ixgbe_set_vf_tx for port_id=%d failed "
-                       "diag=%d\n", port_id, diag);
-
-}
+       printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
+                       is_rx ? "rx" : "tx", port_id, diag);
+       return;
 #endif
+       printf("VF %s setting not supported for port %d\n",
+                       is_rx ? "Rx" : "Tx", port_id);
+       RTE_SET_USED(vf);
+       RTE_SET_USED(on);
+}
 
 int
 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
@@ -3292,6 +3428,27 @@ open_ddp_package_file(const char *file_path, uint32_t *size)
        return buf;
 }
 
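+/*
+ * Write 'size' bytes from 'buf' to 'file_path'.
+ * Return 0 on success, -1 on failure.
+ */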
+int
+save_ddp_package_file(const char *file_path, uint8_t *buf, uint32_t size)
+{
+       FILE *fh = fopen(file_path, "wb");
+
+       if (fh == NULL) {
+               printf("%s: Failed to open %s\n", __func__, file_path);
+               return -1;
+       }
+
+       if (fwrite(buf, 1, size, fh) != size) {
+               fclose(fh);
+               printf("%s: File write operation failed\n", __func__);
+               return -1;
+       }
+
+       fclose(fh);
+
+       return 0;
+}
+
 int
 close_ddp_package_file(uint8_t *buf)
 {