test: remove autotest grouping
diff --git a/test/test/autotest_runner.py b/test/test/autotest_runner.py
index b09b578..d6ae57e 100644
--- a/test/test/autotest_runner.py
+++ b/test/test/autotest_runner.py
@@ -3,6 +3,7 @@
 
 # The main logic behind running autotests in parallel
 
+from __future__ import print_function
 import StringIO
 import csv
 import multiprocessing
@@ -41,19 +42,17 @@ def wait_prompt(child):
 # quite a bit of effort to make it work).
 
 
-def run_test_group(cmdline, test_group):
-    results = []
-    child = None
+def run_test_group(cmdline, prefix, target, test):
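+    # run a single test in its own instance of the test app; return one
+    # result tuple: (result, result_str, test_name, test_time, log, report)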
     start_time = time.time()
-    startuplog = None
+
+    # prepare logging of init
+    startuplog = StringIO.StringIO()
 
     # run test app
     try:
-        # prepare logging of init
-        startuplog = StringIO.StringIO()
 
-        print >>startuplog, "\n%s %s\n" % ("=" * 20, test_group["Prefix"])
-        print >>startuplog, "\ncmdline=%s" % cmdline
+        print("\n%s %s\n" % ("=" * 20, prefix), file=startuplog)
+        print("\ncmdline=%s" % cmdline, file=startuplog)
 
         child = pexpect.spawn(cmdline, logfile=startuplog)
 
@@ -61,97 +60,54 @@ def run_test_group(cmdline, test_group):
         if not wait_prompt(child):
             child.close()
 
-            results.append((-1,
-                            "Fail [No prompt]",
-                            "Start %s" % test_group["Prefix"],
-                            time.time() - start_time,
-                            startuplog.getvalue(),
-                            None))
-
-            # mark all tests as failed
-            for test in test_group["Tests"]:
-                results.append((-1, "Fail [No prompt]", test["Name"],
-                                time.time() - start_time, "", None))
-            # exit test
-            return results
+            return -1, "Fail [No prompt]", "Start %s" % prefix,\
+                   time.time() - start_time, startuplog.getvalue(), None
 
     except:
-        results.append((-1,
-                        "Fail [Can't run]",
-                        "Start %s" % test_group["Prefix"],
-                        time.time() - start_time,
-                        startuplog.getvalue(),
-                        None))
-
-        # mark all tests as failed
-        for t in test_group["Tests"]:
-            results.append((-1, "Fail [Can't run]", t["Name"],
-                            time.time() - start_time, "", None))
-        # exit test
-        return results
-
-    # startup was successful
-    results.append((0, "Success", "Start %s" % test_group["Prefix"],
-                    time.time() - start_time, startuplog.getvalue(), None))
-
-    # parse the binary for available test commands
-    binary = cmdline.split()[0]
-    stripped = 'not stripped' not in subprocess.check_output(['file', binary])
-    if not stripped:
-        symbols = subprocess.check_output(['nm', binary]).decode('utf-8')
-        avail_cmds = re.findall('test_register_(\w+)', symbols)
-
-    # run all tests in test group
-    for test in test_group["Tests"]:
-
-        # create log buffer for each test
-        # in multiprocessing environment, the logging would be
-        # interleaved and will create a mess, hence the buffering
-        logfile = StringIO.StringIO()
-        child.logfile = logfile
-
-        result = ()
-
-        # make a note when the test started
-        start_time = time.time()
+        return -1, "Fail [Can't run]", "Start %s" % prefix,\
+               time.time() - start_time, startuplog.getvalue(), None
 
-        try:
-            # print test name to log buffer
-            print >>logfile, "\n%s %s\n" % ("-" * 20, test["Name"])
+    # create log buffer for each test
+    # in a multiprocessing environment, the logging would be
+    # interleaved and would create a mess, hence the buffering
+    logfile = StringIO.StringIO()
+    child.logfile = logfile
 
-            # run test function associated with the test
-            if stripped or test["Command"] in avail_cmds:
-                result = test["Func"](child, test["Command"])
-            else:
-                result = (0, "Skipped [Not Available]")
+    # make a note when the test started
+    start_time = time.time()
 
-            # make a note when the test was finished
-            end_time = time.time()
+    try:
+        # print test name to log buffer
+        print("\n%s %s\n" % ("-" * 20, test["Name"]), file=logfile)
 
-            # append test data to the result tuple
-            result += (test["Name"], end_time - start_time,
-                       logfile.getvalue())
+        # run test function associated with the test
+        result = test["Func"](child, test["Command"])
 
-            # call report function, if any defined, and supply it with
-            # target and complete log for test run
-            if test["Report"]:
-                report = test["Report"](self.target, log)
+        # make a note when the test was finished
+        end_time = time.time()
 
-                # append report to results tuple
-                result += (report,)
-            else:
-                # report is None
-                result += (None,)
-        except:
-            # make a note when the test crashed
-            end_time = time.time()
+        log = logfile.getvalue()
+
+        # append test data to the result tuple
+        result += (test["Name"], end_time - start_time, log)
+
+        # call report function, if any defined, and supply it with
+        # target and complete log for test run
+        if test["Report"]:
+            report = test["Report"](target, log)
+
+            # append report to results tuple
+            result += (report,)
+        else:
+            # report is None
+            result += (None,)
+    except:
+        # make a note when the test crashed
+        end_time = time.time()
 
-            # mark test as failed
-            result = (-1, "Fail [Crash]", test["Name"],
-                      end_time - start_time, logfile.getvalue(), None)
-        finally:
-            # append the results to the results list
-            results.append(result)
+        # mark test as failed
+        result = (-1, "Fail [Crash]", test["Name"],
+                  end_time - start_time, logfile.getvalue(), None)
 
     # regardless of whether test has crashed, try quitting it
     try:
@@ -163,7 +119,7 @@ def run_test_group(cmdline, test_group):
         pass
 
     # return test results
-    return results
+    return result
 
 
 # class representing an instance of autotests run
@@ -184,8 +140,12 @@ class AutotestRunner:
     def __init__(self, cmdline, target, blacklist, whitelist):
         self.cmdline = cmdline
         self.target = target
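+        # remember the path to the test binary; __filter_test parses it to
+        # find out which test commands were compiled in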
+        self.binary = cmdline.split()[0]
         self.blacklist = blacklist
         self.whitelist = whitelist
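+        # individual test lists replace the former test groups; skipped
+        # tests are collected and reported separately before the run starts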
+        self.skipped = []
+        self.parallel_tests = []
+        self.non_parallel_tests = []
 
         # log file filename
         logfile = "%s.log" % target
@@ -199,128 +159,99 @@ class AutotestRunner:
         self.csvwriter.writerow(["test_name", "test_result", "result_str"])
 
     # set up cmdline string
-    def __get_cmdline(self, test):
+    def __get_cmdline(self):
         cmdline = self.cmdline
 
-        # append memory limitations for each test
-        # otherwise tests won't run in parallel
-        if "i686" not in self.target:
-            cmdline += " --socket-mem=%s" % test["Memory"]
-        else:
-            # affinitize startup so that tests don't fail on i686
-            cmdline = "taskset 1 " + cmdline
-            cmdline += " -m " + str(sum(map(int, test["Memory"].split(","))))
-
-        # set group prefix for autotest group
-        # otherwise they won't run in parallel
-        cmdline += " --file-prefix=%s" % test["Prefix"]
+        # affinitize startup so that tests don't fail on i686
+        cmdline = "taskset 1 " + cmdline
 
         return cmdline
 
-    def add_parallel_test_group(self, test_group):
-        self.parallel_test_groups.append(test_group)
+    def __process_result(self, result):
 
-    def add_non_parallel_test_group(self, test_group):
-        self.non_parallel_test_groups.append(test_group)
+        # unpack result tuple
+        test_result, result_str, test_name, \
+            test_time, log, report = result
 
-    def __process_results(self, results):
-        # this iterates over individual test results
-        for i, result in enumerate(results):
+        # get total run time
+        cur_time = time.time()
+        total_time = int(cur_time - self.start)
 
-            # increase total number of tests that were run
-            # do not include "start" test
-            if i > 0:
-                self.n_tests += 1
+        # print results, test run time and total time since start
+        result = ("%s:" % test_name).ljust(30)
+        result += result_str.ljust(29)
+        result += "[%02dm %02ds]" % (test_time / 60, test_time % 60)
 
-            # unpack result tuple
-            test_result, result_str, test_name, \
-                test_time, log, report = result
+        # print the total run time since start alongside each result
+        print(result + "[%02dm %02ds]" % (total_time / 60, total_time % 60))
 
-            # get total run time
-            cur_time = time.time()
-            total_time = int(cur_time - self.start)
+        # if the test failed, increase the failure count
+        if test_result < 0:
+            self.fails += 1
 
-            # print results, test run time and total time since start
-            result = ("%s:" % test_name).ljust(30)
-            result += result_str.ljust(29)
-            result += "[%02dm %02ds]" % (test_time / 60, test_time % 60)
+        # collect logs
+        self.log_buffers.append(log)
 
-            # don't print out total time every line, it's the same anyway
-            if i == len(results) - 1:
-                print(result +
-                      "[%02dm %02ds]" % (total_time / 60, total_time % 60))
-            else:
-                print(result)
-
-            # if test failed and it wasn't a "start" test
-            if test_result < 0 and not i == 0:
-                self.fails += 1
-
-            # collect logs
-            self.log_buffers.append(log)
-
-            # create report if it exists
-            if report:
-                try:
-                    f = open("%s_%s_report.rst" %
-                             (self.target, test_name), "w")
-                except IOError:
-                    print("Report for %s could not be created!" % test_name)
-                else:
-                    with f:
-                        f.write(report)
-
-            # write test result to CSV file
-            if i != 0:
-                self.csvwriter.writerow([test_name, test_result, result_str])
-
-    # this function iterates over test groups and removes each
-    # test that is not in whitelist/blacklist
-    def __filter_groups(self, test_groups):
-        groups_to_remove = []
-
-        # filter out tests from parallel test groups
-        for i, test_group in enumerate(test_groups):
-
-            # iterate over a copy so that we could safely delete individual
-            # tests
-            for test in test_group["Tests"][:]:
-                test_id = test["Command"]
-
-                # dump tests are specified in full e.g. "Dump_mempool"
-                if "_autotest" in test_id:
-                    test_id = test_id[:-len("_autotest")]
-
-                # filter out blacklisted/whitelisted tests
-                if self.blacklist and test_id in self.blacklist:
-                    test_group["Tests"].remove(test)
-                    continue
-                if self.whitelist and test_id not in self.whitelist:
-                    test_group["Tests"].remove(test)
-                    continue
-
-            # modify or remove original group
-            if len(test_group["Tests"]) > 0:
-                test_groups[i] = test_group
+        # create report if it exists
+        if report:
+            try:
+                f = open("%s_%s_report.rst" %
+                         (self.target, test_name), "w")
+            except IOError:
+                print("Report for %s could not be created!" % test_name)
             else:
-                # remember which groups should be deleted
-                # put the numbers backwards so that we start
-                # deleting from the end, not from the beginning
-                groups_to_remove.insert(0, i)
+                with f:
+                    f.write(report)
+
+        # write test result to CSV file
+        self.csvwriter.writerow([test_name, test_result, result_str])
+
+    # this function checks an individual test and decides whether it should
+    # be run by comparing it against the whitelist/blacklist. it also checks
+    # whether the test is compiled into the binary, and marks it as skipped
+    # if necessary
+    def __filter_test(self, test):
+        test_cmd = test["Command"]
+        test_id = test_cmd
+
+        # dump tests are specified in full e.g. "Dump_mempool"
+        if "_autotest" in test_id:
+            test_id = test_id[:-len("_autotest")]
+
+        # filter out blacklisted/whitelisted tests
+        if self.blacklist and test_id in self.blacklist:
+            return False
+        if self.whitelist and test_id not in self.whitelist:
+            return False
+
+        # if the test wasn't compiled in, filter it out as well
+
+        # parse the binary for available test commands
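+        # compiled-in test commands are expected to show up as
+        # test_register_<name> symbols in an unstripped binary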
+        stripped = 'not stripped' not in \
+                   subprocess.check_output(['file', self.binary])
+        if not stripped:
+            symbols = subprocess.check_output(['nm',
+                                               self.binary]).decode('utf-8')
+            avail_cmds = re.findall(r'test_register_(\w+)', symbols)
+
+            if test_cmd not in avail_cmds:
+                # notify user
+                result = (0, "Skipped [Not compiled]", test_id, 0, "", None)
+                self.skipped.append(result)
+                return False
 
-        # remove test groups that need to be removed
-        for i in groups_to_remove:
-            del test_groups[i]
-
-        return test_groups
+        return True
 
     # iterate over test groups and run tests associated with them
     def run_all_tests(self):
         # filter groups
-        self.parallel_test_groups = \
-            self.__filter_groups(self.parallel_test_groups)
-        self.non_parallel_test_groups = \
-            self.__filter_groups(self.non_parallel_test_groups)
+        self.parallel_tests = list(
+            filter(self.__filter_test,
+                   self.parallel_tests)
+        )
+        self.non_parallel_tests = list(
+            filter(self.__filter_test,
+                   self.non_parallel_tests)
+        )
 
         # create a pool of worker threads
         pool = multiprocessing.Pool(processes=1)
@@ -336,15 +267,36 @@ class AutotestRunner:
                   "Test".center(9) + "Total".center(9))
             print("=" * 80)
 
+            # print out skipped autotests if there were any
+            if len(self.skipped):
+                print("Skipped autotests:")
+
+                # print out any skipped tests
+                for result in self.skipped:
+                    # unpack result tuple
+                    test_result, result_str, test_name, _, _, _ = result
+                    self.csvwriter.writerow([test_name, test_result,
+                                             result_str])
+
+                    t = ("%s:" % test_name).ljust(30)
+                    t += result_str.ljust(29)
+                    t += "[00m 00s]"
+
+                    print(t)
+
             # make a note of tests start time
             self.start = time.time()
 
-            # assign worker threads to run test groups
-            for test_group in self.parallel_test_groups:
-                result = pool.apply_async(run_test_group,
-                                          [self.__get_cmdline(test_group),
-                                           test_group])
-                results.append(result)
+            if len(self.parallel_tests) > 0:
+                print("Parallel autotests:")
+                # assign worker threads to run test groups
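+                # __get_cmdline no longer sets --file-prefix, so only an
+                # empty prefix string is passed along for logging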
+                for test_group in self.parallel_tests:
+                    result = pool.apply_async(run_test_group,
+                                              [self.__get_cmdline(),
+                                               "",
+                                               self.target,
+                                               test_group])
+                    results.append(result)
 
             # iterate while we have group execution results to get
             while len(results) > 0:
@@ -359,17 +311,19 @@ class AutotestRunner:
 
                     res = group_result.get()
 
-                    self.__process_results(res)
+                    self.__process_result(res)
 
                     # remove result from results list once we're done with it
                     results.remove(group_result)
 
-            # run non_parallel tests. they are run one by one, synchronously
-            for test_group in self.non_parallel_test_groups:
-                group_result = run_test_group(
-                    self.__get_cmdline(test_group), test_group)
+            if len(self.non_parallel_tests) > 0:
+                print("Non-parallel autotests:")
+                # non-parallel tests are run one by one, synchronously
+                for test_group in self.non_parallel_tests:
+                    group_result = run_test_group(
+                        self.__get_cmdline(), "", self.target, test_group)
 
-                self.__process_results(group_result)
+                    self.__process_result(group_result)
 
             # get total run time
             cur_time = time.time()