# Script that runs cmdline_test app and feeds keystrokes into it.
-import sys, pexpect, string, os, cmdline_test_data
+import cmdline_test_data
+import os
+import pexpect
+import sys
+
#
# function to run test
#
-def runTest(child,test):
- child.send(test["Sequence"])
- if test["Result"] == None:
- return 0
- child.expect(test["Result"],1)
+def runTest(child, test):
+ child.send(test["Sequence"])
+ if test["Result"] is None:
+ return 0
+ child.expect(test["Result"], 1)
+
#
# history test is a special case
# This is a self-contained test, it needs only a pexpect child
#
def runHistoryTest(child):
- # find out history size
- child.sendline(cmdline_test_data.CMD_GET_BUFSIZE)
- child.expect("History buffer size: \\d+", timeout=1)
- history_size = int(child.after[len(cmdline_test_data.BUFSIZE_TEMPLATE):])
- i = 0
+ # find out history size
+ child.sendline(cmdline_test_data.CMD_GET_BUFSIZE)
+ child.expect("History buffer size: \\d+", timeout=1)
+ history_size = int(child.after[len(cmdline_test_data.BUFSIZE_TEMPLATE):])
+ i = 0
- # fill the history with numbers
- while i < history_size / 10:
- # add 1 to prevent from parsing as octals
- child.send("1" + str(i).zfill(8) + cmdline_test_data.ENTER)
- # the app will simply print out the number
- child.expect(str(i + 100000000), timeout=1)
- i += 1
- # scroll back history
- child.send(cmdline_test_data.UP * (i + 2) + cmdline_test_data.ENTER)
- child.expect("100000000", timeout=1)
+ # fill the history with numbers
+ while i < history_size / 10:
+ # prepend "1" so the zero-padded number is not parsed as octal
+ child.send("1" + str(i).zfill(8) + cmdline_test_data.ENTER)
+ # the app will simply print out the number
+ child.expect(str(i + 100000000), timeout=1)
+ i += 1
+ # scroll back history
+ child.send(cmdline_test_data.UP * (i + 2) + cmdline_test_data.ENTER)
+ child.expect("100000000", timeout=1)
# the path to cmdline_test executable is supplied via command-line.
if len(sys.argv) < 2:
- print "Error: please supply cmdline_test app path"
- sys.exit(1)
+ print "Error: please supply cmdline_test app path"
+ sys.exit(1)
test_app_path = sys.argv[1]
if not os.path.exists(test_app_path):
- print "Error: please supply cmdline_test app path"
- sys.exit(1)
+ print "Error: please supply cmdline_test app path"
+ sys.exit(1)
child = pexpect.spawn(test_app_path)
print "Running command-line tests..."
for test in cmdline_test_data.tests:
- print (test["Name"] + ":").ljust(30),
- try:
- runTest(child,test)
- print "PASS"
- except:
- print "FAIL"
- print child
- sys.exit(1)
+ print (test["Name"] + ":").ljust(30),
+ try:
+ runTest(child, test)
+ print "PASS"
+ except:
+ print "FAIL"
+ print child
+ sys.exit(1)
# since the last test quits the app, run a new instance
child = pexpect.spawn(test_app_path)
print ("History fill test:").ljust(30),
try:
- runHistoryTest(child)
- print "PASS"
+ runHistoryTest(child)
+ print "PASS"
except:
- print "FAIL"
- print child
- sys.exit(1)
+ print "FAIL"
+ print child
+ sys.exit(1)
child.close()
sys.exit(0)
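(For reference, the script above takes the path to the cmdline_test binary as
its only argument, e.g.: python cmdline_test.py <path/to/cmdline_test>)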
# collection of static data
-import sys
-
# keycode constants
CTRL_A = chr(1)
CTRL_B = chr(2)
# and expected output (if any).
tests = [
-# test basic commands
- {"Name" : "command test 1",
- "Sequence" : "ambiguous first" + ENTER,
- "Result" : CMD1},
- {"Name" : "command test 2",
- "Sequence" : "ambiguous second" + ENTER,
- "Result" : CMD2},
- {"Name" : "command test 3",
- "Sequence" : "ambiguous ambiguous" + ENTER,
- "Result" : AMBIG},
- {"Name" : "command test 4",
- "Sequence" : "ambiguous ambiguous2" + ENTER,
- "Result" : AMBIG},
+ # test basic commands
+ {"Name": "command test 1",
+ "Sequence": "ambiguous first" + ENTER,
+ "Result": CMD1},
+ {"Name": "command test 2",
+ "Sequence": "ambiguous second" + ENTER,
+ "Result": CMD2},
+ {"Name": "command test 3",
+ "Sequence": "ambiguous ambiguous" + ENTER,
+ "Result": AMBIG},
+ {"Name": "command test 4",
+ "Sequence": "ambiguous ambiguous2" + ENTER,
+ "Result": AMBIG},
- {"Name" : "invalid command test 1",
- "Sequence" : "ambiguous invalid" + ENTER,
- "Result" : BAD_ARG},
-# test invalid commands
- {"Name" : "invalid command test 2",
- "Sequence" : "invalid" + ENTER,
- "Result" : NOT_FOUND},
- {"Name" : "invalid command test 3",
- "Sequence" : "ambiguousinvalid" + ENTER2,
- "Result" : NOT_FOUND},
+ {"Name": "invalid command test 1",
+ "Sequence": "ambiguous invalid" + ENTER,
+ "Result": BAD_ARG},
+ # test invalid commands
+ {"Name": "invalid command test 2",
+ "Sequence": "invalid" + ENTER,
+ "Result": NOT_FOUND},
+ {"Name": "invalid command test 3",
+ "Sequence": "ambiguousinvalid" + ENTER2,
+ "Result": NOT_FOUND},
-# test arrows and deletes
- {"Name" : "arrows & delete test 1",
- "Sequence" : "singlebad" + LEFT*2 + CTRL_B + DEL*3 + ENTER,
- "Result" : SINGLE},
- {"Name" : "arrows & delete test 2",
- "Sequence" : "singlebad" + LEFT*5 + RIGHT + CTRL_F + DEL*3 + ENTER,
- "Result" : SINGLE},
+ # test arrows and deletes
+ {"Name": "arrows & delete test 1",
+ "Sequence": "singlebad" + LEFT*2 + CTRL_B + DEL*3 + ENTER,
+ "Result": SINGLE},
+ {"Name": "arrows & delete test 2",
+ "Sequence": "singlebad" + LEFT*5 + RIGHT + CTRL_F + DEL*3 + ENTER,
+ "Result": SINGLE},
-# test backspace
- {"Name" : "backspace test",
- "Sequence" : "singlebad" + BKSPACE*3 + ENTER,
- "Result" : SINGLE},
+ # test backspace
+ {"Name": "backspace test",
+ "Sequence": "singlebad" + BKSPACE*3 + ENTER,
+ "Result": SINGLE},
-# test goto left and goto right
- {"Name" : "goto left test",
- "Sequence" : "biguous first" + CTRL_A + "am" + ENTER,
- "Result" : CMD1},
- {"Name" : "goto right test",
- "Sequence" : "biguous fir" + CTRL_A + "am" + CTRL_E + "st" + ENTER,
- "Result" : CMD1},
+ # test goto left and goto right
+ {"Name": "goto left test",
+ "Sequence": "biguous first" + CTRL_A + "am" + ENTER,
+ "Result": CMD1},
+ {"Name": "goto right test",
+ "Sequence": "biguous fir" + CTRL_A + "am" + CTRL_E + "st" + ENTER,
+ "Result": CMD1},
-# test goto words
- {"Name" : "goto left word test",
- "Sequence" : "ambiguous st" + ALT_B + "fir" + ENTER,
- "Result" : CMD1},
- {"Name" : "goto right word test",
- "Sequence" : "ambig first" + CTRL_A + ALT_F + "uous" + ENTER,
- "Result" : CMD1},
+ # test goto words
+ {"Name": "goto left word test",
+ "Sequence": "ambiguous st" + ALT_B + "fir" + ENTER,
+ "Result": CMD1},
+ {"Name": "goto right word test",
+ "Sequence": "ambig first" + CTRL_A + ALT_F + "uous" + ENTER,
+ "Result": CMD1},
-# test removing words
- {"Name" : "remove left word 1",
- "Sequence" : "single invalid" + CTRL_W + ENTER,
- "Result" : SINGLE},
- {"Name" : "remove left word 2",
- "Sequence" : "single invalid" + ALT_BKSPACE + ENTER,
- "Result" : SINGLE},
- {"Name" : "remove right word",
- "Sequence" : "single invalid" + ALT_B + ALT_D + ENTER,
- "Result" : SINGLE},
+ # test removing words
+ {"Name": "remove left word 1",
+ "Sequence": "single invalid" + CTRL_W + ENTER,
+ "Result": SINGLE},
+ {"Name": "remove left word 2",
+ "Sequence": "single invalid" + ALT_BKSPACE + ENTER,
+ "Result": SINGLE},
+ {"Name": "remove right word",
+ "Sequence": "single invalid" + ALT_B + ALT_D + ENTER,
+ "Result": SINGLE},
-# test kill buffer (copy and paste)
- {"Name" : "killbuffer test 1",
- "Sequence" : "ambiguous" + CTRL_A + CTRL_K + " first" + CTRL_A + CTRL_Y + ENTER,
- "Result" : CMD1},
- {"Name" : "killbuffer test 2",
- "Sequence" : "ambiguous" + CTRL_A + CTRL_K + CTRL_Y*26 + ENTER,
- "Result" : NOT_FOUND},
+ # test kill buffer (copy and paste)
+ {"Name": "killbuffer test 1",
+ "Sequence": "ambiguous" + CTRL_A + CTRL_K + " first" + CTRL_A +
+ CTRL_Y + ENTER,
+ "Result": CMD1},
+ {"Name": "killbuffer test 2",
+ "Sequence": "ambiguous" + CTRL_A + CTRL_K + CTRL_Y*26 + ENTER,
+ "Result": NOT_FOUND},
-# test newline
- {"Name" : "newline test",
- "Sequence" : "invalid" + CTRL_C + "single" + ENTER,
- "Result" : SINGLE},
+ # test newline
+ {"Name": "newline test",
+ "Sequence": "invalid" + CTRL_C + "single" + ENTER,
+ "Result": SINGLE},
-# test redisplay (nothing should really happen)
- {"Name" : "redisplay test",
- "Sequence" : "single" + CTRL_L + ENTER,
- "Result" : SINGLE},
+ # test redisplay (nothing should really happen)
+ {"Name": "redisplay test",
+ "Sequence": "single" + CTRL_L + ENTER,
+ "Result": SINGLE},
-# test autocomplete
- {"Name" : "autocomplete test 1",
- "Sequence" : "si" + TAB + ENTER,
- "Result" : SINGLE},
- {"Name" : "autocomplete test 2",
- "Sequence" : "si" + TAB + "_" + TAB + ENTER,
- "Result" : SINGLE_LONG},
- {"Name" : "autocomplete test 3",
- "Sequence" : "in" + TAB + ENTER,
- "Result" : NOT_FOUND},
- {"Name" : "autocomplete test 4",
- "Sequence" : "am" + TAB + ENTER,
- "Result" : BAD_ARG},
- {"Name" : "autocomplete test 5",
- "Sequence" : "am" + TAB + "fir" + TAB + ENTER,
- "Result" : CMD1},
- {"Name" : "autocomplete test 6",
- "Sequence" : "am" + TAB + "fir" + TAB + TAB + ENTER,
- "Result" : CMD1},
- {"Name" : "autocomplete test 7",
- "Sequence" : "am" + TAB + "fir" + TAB + " " + TAB + ENTER,
- "Result" : CMD1},
- {"Name" : "autocomplete test 8",
- "Sequence" : "am" + TAB + " am" + TAB + " " + ENTER,
- "Result" : AMBIG},
- {"Name" : "autocomplete test 9",
- "Sequence" : "am" + TAB + "inv" + TAB + ENTER,
- "Result" : BAD_ARG},
- {"Name" : "autocomplete test 10",
- "Sequence" : "au" + TAB + ENTER,
- "Result" : NOT_FOUND},
- {"Name" : "autocomplete test 11",
- "Sequence" : "au" + TAB + "1" + ENTER,
- "Result" : AUTO1},
- {"Name" : "autocomplete test 12",
- "Sequence" : "au" + TAB + "2" + ENTER,
- "Result" : AUTO2},
- {"Name" : "autocomplete test 13",
- "Sequence" : "au" + TAB + "2" + TAB + ENTER,
- "Result" : AUTO2},
- {"Name" : "autocomplete test 14",
- "Sequence" : "au" + TAB + "2 " + TAB + ENTER,
- "Result" : AUTO2},
- {"Name" : "autocomplete test 15",
- "Sequence" : "24" + TAB + ENTER,
- "Result" : "24"},
+ # test autocomplete
+ {"Name": "autocomplete test 1",
+ "Sequence": "si" + TAB + ENTER,
+ "Result": SINGLE},
+ {"Name": "autocomplete test 2",
+ "Sequence": "si" + TAB + "_" + TAB + ENTER,
+ "Result": SINGLE_LONG},
+ {"Name": "autocomplete test 3",
+ "Sequence": "in" + TAB + ENTER,
+ "Result": NOT_FOUND},
+ {"Name": "autocomplete test 4",
+ "Sequence": "am" + TAB + ENTER,
+ "Result": BAD_ARG},
+ {"Name": "autocomplete test 5",
+ "Sequence": "am" + TAB + "fir" + TAB + ENTER,
+ "Result": CMD1},
+ {"Name": "autocomplete test 6",
+ "Sequence": "am" + TAB + "fir" + TAB + TAB + ENTER,
+ "Result": CMD1},
+ {"Name": "autocomplete test 7",
+ "Sequence": "am" + TAB + "fir" + TAB + " " + TAB + ENTER,
+ "Result": CMD1},
+ {"Name": "autocomplete test 8",
+ "Sequence": "am" + TAB + " am" + TAB + " " + ENTER,
+ "Result": AMBIG},
+ {"Name": "autocomplete test 9",
+ "Sequence": "am" + TAB + "inv" + TAB + ENTER,
+ "Result": BAD_ARG},
+ {"Name": "autocomplete test 10",
+ "Sequence": "au" + TAB + ENTER,
+ "Result": NOT_FOUND},
+ {"Name": "autocomplete test 11",
+ "Sequence": "au" + TAB + "1" + ENTER,
+ "Result": AUTO1},
+ {"Name": "autocomplete test 12",
+ "Sequence": "au" + TAB + "2" + ENTER,
+ "Result": AUTO2},
+ {"Name": "autocomplete test 13",
+ "Sequence": "au" + TAB + "2" + TAB + ENTER,
+ "Result": AUTO2},
+ {"Name": "autocomplete test 14",
+ "Sequence": "au" + TAB + "2 " + TAB + ENTER,
+ "Result": AUTO2},
+ {"Name": "autocomplete test 15",
+ "Sequence": "24" + TAB + ENTER,
+ "Result": "24"},
-# test history
- {"Name" : "history test 1",
- "Sequence" : "invalid" + ENTER + "single" + ENTER + "invalid" + ENTER + UP + CTRL_P + ENTER,
- "Result" : SINGLE},
- {"Name" : "history test 2",
- "Sequence" : "invalid" + ENTER + "ambiguous first" + ENTER + "invalid" + ENTER + "single" + ENTER + UP * 3 + CTRL_N + DOWN + ENTER,
- "Result" : SINGLE},
+ # test history
+ {"Name": "history test 1",
+ "Sequence": "invalid" + ENTER + "single" + ENTER + "invalid" +
+ ENTER + UP + CTRL_P + ENTER,
+ "Result": SINGLE},
+ {"Name": "history test 2",
+ "Sequence": "invalid" + ENTER + "ambiguous first" + ENTER + "invalid" +
+ ENTER + "single" + ENTER + UP * 3 + CTRL_N + DOWN + ENTER,
+ "Result": SINGLE},
-#
-# tests that improve coverage
-#
+ #
+ # tests that improve coverage
+ #
-# empty space tests
- {"Name" : "empty space test 1",
- "Sequence" : RIGHT + LEFT + CTRL_B + CTRL_F + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 2",
- "Sequence" : BKSPACE + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 3",
- "Sequence" : CTRL_E*2 + CTRL_A*2 + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 4",
- "Sequence" : ALT_F*2 + ALT_B*2 + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 5",
- "Sequence" : " " + CTRL_E*2 + CTRL_A*2 + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 6",
- "Sequence" : " " + CTRL_A + ALT_F*2 + ALT_B*2 + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 7",
- "Sequence" : " " + CTRL_A + CTRL_D + CTRL_E + CTRL_D + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 8",
- "Sequence" : " space" + CTRL_W*2 + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 9",
- "Sequence" : " space" + ALT_BKSPACE*2 + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 10",
- "Sequence" : " space " + CTRL_A + ALT_D*3 + ENTER,
- "Result" : PROMPT},
+ # empty space tests
+ {"Name": "empty space test 1",
+ "Sequence": RIGHT + LEFT + CTRL_B + CTRL_F + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 2",
+ "Sequence": BKSPACE + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 3",
+ "Sequence": CTRL_E*2 + CTRL_A*2 + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 4",
+ "Sequence": ALT_F*2 + ALT_B*2 + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 5",
+ "Sequence": " " + CTRL_E*2 + CTRL_A*2 + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 6",
+ "Sequence": " " + CTRL_A + ALT_F*2 + ALT_B*2 + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 7",
+ "Sequence": " " + CTRL_A + CTRL_D + CTRL_E + CTRL_D + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 8",
+ "Sequence": " space" + CTRL_W*2 + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 9",
+ "Sequence": " space" + ALT_BKSPACE*2 + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 10",
+ "Sequence": " space " + CTRL_A + ALT_D*3 + ENTER,
+ "Result": PROMPT},
-# non-printable char tests
- {"Name" : "non-printable test 1",
- "Sequence" : chr(27) + chr(47) + ENTER,
- "Result" : PROMPT},
- {"Name" : "non-printable test 2",
- "Sequence" : chr(27) + chr(128) + ENTER*7,
- "Result" : PROMPT},
- {"Name" : "non-printable test 3",
- "Sequence" : chr(27) + chr(91) + chr(127) + ENTER*6,
- "Result" : PROMPT},
+ # non-printable char tests
+ {"Name": "non-printable test 1",
+ "Sequence": chr(27) + chr(47) + ENTER,
+ "Result": PROMPT},
+ {"Name": "non-printable test 2",
+ "Sequence": chr(27) + chr(128) + ENTER*7,
+ "Result": PROMPT},
+ {"Name": "non-printable test 3",
+ "Sequence": chr(27) + chr(91) + chr(127) + ENTER*6,
+ "Result": PROMPT},
-# miscellaneous tests
- {"Name" : "misc test 1",
- "Sequence" : ENTER,
- "Result" : PROMPT},
- {"Name" : "misc test 2",
- "Sequence" : "single #comment" + ENTER,
- "Result" : SINGLE},
- {"Name" : "misc test 3",
- "Sequence" : "#empty line" + ENTER,
- "Result" : PROMPT},
- {"Name" : "misc test 4",
- "Sequence" : " single " + ENTER,
- "Result" : SINGLE},
- {"Name" : "misc test 5",
- "Sequence" : "single#" + ENTER,
- "Result" : SINGLE},
- {"Name" : "misc test 6",
- "Sequence" : 'a' * 257 + ENTER,
- "Result" : NOT_FOUND},
- {"Name" : "misc test 7",
- "Sequence" : "clear_history" + UP*5 + DOWN*5 + ENTER,
- "Result" : PROMPT},
- {"Name" : "misc test 8",
- "Sequence" : "a" + HELP + CTRL_C,
- "Result" : PROMPT},
- {"Name" : "misc test 9",
- "Sequence" : CTRL_D*3,
- "Result" : None},
+ # miscellaneous tests
+ {"Name": "misc test 1",
+ "Sequence": ENTER,
+ "Result": PROMPT},
+ {"Name": "misc test 2",
+ "Sequence": "single #comment" + ENTER,
+ "Result": SINGLE},
+ {"Name": "misc test 3",
+ "Sequence": "#empty line" + ENTER,
+ "Result": PROMPT},
+ {"Name": "misc test 4",
+ "Sequence": " single " + ENTER,
+ "Result": SINGLE},
+ {"Name": "misc test 5",
+ "Sequence": "single#" + ENTER,
+ "Result": SINGLE},
+ {"Name": "misc test 6",
+ "Sequence": 'a' * 257 + ENTER,
+ "Result": NOT_FOUND},
+ {"Name": "misc test 7",
+ "Sequence": "clear_history" + UP*5 + DOWN*5 + ENTER,
+ "Result": PROMPT},
+ {"Name": "misc test 8",
+ "Sequence": "a" + HELP + CTRL_C,
+ "Result": PROMPT},
+ {"Name": "misc test 9",
+ "Sequence": CTRL_D*3,
+ "Result": None},
]
# Script that uses either test app or qemu controlled by python-pexpect
-import sys, autotest_data, autotest_runner
-
+import autotest_data
+import autotest_runner
+import sys
def usage():
- print"Usage: autotest.py [test app|test iso image]",
- print "[target] [whitelist|-blacklist]"
+ print "Usage: autotest.py [test app|test iso image]",
+ print "[target] [whitelist|-blacklist]"
if len(sys.argv) < 3:
- usage()
- sys.exit(1)
+ usage()
+ sys.exit(1)
target = sys.argv[2]
-test_whitelist=None
-test_blacklist=None
+test_whitelist = None
+test_blacklist = None
# get blacklist/whitelist
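+# e.g. an argument of "eal_flags,memory" whitelists just those tests, while
+# "-eal_flags,memory" blacklists them instead (test names are examples)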
if len(sys.argv) > 3:
- testlist = sys.argv[3].split(',')
- testlist = [test.lower() for test in testlist]
- if testlist[0].startswith('-'):
- testlist[0] = testlist[0].lstrip('-')
- test_blacklist = testlist
- else:
- test_whitelist = testlist
+ testlist = sys.argv[3].split(',')
+ testlist = [test.lower() for test in testlist]
+ if testlist[0].startswith('-'):
+ testlist[0] = testlist[0].lstrip('-')
+ test_blacklist = testlist
+ else:
+ test_whitelist = testlist
-cmdline = "%s -c f -n 4"%(sys.argv[1])
+cmdline = "%s -c f -n 4" % (sys.argv[1])
print cmdline
-runner = autotest_runner.AutotestRunner(cmdline, target, test_blacklist, test_whitelist)
+runner = autotest_runner.AutotestRunner(cmdline, target, test_blacklist,
+ test_whitelist)
for test_group in autotest_data.parallel_test_group_list:
- runner.add_parallel_test_group(test_group)
+ runner.add_parallel_test_group(test_group)
for test_group in autotest_data.non_parallel_test_group_list:
- runner.add_non_parallel_test_group(test_group)
+ runner.add_non_parallel_test_group(test_group)
num_fails = runner.run_all_tests()
-#!/usr/bin/python
+#!/usr/bin/env python
# BSD LICENSE
#
from glob import glob
from autotest_test_funcs import *
+
# quick and dirty function to find out number of sockets
def num_sockets():
- result = len(glob("/sys/devices/system/node/node*"))
- if result == 0:
- return 1
- return result
+ result = len(glob("/sys/devices/system/node/node*"))
+ if result == 0:
+ return 1
+ return result
+
# Assign given number to each socket
# e.g. 32 becomes 32,32 or 32,32,32,32
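per_sockets() is referenced by the test group lists below, but its body is
outside this hunk. A minimal sketch consistent with the comment above,
assuming it simply repeats the amount once per detected socket:

def per_sockets(memory):
    # e.g. per_sockets(32) -> "32,32" on a two-socket machine
    return ",".join([str(memory)] * num_sockets())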
# groups of tests that can be run in parallel
# the grouping has been found largely empirically
parallel_test_group_list = [
-
-{
- "Prefix": "group_1",
- "Memory" : per_sockets(8),
- "Tests" :
- [
- {
- "Name" : "Cycles autotest",
- "Command" : "cycles_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Timer autotest",
- "Command" : "timer_autotest",
- "Func" : timer_autotest,
- "Report" : None,
- },
- {
- "Name" : "Debug autotest",
- "Command" : "debug_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Errno autotest",
- "Command" : "errno_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Meter autotest",
- "Command" : "meter_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Common autotest",
- "Command" : "common_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Resource autotest",
- "Command" : "resource_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix": "group_2",
- "Memory" : "16",
- "Tests" :
- [
- {
- "Name" : "Memory autotest",
- "Command" : "memory_autotest",
- "Func" : memory_autotest,
- "Report" : None,
- },
- {
- "Name" : "Read/write lock autotest",
- "Command" : "rwlock_autotest",
- "Func" : rwlock_autotest,
- "Report" : None,
- },
- {
- "Name" : "Logs autotest",
- "Command" : "logs_autotest",
- "Func" : logs_autotest,
- "Report" : None,
- },
- {
- "Name" : "CPU flags autotest",
- "Command" : "cpuflags_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Version autotest",
- "Command" : "version_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "EAL filesystem autotest",
- "Command" : "eal_fs_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "EAL flags autotest",
- "Command" : "eal_flags_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Hash autotest",
- "Command" : "hash_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ],
-},
-{
- "Prefix": "group_3",
- "Memory" : per_sockets(512),
- "Tests" :
- [
- {
- "Name" : "LPM autotest",
- "Command" : "lpm_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "LPM6 autotest",
- "Command" : "lpm6_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Memcpy autotest",
- "Command" : "memcpy_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Memzone autotest",
- "Command" : "memzone_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "String autotest",
- "Command" : "string_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Alarm autotest",
- "Command" : "alarm_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix": "group_4",
- "Memory" : per_sockets(128),
- "Tests" :
- [
- {
- "Name" : "PCI autotest",
- "Command" : "pci_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Malloc autotest",
- "Command" : "malloc_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Multi-process autotest",
- "Command" : "multiprocess_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Mbuf autotest",
- "Command" : "mbuf_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Per-lcore autotest",
- "Command" : "per_lcore_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Ring autotest",
- "Command" : "ring_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix": "group_5",
- "Memory" : "32",
- "Tests" :
- [
- {
- "Name" : "Spinlock autotest",
- "Command" : "spinlock_autotest",
- "Func" : spinlock_autotest,
- "Report" : None,
- },
- {
- "Name" : "Byte order autotest",
- "Command" : "byteorder_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "TAILQ autotest",
- "Command" : "tailq_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Command-line autotest",
- "Command" : "cmdline_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Interrupts autotest",
- "Command" : "interrupt_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix": "group_6",
- "Memory" : per_sockets(512),
- "Tests" :
- [
- {
- "Name" : "Function reentrancy autotest",
- "Command" : "func_reentrancy_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Mempool autotest",
- "Command" : "mempool_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Atomics autotest",
- "Command" : "atomic_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Prefetch autotest",
- "Command" : "prefetch_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" :"Red autotest",
- "Command" : "red_autotest",
- "Func" :default_autotest,
- "Report" :None,
- },
- ]
-},
-{
- "Prefix" : "group_7",
- "Memory" : "64",
- "Tests" :
- [
- {
- "Name" : "PMD ring autotest",
- "Command" : "ring_pmd_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Access list control autotest",
- "Command" : "acl_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" :"Sched autotest",
- "Command" : "sched_autotest",
- "Func" :default_autotest,
- "Report" :None,
- },
- ]
-},
+ {
+ "Prefix": "group_1",
+ "Memory": per_sockets(8),
+ "Tests":
+ [
+ {
+ "Name": "Cycles autotest",
+ "Command": "cycles_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Timer autotest",
+ "Command": "timer_autotest",
+ "Func": timer_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Debug autotest",
+ "Command": "debug_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Errno autotest",
+ "Command": "errno_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Meter autotest",
+ "Command": "meter_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Common autotest",
+ "Command": "common_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Resource autotest",
+ "Command": "resource_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "group_2",
+ "Memory": "16",
+ "Tests":
+ [
+ {
+ "Name": "Memory autotest",
+ "Command": "memory_autotest",
+ "Func": memory_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Read/write lock autotest",
+ "Command": "rwlock_autotest",
+ "Func": rwlock_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Logs autotest",
+ "Command": "logs_autotest",
+ "Func": logs_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "CPU flags autotest",
+ "Command": "cpuflags_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Version autotest",
+ "Command": "version_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "EAL filesystem autotest",
+ "Command": "eal_fs_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "EAL flags autotest",
+ "Command": "eal_flags_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Hash autotest",
+ "Command": "hash_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ],
+ },
+ {
+ "Prefix": "group_3",
+ "Memory": per_sockets(512),
+ "Tests":
+ [
+ {
+ "Name": "LPM autotest",
+ "Command": "lpm_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "LPM6 autotest",
+ "Command": "lpm6_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Memcpy autotest",
+ "Command": "memcpy_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Memzone autotest",
+ "Command": "memzone_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "String autotest",
+ "Command": "string_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Alarm autotest",
+ "Command": "alarm_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "group_4",
+ "Memory": per_sockets(128),
+ "Tests":
+ [
+ {
+ "Name": "PCI autotest",
+ "Command": "pci_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Malloc autotest",
+ "Command": "malloc_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Multi-process autotest",
+ "Command": "multiprocess_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Mbuf autotest",
+ "Command": "mbuf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Per-lcore autotest",
+ "Command": "per_lcore_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Ring autotest",
+ "Command": "ring_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "group_5",
+ "Memory": "32",
+ "Tests":
+ [
+ {
+ "Name": "Spinlock autotest",
+ "Command": "spinlock_autotest",
+ "Func": spinlock_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Byte order autotest",
+ "Command": "byteorder_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "TAILQ autotest",
+ "Command": "tailq_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Command-line autotest",
+ "Command": "cmdline_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Interrupts autotest",
+ "Command": "interrupt_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "group_6",
+ "Memory": per_sockets(512),
+ "Tests":
+ [
+ {
+ "Name": "Function reentrancy autotest",
+ "Command": "func_reentrancy_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Mempool autotest",
+ "Command": "mempool_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Atomics autotest",
+ "Command": "atomic_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Prefetch autotest",
+ "Command": "prefetch_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Red autotest",
+ "Command": "red_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "group_7",
+ "Memory": "64",
+ "Tests":
+ [
+ {
+ "Name": "PMD ring autotest",
+ "Command": "ring_pmd_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Access list control autotest",
+ "Command": "acl_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Sched autotest",
+ "Command": "sched_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
]
# tests that should not be run when any other tests are running
non_parallel_test_group_list = [
-{
- "Prefix" : "kni",
- "Memory" : "512",
- "Tests" :
- [
- {
- "Name" : "KNI autotest",
- "Command" : "kni_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix": "mempool_perf",
- "Memory" : per_sockets(256),
- "Tests" :
- [
- {
- "Name" : "Mempool performance autotest",
- "Command" : "mempool_perf_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix": "memcpy_perf",
- "Memory" : per_sockets(512),
- "Tests" :
- [
- {
- "Name" : "Memcpy performance autotest",
- "Command" : "memcpy_perf_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix": "hash_perf",
- "Memory" : per_sockets(512),
- "Tests" :
- [
- {
- "Name" : "Hash performance autotest",
- "Command" : "hash_perf_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix" : "power",
- "Memory" : "16",
- "Tests" :
- [
- {
- "Name" : "Power autotest",
- "Command" : "power_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix" : "power_acpi_cpufreq",
- "Memory" : "16",
- "Tests" :
- [
- {
- "Name" : "Power ACPI cpufreq autotest",
- "Command" : "power_acpi_cpufreq_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix" : "power_kvm_vm",
- "Memory" : "16",
- "Tests" :
- [
- {
- "Name" : "Power KVM VM autotest",
- "Command" : "power_kvm_vm_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix": "timer_perf",
- "Memory" : per_sockets(512),
- "Tests" :
- [
- {
- "Name" : "Timer performance autotest",
- "Command" : "timer_perf_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
+ {
+ "Prefix": "kni",
+ "Memory": "512",
+ "Tests":
+ [
+ {
+ "Name": "KNI autotest",
+ "Command": "kni_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "mempool_perf",
+ "Memory": per_sockets(256),
+ "Tests":
+ [
+ {
+ "Name": "Mempool performance autotest",
+ "Command": "mempool_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "memcpy_perf",
+ "Memory": per_sockets(512),
+ "Tests":
+ [
+ {
+ "Name": "Memcpy performance autotest",
+ "Command": "memcpy_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "hash_perf",
+ "Memory": per_sockets(512),
+ "Tests":
+ [
+ {
+ "Name": "Hash performance autotest",
+ "Command": "hash_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "power",
+ "Memory": "16",
+ "Tests":
+ [
+ {
+ "Name": "Power autotest",
+ "Command": "power_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "power_acpi_cpufreq",
+ "Memory": "16",
+ "Tests":
+ [
+ {
+ "Name": "Power ACPI cpufreq autotest",
+ "Command": "power_acpi_cpufreq_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "power_kvm_vm",
+ "Memory": "16",
+ "Tests":
+ [
+ {
+ "Name": "Power KVM VM autotest",
+ "Command": "power_kvm_vm_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "timer_perf",
+ "Memory": per_sockets(512),
+ "Tests":
+ [
+ {
+ "Name": "Timer performance autotest",
+ "Command": "timer_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
-#
-# Please always make sure that ring_perf is the last test!
-#
-{
- "Prefix": "ring_perf",
- "Memory" : per_sockets(512),
- "Tests" :
- [
- {
- "Name" : "Ring performance autotest",
- "Command" : "ring_perf_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
+ #
+ # Please always make sure that ring_perf is the last test!
+ #
+ {
+ "Prefix": "ring_perf",
+ "Memory": per_sockets(512),
+ "Tests":
+ [
+ {
+ "Name": "Ring performance autotest",
+ "Command": "ring_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
]
# The main logic behind running autotests in parallel
-import multiprocessing, subprocess, sys, pexpect, re, time, os, StringIO, csv
+import StringIO
+import csv
+import multiprocessing
+import pexpect
+import re
+import subprocess
+import sys
+import time
# wait for prompt
+
+
def wait_prompt(child):
- try:
- child.sendline()
- result = child.expect(["RTE>>", pexpect.TIMEOUT, pexpect.EOF],
- timeout = 120)
- except:
- return False
- if result == 0:
- return True
- else:
- return False
+ try:
+ child.sendline()
+ result = child.expect(["RTE>>", pexpect.TIMEOUT, pexpect.EOF],
+ timeout=120)
+ except:
+ return False
+ if result == 0:
+ return True
+ else:
+ return False
# run a test group
# each result tuple in the results list consists of:
#   (test result, result string, test name, test time, log, report)
# this function needs to be outside the AutotestRunner class because
# otherwise Pool won't work: Python 2's multiprocessing cannot pickle
# bound methods, so the worker callable must be a module-level function
-def run_test_group(cmdline, test_group):
- results = []
- child = None
- start_time = time.time()
- startuplog = None
-
- # run test app
- try:
- # prepare logging of init
- startuplog = StringIO.StringIO()
-
- print >>startuplog, "\n%s %s\n" % ("="*20, test_group["Prefix"])
- print >>startuplog, "\ncmdline=%s" % cmdline
-
- child = pexpect.spawn(cmdline, logfile=startuplog)
-
- # wait for target to boot
- if not wait_prompt(child):
- child.close()
-
- results.append((-1, "Fail [No prompt]", "Start %s" % test_group["Prefix"],
- time.time() - start_time, startuplog.getvalue(), None))
-
- # mark all tests as failed
- for test in test_group["Tests"]:
- results.append((-1, "Fail [No prompt]", test["Name"],
- time.time() - start_time, "", None))
- # exit test
- return results
-
- except:
- results.append((-1, "Fail [Can't run]", "Start %s" % test_group["Prefix"],
- time.time() - start_time, startuplog.getvalue(), None))
-
- # mark all tests as failed
- for t in test_group["Tests"]:
- results.append((-1, "Fail [Can't run]", t["Name"],
- time.time() - start_time, "", None))
- # exit test
- return results
-
- # startup was successful
- results.append((0, "Success", "Start %s" % test_group["Prefix"],
- time.time() - start_time, startuplog.getvalue(), None))
-
- # parse the binary for available test commands
- binary = cmdline.split()[0]
- stripped = 'not stripped' not in subprocess.check_output(['file', binary])
- if not stripped:
- symbols = subprocess.check_output(['nm', binary]).decode('utf-8')
- avail_cmds = re.findall('test_register_(\w+)', symbols)
-
- # run all tests in test group
- for test in test_group["Tests"]:
-
- # create log buffer for each test
- # in multiprocessing environment, the logging would be
- # interleaved and will create a mess, hence the buffering
- logfile = StringIO.StringIO()
- child.logfile = logfile
-
- result = ()
-
- # make a note when the test started
- start_time = time.time()
-
- try:
- # print test name to log buffer
- print >>logfile, "\n%s %s\n" % ("-"*20, test["Name"])
-
- # run test function associated with the test
- if stripped or test["Command"] in avail_cmds:
- result = test["Func"](child, test["Command"])
- else:
- result = (0, "Skipped [Not Available]")
-
- # make a note when the test was finished
- end_time = time.time()
-
- # append test data to the result tuple
- result += (test["Name"], end_time - start_time,
- logfile.getvalue())
-
- # call report function, if any defined, and supply it with
- # target and complete log for test run
- if test["Report"]:
- report = test["Report"](self.target, log)
-
- # append report to results tuple
- result += (report,)
- else:
- # report is None
- result += (None,)
- except:
- # make a note when the test crashed
- end_time = time.time()
-
- # mark test as failed
- result = (-1, "Fail [Crash]", test["Name"],
- end_time - start_time, logfile.getvalue(), None)
- finally:
- # append the results to the results list
- results.append(result)
-
- # regardless of whether test has crashed, try quitting it
- try:
- child.sendline("quit")
- child.close()
- # if the test crashed, just do nothing instead
- except:
- # nop
- pass
-
- # return test results
- return results
-
+def run_test_group(cmdline, test_group):
+ results = []
+ child = None
+ start_time = time.time()
+ startuplog = None
+
+ # run test app
+ try:
+ # prepare logging of init
+ startuplog = StringIO.StringIO()
+
+ print >>startuplog, "\n%s %s\n" % ("=" * 20, test_group["Prefix"])
+ print >>startuplog, "\ncmdline=%s" % cmdline
+
+ child = pexpect.spawn(cmdline, logfile=startuplog)
+
+ # wait for target to boot
+ if not wait_prompt(child):
+ child.close()
+
+ results.append((-1,
+ "Fail [No prompt]",
+ "Start %s" % test_group["Prefix"],
+ time.time() - start_time,
+ startuplog.getvalue(),
+ None))
+
+ # mark all tests as failed
+ for test in test_group["Tests"]:
+ results.append((-1, "Fail [No prompt]", test["Name"],
+ time.time() - start_time, "", None))
+ # exit test
+ return results
+
+ except:
+ results.append((-1,
+ "Fail [Can't run]",
+ "Start %s" % test_group["Prefix"],
+ time.time() - start_time,
+ startuplog.getvalue(),
+ None))
+
+ # mark all tests as failed
+ for t in test_group["Tests"]:
+ results.append((-1, "Fail [Can't run]", t["Name"],
+ time.time() - start_time, "", None))
+ # exit test
+ return results
+
+ # startup was successful
+ results.append((0, "Success", "Start %s" % test_group["Prefix"],
+ time.time() - start_time, startuplog.getvalue(), None))
+
+ # parse the binary for available test commands
+ binary = cmdline.split()[0]
+ stripped = 'not stripped' not in subprocess.check_output(['file', binary])
+ if not stripped:
+ symbols = subprocess.check_output(['nm', binary]).decode('utf-8')
+ avail_cmds = re.findall(r'test_register_(\w+)', symbols)
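+ # avail_cmds now holds the registered test command names; e.g. a
+ # symbol test_register_memory_autotest (name illustrative) yields
+ # "memory_autotest"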
+
+ # run all tests in test group
+ for test in test_group["Tests"]:
+
+ # create a log buffer for each test
+ # in a multiprocessing environment the logging would be
+ # interleaved and create a mess, hence the buffering
+ logfile = StringIO.StringIO()
+ child.logfile = logfile
+
+ result = ()
+
+ # make a note when the test started
+ start_time = time.time()
+
+ try:
+ # print test name to log buffer
+ print >>logfile, "\n%s %s\n" % ("-" * 20, test["Name"])
+
+ # run test function associated with the test
+ if stripped or test["Command"] in avail_cmds:
+ result = test["Func"](child, test["Command"])
+ else:
+ result = (0, "Skipped [Not Available]")
+
+ # make a note when the test was finished
+ end_time = time.time()
+
+ # append test data to the result tuple
+ result += (test["Name"], end_time - start_time,
+ logfile.getvalue())
+
+ # call report function, if any defined, and supply it with
+ # target and complete log for test run
+ # (note: self.target and log are not defined in this module-level
+ # function; the branch is effectively unused because every test in
+ # autotest_data sets Report to None)
+ if test["Report"]:
+ report = test["Report"](self.target, log)
+
+ # append report to results tuple
+ result += (report,)
+ else:
+ # report is None
+ result += (None,)
+ except:
+ # make a note when the test crashed
+ end_time = time.time()
+
+ # mark test as failed
+ result = (-1, "Fail [Crash]", test["Name"],
+ end_time - start_time, logfile.getvalue(), None)
+ finally:
+ # append the results to the results list
+ results.append(result)
+
+ # regardless of whether test has crashed, try quitting it
+ try:
+ child.sendline("quit")
+ child.close()
+ # if the test crashed, just do nothing instead
+ except:
+ # nop
+ pass
+
+ # return test results
+ return results
# class representing an instance of autotests run
class AutotestRunner:
- cmdline = ""
- parallel_test_groups = []
- non_parallel_test_groups = []
- logfile = None
- csvwriter = None
- target = ""
- start = None
- n_tests = 0
- fails = 0
- log_buffers = []
- blacklist = []
- whitelist = []
-
-
- def __init__(self, cmdline, target, blacklist, whitelist):
- self.cmdline = cmdline
- self.target = target
- self.blacklist = blacklist
- self.whitelist = whitelist
-
- # log file filename
- logfile = "%s.log" % target
- csvfile = "%s.csv" % target
-
- self.logfile = open(logfile, "w")
- csvfile = open(csvfile, "w")
- self.csvwriter = csv.writer(csvfile)
-
- # prepare results table
- self.csvwriter.writerow(["test_name","test_result","result_str"])
-
-
-
- # set up cmdline string
- def __get_cmdline(self, test):
- cmdline = self.cmdline
-
- # append memory limitations for each test
- # otherwise tests won't run in parallel
- if not "i686" in self.target:
- cmdline += " --socket-mem=%s"% test["Memory"]
- else:
- # affinitize startup so that tests don't fail on i686
- cmdline = "taskset 1 " + cmdline
- cmdline += " -m " + str(sum(map(int,test["Memory"].split(","))))
-
- # set group prefix for autotest group
- # otherwise they won't run in parallel
- cmdline += " --file-prefix=%s"% test["Prefix"]
-
- return cmdline
-
-
-
- def add_parallel_test_group(self,test_group):
- self.parallel_test_groups.append(test_group)
-
- def add_non_parallel_test_group(self,test_group):
- self.non_parallel_test_groups.append(test_group)
-
-
- def __process_results(self, results):
- # this iterates over individual test results
- for i, result in enumerate(results):
-
- # increase total number of tests that were run
- # do not include "start" test
- if i > 0:
- self.n_tests += 1
-
- # unpack result tuple
- test_result, result_str, test_name, \
- test_time, log, report = result
-
- # get total run time
- cur_time = time.time()
- total_time = int(cur_time - self.start)
-
- # print results, test run time and total time since start
- print ("%s:" % test_name).ljust(30),
- print result_str.ljust(29),
- print "[%02dm %02ds]" % (test_time / 60, test_time % 60),
-
- # don't print out total time every line, it's the same anyway
- if i == len(results) - 1:
- print "[%02dm %02ds]" % (total_time / 60, total_time % 60)
- else:
- print ""
-
- # if test failed and it wasn't a "start" test
- if test_result < 0 and not i == 0:
- self.fails += 1
-
- # collect logs
- self.log_buffers.append(log)
-
- # create report if it exists
- if report:
- try:
- f = open("%s_%s_report.rst" % (self.target,test_name), "w")
- except IOError:
- print "Report for %s could not be created!" % test_name
- else:
- with f:
- f.write(report)
-
- # write test result to CSV file
- if i != 0:
- self.csvwriter.writerow([test_name, test_result, result_str])
-
-
-
-
- # this function iterates over test groups and removes each
- # test that is not in whitelist/blacklist
- def __filter_groups(self, test_groups):
- groups_to_remove = []
-
- # filter out tests from parallel test groups
- for i, test_group in enumerate(test_groups):
-
- # iterate over a copy so that we could safely delete individual tests
- for test in test_group["Tests"][:]:
- test_id = test["Command"]
-
- # dump tests are specified in full e.g. "Dump_mempool"
- if "_autotest" in test_id:
- test_id = test_id[:-len("_autotest")]
-
- # filter out blacklisted/whitelisted tests
- if self.blacklist and test_id in self.blacklist:
- test_group["Tests"].remove(test)
- continue
- if self.whitelist and test_id not in self.whitelist:
- test_group["Tests"].remove(test)
- continue
-
- # modify or remove original group
- if len(test_group["Tests"]) > 0:
- test_groups[i] = test_group
- else:
- # remember which groups should be deleted
- # put the numbers backwards so that we start
- # deleting from the end, not from the beginning
- groups_to_remove.insert(0, i)
-
- # remove test groups that need to be removed
- for i in groups_to_remove:
- del test_groups[i]
-
- return test_groups
-
-
-
- # iterate over test groups and run tests associated with them
- def run_all_tests(self):
- # filter groups
- self.parallel_test_groups = \
- self.__filter_groups(self.parallel_test_groups)
- self.non_parallel_test_groups = \
- self.__filter_groups(self.non_parallel_test_groups)
-
- # create a pool of worker threads
- pool = multiprocessing.Pool(processes=1)
-
- results = []
-
- # whatever happens, try to save as much logs as possible
- try:
-
- # create table header
- print ""
- print "Test name".ljust(30),
- print "Test result".ljust(29),
- print "Test".center(9),
- print "Total".center(9)
- print "=" * 80
-
- # make a note of tests start time
- self.start = time.time()
-
- # assign worker threads to run test groups
- for test_group in self.parallel_test_groups:
- result = pool.apply_async(run_test_group,
- [self.__get_cmdline(test_group), test_group])
- results.append(result)
-
- # iterate while we have group execution results to get
- while len(results) > 0:
-
- # iterate over a copy to be able to safely delete results
- # this iterates over a list of group results
- for group_result in results[:]:
-
- # if the thread hasn't finished yet, continue
- if not group_result.ready():
- continue
-
- res = group_result.get()
-
- self.__process_results(res)
-
- # remove result from results list once we're done with it
- results.remove(group_result)
-
- # run non_parallel tests. they are run one by one, synchronously
- for test_group in self.non_parallel_test_groups:
- group_result = run_test_group(self.__get_cmdline(test_group), test_group)
-
- self.__process_results(group_result)
-
- # get total run time
- cur_time = time.time()
- total_time = int(cur_time - self.start)
-
- # print out summary
- print "=" * 80
- print "Total run time: %02dm %02ds" % (total_time / 60, total_time % 60)
- if self.fails != 0:
- print "Number of failed tests: %s" % str(self.fails)
-
- # write summary to logfile
- self.logfile.write("Summary\n")
- self.logfile.write("Target: ".ljust(15) + "%s\n" % self.target)
- self.logfile.write("Tests: ".ljust(15) + "%i\n" % self.n_tests)
- self.logfile.write("Failed tests: ".ljust(15) + "%i\n" % self.fails)
- except:
- print "Exception occured"
- print sys.exc_info()
- self.fails = 1
-
- # drop logs from all executions to a logfile
- for buf in self.log_buffers:
- self.logfile.write(buf.replace("\r",""))
-
- log_buffers = []
-
- return self.fails
+ cmdline = ""
+ parallel_test_groups = []
+ non_parallel_test_groups = []
+ logfile = None
+ csvwriter = None
+ target = ""
+ start = None
+ n_tests = 0
+ fails = 0
+ log_buffers = []
+ blacklist = []
+ whitelist = []
+
+ def __init__(self, cmdline, target, blacklist, whitelist):
+ self.cmdline = cmdline
+ self.target = target
+ self.blacklist = blacklist
+ self.whitelist = whitelist
+
+ # log file filename
+ logfile = "%s.log" % target
+ csvfile = "%s.csv" % target
+
+ self.logfile = open(logfile, "w")
+ csvfile = open(csvfile, "w")
+ self.csvwriter = csv.writer(csvfile)
+
+ # prepare results table
+ self.csvwriter.writerow(["test_name", "test_result", "result_str"])
+
+ # set up cmdline string
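+ # e.g. a group with Prefix "group_5" and Memory "32" on a non-i686
+ # target yields something like (illustration only):
+ # <test app> -c f -n 4 --socket-mem=32 --file-prefix=group_5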
+ def __get_cmdline(self, test):
+ cmdline = self.cmdline
+
+ # append memory limitations for each test
+ # otherwise tests won't run in parallel
+ if "i686" not in self.target:
+ cmdline += " --socket-mem=%s" % test["Memory"]
+ else:
+ # affinitize startup so that tests don't fail on i686
+ cmdline = "taskset 1 " + cmdline
+ cmdline += " -m " + str(sum(map(int, test["Memory"].split(","))))
+
+ # set group prefix for autotest group
+ # otherwise they won't run in parallel
+ cmdline += " --file-prefix=%s" % test["Prefix"]
+
+ return cmdline
+
+ def add_parallel_test_group(self, test_group):
+ self.parallel_test_groups.append(test_group)
+
+ def add_non_parallel_test_group(self, test_group):
+ self.non_parallel_test_groups.append(test_group)
+
+ def __process_results(self, results):
+ # this iterates over individual test results
+ for i, result in enumerate(results):
+
+ # increase total number of tests that were run
+ # do not include "start" test
+ if i > 0:
+ self.n_tests += 1
+
+ # unpack result tuple
+ test_result, result_str, test_name, \
+ test_time, log, report = result
+
+ # get total run time
+ cur_time = time.time()
+ total_time = int(cur_time - self.start)
+
+ # print results, test run time and total time since start
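+ # e.g. (illustrative): "Memory autotest:   Success   [00m 05s]"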
+ print ("%s:" % test_name).ljust(30),
+ print result_str.ljust(29),
+ print "[%02dm %02ds]" % (test_time / 60, test_time % 60),
+
+ # don't print out total time every line, it's the same anyway
+ if i == len(results) - 1:
+ print "[%02dm %02ds]" % (total_time / 60, total_time % 60)
+ else:
+ print ""
+
+ # if test failed and it wasn't a "start" test
+ if test_result < 0 and not i == 0:
+ self.fails += 1
+
+ # collect logs
+ self.log_buffers.append(log)
+
+ # create report if it exists
+ if report:
+ try:
+ f = open("%s_%s_report.rst" %
+ (self.target, test_name), "w")
+ except IOError:
+ print "Report for %s could not be created!" % test_name
+ else:
+ with f:
+ f.write(report)
+
+ # write test result to CSV file
+ if i != 0:
+ self.csvwriter.writerow([test_name, test_result, result_str])
+
+ # this function iterates over test groups and removes each
+ # test that is blacklisted or not whitelisted
+ def __filter_groups(self, test_groups):
+ groups_to_remove = []
+
+ # filter out tests from parallel test groups
+ for i, test_group in enumerate(test_groups):
+
+ # iterate over a copy so that we can safely delete
+ # individual tests
+ for test in test_group["Tests"][:]:
+ test_id = test["Command"]
+
+ # dump tests are specified in full e.g. "Dump_mempool"
+ if "_autotest" in test_id:
+ test_id = test_id[:-len("_autotest")]
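+ # e.g. "memory_autotest" is matched against the lists as "memory"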
+
+ # filter out blacklisted/whitelisted tests
+ if self.blacklist and test_id in self.blacklist:
+ test_group["Tests"].remove(test)
+ continue
+ if self.whitelist and test_id not in self.whitelist:
+ test_group["Tests"].remove(test)
+ continue
+
+ # modify or remove original group
+ if len(test_group["Tests"]) > 0:
+ test_groups[i] = test_group
+ else:
+ # remember which groups should be deleted
+ # put the numbers backwards so that we start
+ # deleting from the end, not from the beginning
+ groups_to_remove.insert(0, i)
+
+ # remove test groups that need to be removed
+ for i in groups_to_remove:
+ del test_groups[i]
+
+ return test_groups
+
+ # iterate over test groups and run tests associated with them
+ def run_all_tests(self):
+ # filter groups
+ self.parallel_test_groups = \
+ self.__filter_groups(self.parallel_test_groups)
+ self.non_parallel_test_groups = \
+ self.__filter_groups(self.non_parallel_test_groups)
+
+ # create a pool of worker threads
+ pool = multiprocessing.Pool(processes=1)
+
+ results = []
+
+ # whatever happens, try to save as much logs as possible
+ try:
+
+ # create table header
+ print ""
+ print "Test name".ljust(30),
+ print "Test result".ljust(29),
+ print "Test".center(9),
+ print "Total".center(9)
+ print "=" * 80
+
+ # make a note of tests start time
+ self.start = time.time()
+
+ # assign worker threads to run test groups
+ for test_group in self.parallel_test_groups:
+ result = pool.apply_async(run_test_group,
+ [self.__get_cmdline(test_group),
+ test_group])
+ results.append(result)
+
+ # iterate while we have group execution results to get
+ while len(results) > 0:
+
+ # iterate over a copy to be able to safely delete results
+ # this iterates over a list of group results
+ for group_result in results[:]:
+
+ # if the thread hasn't finished yet, continue
+ if not group_result.ready():
+ continue
+
+ res = group_result.get()
+
+ self.__process_results(res)
+
+ # remove result from results list once we're done with it
+ results.remove(group_result)
+
+ # run non-parallel tests; they are run one by one, synchronously
+ for test_group in self.non_parallel_test_groups:
+ group_result = run_test_group(
+ self.__get_cmdline(test_group), test_group)
+
+ self.__process_results(group_result)
+
+ # get total run time
+ cur_time = time.time()
+ total_time = int(cur_time - self.start)
+
+ # print out summary
+ print "=" * 80
+ print "Total run time: %02dm %02ds" % (total_time / 60,
+ total_time % 60)
+ if self.fails != 0:
+ print "Number of failed tests: %s" % str(self.fails)
+
+ # write summary to logfile
+ self.logfile.write("Summary\n")
+ self.logfile.write("Target: ".ljust(15) + "%s\n" % self.target)
+ self.logfile.write("Tests: ".ljust(15) + "%i\n" % self.n_tests)
+ self.logfile.write("Failed tests: ".ljust(
+ 15) + "%i\n" % self.fails)
+ except:
+ print "Exception occurred"
+ print sys.exc_info()
+ self.fails = 1
+
+ # drop logs from all executions to a logfile
+ for buf in self.log_buffers:
+ self.logfile.write(buf.replace("\r", ""))
+
+ return self.fails
# Test functions
-import sys, pexpect, time, os, re
+import pexpect
# default autotest, used to run most tests
# waits for "Test OK"
+
+
def default_autotest(child, test_name):
- child.sendline(test_name)
- result = child.expect(["Test OK", "Test Failed",
- "Command not found", pexpect.TIMEOUT], timeout = 900)
- if result == 1:
- return -1, "Fail"
- elif result == 2:
- return -1, "Fail [Not found]"
- elif result == 3:
- return -1, "Fail [Timeout]"
- return 0, "Success"
+ child.sendline(test_name)
+ result = child.expect(["Test OK", "Test Failed",
+ "Command not found", pexpect.TIMEOUT], timeout=900)
+ if result == 1:
+ return -1, "Fail"
+ elif result == 2:
+ return -1, "Fail [Not found]"
+ elif result == 3:
+ return -1, "Fail [Timeout]"
+ return 0, "Success"
# autotest used to run dump commands
# just fires the command
+
+
def dump_autotest(child, test_name):
- child.sendline(test_name)
- return 0, "Success"
+ child.sendline(test_name)
+ return 0, "Success"
# memory autotest
# reads output and waits for Test OK
+
+
def memory_autotest(child, test_name):
- child.sendline(test_name)
- regexp = "phys:0x[0-9a-f]*, len:([0-9]*), virt:0x[0-9a-f]*, socket_id:[0-9]*"
- index = child.expect([regexp, pexpect.TIMEOUT], timeout = 180)
- if index != 0:
- return -1, "Fail [Timeout]"
- size = int(child.match.groups()[0], 16)
- if size <= 0:
- return -1, "Fail [Bad size]"
- index = child.expect(["Test OK", "Test Failed",
- pexpect.TIMEOUT], timeout = 10)
- if index == 1:
- return -1, "Fail"
- elif index == 2:
- return -1, "Fail [Timeout]"
- return 0, "Success"
+ child.sendline(test_name)
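+ # the regexp below matches dump lines such as (values illustrative):
+ # phys:0x40000000, len:268435456, virt:0x7f2c40000000, socket_id:0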
+ regexp = "phys:0x[0-9a-f]*, len:([0-9]*), virt:0x[0-9a-f]*, " \
+ "socket_id:[0-9]*"
+ index = child.expect([regexp, pexpect.TIMEOUT], timeout=180)
+ if index != 0:
+ return -1, "Fail [Timeout]"
+ size = int(child.match.groups()[0], 16)
+ if size <= 0:
+ return -1, "Fail [Bad size]"
+ index = child.expect(["Test OK", "Test Failed",
+ pexpect.TIMEOUT], timeout=10)
+ if index == 1:
+ return -1, "Fail"
+ elif index == 2:
+ return -1, "Fail [Timeout]"
+ return 0, "Success"
+
def spinlock_autotest(child, test_name):
- i = 0
- ir = 0
- child.sendline(test_name)
- while True:
- index = child.expect(["Test OK",
- "Test Failed",
- "Hello from core ([0-9]*) !",
- "Hello from within recursive locks from ([0-9]*) !",
- pexpect.TIMEOUT], timeout = 5)
- # ok
- if index == 0:
- break
-
- # message, check ordering
- elif index == 2:
- if int(child.match.groups()[0]) < i:
- return -1, "Fail [Bad order]"
- i = int(child.match.groups()[0])
- elif index == 3:
- if int(child.match.groups()[0]) < ir:
- return -1, "Fail [Bad order]"
- ir = int(child.match.groups()[0])
-
- # fail
- elif index == 4:
- return -1, "Fail [Timeout]"
- elif index == 1:
- return -1, "Fail"
-
- return 0, "Success"
+ i = 0
+ ir = 0
+ child.sendline(test_name)
+ while True:
+ index = child.expect(["Test OK",
+ "Test Failed",
+ "Hello from core ([0-9]*) !",
+ "Hello from within recursive locks "
+ "from ([0-9]*) !",
+ pexpect.TIMEOUT], timeout=5)
+ # ok
+ if index == 0:
+ break
+
+ # message, check ordering
+ elif index == 2:
+ if int(child.match.groups()[0]) < i:
+ return -1, "Fail [Bad order]"
+ i = int(child.match.groups()[0])
+ elif index == 3:
+ if int(child.match.groups()[0]) < ir:
+ return -1, "Fail [Bad order]"
+ ir = int(child.match.groups()[0])
+
+ # fail
+ elif index == 4:
+ return -1, "Fail [Timeout]"
+ elif index == 1:
+ return -1, "Fail"
+
+ return 0, "Success"
+
def rwlock_autotest(child, test_name):
- i = 0
- child.sendline(test_name)
- while True:
- index = child.expect(["Test OK",
- "Test Failed",
- "Hello from core ([0-9]*) !",
- "Global write lock taken on master core ([0-9]*)",
- pexpect.TIMEOUT], timeout = 10)
- # ok
- if index == 0:
- if i != 0xffff:
- return -1, "Fail [Message is missing]"
- break
-
- # message, check ordering
- elif index == 2:
- if int(child.match.groups()[0]) < i:
- return -1, "Fail [Bad order]"
- i = int(child.match.groups()[0])
-
- # must be the last message, check ordering
- elif index == 3:
- i = 0xffff
-
- elif index == 4:
- return -1, "Fail [Timeout]"
-
- # fail
- else:
- return -1, "Fail"
-
- return 0, "Success"
+ i = 0
+ child.sendline(test_name)
+ while True:
+ index = child.expect(["Test OK",
+ "Test Failed",
+ "Hello from core ([0-9]*) !",
+ "Global write lock taken on master "
+ "core ([0-9]*)",
+ pexpect.TIMEOUT], timeout=10)
+ # ok
+ if index == 0:
+ if i != 0xffff:
+ return -1, "Fail [Message is missing]"
+ break
+
+ # message, check ordering
+ elif index == 2:
+ if int(child.match.groups()[0]) < i:
+ return -1, "Fail [Bad order]"
+ i = int(child.match.groups()[0])
+
+ # must be the last message, check ordering
+ elif index == 3:
+ i = 0xffff
+
+ elif index == 4:
+ return -1, "Fail [Timeout]"
+
+ # fail
+ else:
+ return -1, "Fail"
+
+ return 0, "Success"
+
def logs_autotest(child, test_name):
- i = 0
- child.sendline(test_name)
-
- log_list = [
- "TESTAPP1: error message",
- "TESTAPP1: critical message",
- "TESTAPP2: critical message",
- "TESTAPP1: error message",
- ]
-
- for log_msg in log_list:
- index = child.expect([log_msg,
- "Test OK",
- "Test Failed",
- pexpect.TIMEOUT], timeout = 10)
-
- if index == 3:
- return -1, "Fail [Timeout]"
- # not ok
- elif index != 0:
- return -1, "Fail"
-
- index = child.expect(["Test OK",
- "Test Failed",
- pexpect.TIMEOUT], timeout = 10)
-
- return 0, "Success"
+ child.sendline(test_name)
+
+ log_list = [
+ "TESTAPP1: error message",
+ "TESTAPP1: critical message",
+ "TESTAPP2: critical message",
+ "TESTAPP1: error message",
+ ]
+
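+ # each expected log line must appear in order; a missing line or an
+ # early test verdict fails the test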
+ for log_msg in log_list:
+ index = child.expect([log_msg,
+ "Test OK",
+ "Test Failed",
+ pexpect.TIMEOUT], timeout=10)
+
+ if index == 3:
+ return -1, "Fail [Timeout]"
+ # not ok
+ elif index != 0:
+ return -1, "Fail"
+
+ index = child.expect(["Test OK",
+ "Test Failed",
+ pexpect.TIMEOUT], timeout=10)
+
+ return 0, "Success"
+
def timer_autotest(child, test_name):
- i = 0
- child.sendline(test_name)
-
- index = child.expect(["Start timer stress tests",
- "Test Failed",
- pexpect.TIMEOUT], timeout = 5)
-
- if index == 1:
- return -1, "Fail"
- elif index == 2:
- return -1, "Fail [Timeout]"
-
- index = child.expect(["Start timer stress tests 2",
- "Test Failed",
- pexpect.TIMEOUT], timeout = 5)
-
- if index == 1:
- return -1, "Fail"
- elif index == 2:
- return -1, "Fail [Timeout]"
-
- index = child.expect(["Start timer basic tests",
- "Test Failed",
- pexpect.TIMEOUT], timeout = 5)
-
- if index == 1:
- return -1, "Fail"
- elif index == 2:
- return -1, "Fail [Timeout]"
-
- prev_lcore_timer1 = -1
-
- lcore_tim0 = -1
- lcore_tim1 = -1
- lcore_tim2 = -1
- lcore_tim3 = -1
-
- while True:
- index = child.expect(["TESTTIMER: ([0-9]*): callback id=([0-9]*) count=([0-9]*) on core ([0-9]*)",
- "Test OK",
- "Test Failed",
- pexpect.TIMEOUT], timeout = 10)
-
- if index == 1:
- break
-
- if index == 2:
- return -1, "Fail"
- elif index == 3:
- return -1, "Fail [Timeout]"
-
- try:
- t = int(child.match.groups()[0])
- id = int(child.match.groups()[1])
- cnt = int(child.match.groups()[2])
- lcore = int(child.match.groups()[3])
- except:
- return -1, "Fail [Cannot parse]"
-
- # timer0 always expires on the same core when cnt < 20
- if id == 0:
- if lcore_tim0 == -1:
- lcore_tim0 = lcore
- elif lcore != lcore_tim0 and cnt < 20:
- return -1, "Fail [lcore != lcore_tim0 (%d, %d)]"%(lcore, lcore_tim0)
- if cnt > 21:
- return -1, "Fail [tim0 cnt > 21]"
-
- # timer1 each time expires on a different core
- if id == 1:
- if lcore == lcore_tim1:
- return -1, "Fail [lcore == lcore_tim1 (%d, %d)]"%(lcore, lcore_tim1)
- lcore_tim1 = lcore
- if cnt > 10:
- return -1, "Fail [tim1 cnt > 30]"
-
- # timer0 always expires on the same core
- if id == 2:
- if lcore_tim2 == -1:
- lcore_tim2 = lcore
- elif lcore != lcore_tim2:
- return -1, "Fail [lcore != lcore_tim2 (%d, %d)]"%(lcore, lcore_tim2)
- if cnt > 30:
- return -1, "Fail [tim2 cnt > 30]"
-
- # timer0 always expires on the same core
- if id == 3:
- if lcore_tim3 == -1:
- lcore_tim3 = lcore
- elif lcore != lcore_tim3:
- return -1, "Fail [lcore_tim3 changed (%d -> %d)]"%(lcore, lcore_tim3)
- if cnt > 30:
- return -1, "Fail [tim3 cnt > 30]"
-
- # must be 2 different cores
- if lcore_tim0 == lcore_tim3:
- return -1, "Fail [lcore_tim0 (%d) == lcore_tim3 (%d)]"%(lcore_tim0, lcore_tim3)
-
- return 0, "Success"
+ child.sendline(test_name)
+
+ index = child.expect(["Start timer stress tests",
+ "Test Failed",
+ pexpect.TIMEOUT], timeout=5)
+
+ if index == 1:
+ return -1, "Fail"
+ elif index == 2:
+ return -1, "Fail [Timeout]"
+
+ index = child.expect(["Start timer stress tests 2",
+ "Test Failed",
+ pexpect.TIMEOUT], timeout=5)
+
+ if index == 1:
+ return -1, "Fail"
+ elif index == 2:
+ return -1, "Fail [Timeout]"
+
+ index = child.expect(["Start timer basic tests",
+ "Test Failed",
+ pexpect.TIMEOUT], timeout=5)
+
+ if index == 1:
+ return -1, "Fail"
+ elif index == 2:
+ return -1, "Fail [Timeout]"
+
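+ # record the lcore each timer last fired on (-1 means not seen yet)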
+ lcore_tim0 = -1
+ lcore_tim1 = -1
+ lcore_tim2 = -1
+ lcore_tim3 = -1
+
+ while True:
+ index = child.expect(["TESTTIMER: ([0-9]*): callback id=([0-9]*) "
+ "count=([0-9]*) on core ([0-9]*)",
+ "Test OK",
+ "Test Failed",
+ pexpect.TIMEOUT], timeout=10)
+
+ if index == 1:
+ break
+
+ if index == 2:
+ return -1, "Fail"
+ elif index == 3:
+ return -1, "Fail [Timeout]"
+
+ try:
+ id = int(child.match.groups()[1])
+ cnt = int(child.match.groups()[2])
+ lcore = int(child.match.groups()[3])
+ except:
+ return -1, "Fail [Cannot parse]"
+
+ # timer0 always expires on the same core when cnt < 20
+ if id == 0:
+ if lcore_tim0 == -1:
+ lcore_tim0 = lcore
+ elif lcore != lcore_tim0 and cnt < 20:
+ return -1, "Fail [lcore != lcore_tim0 (%d, %d)]" \
+ % (lcore, lcore_tim0)
+ if cnt > 21:
+ return -1, "Fail [tim0 cnt > 21]"
+
+ # timer1 each time expires on a different core
+ if id == 1:
+ if lcore == lcore_tim1:
+ return -1, "Fail [lcore == lcore_tim1 (%d, %d)]" \
+ % (lcore, lcore_tim1)
+ lcore_tim1 = lcore
+ if cnt > 10:
+ return -1, "Fail [tim1 cnt > 30]"
+
+ # timer2 always expires on the same core
+ if id == 2:
+ if lcore_tim2 == -1:
+ lcore_tim2 = lcore
+ elif lcore != lcore_tim2:
+ return -1, "Fail [lcore != lcore_tim2 (%d, %d)]" \
+ % (lcore, lcore_tim2)
+ if cnt > 30:
+ return -1, "Fail [tim2 cnt > 30]"
+
+ # timer3 always expires on the same core
+ if id == 3:
+ if lcore_tim3 == -1:
+ lcore_tim3 = lcore
+ elif lcore != lcore_tim3:
+ return -1, "Fail [lcore_tim3 changed (%d -> %d)]" \
+ % (lcore_tim3, lcore)
+ if cnt > 30:
+ return -1, "Fail [tim3 cnt > 30]"
+
+ # must be 2 different cores
+ if lcore_tim0 == lcore_tim3:
+ return -1, "Fail [lcore_tim0 (%d) == lcore_tim3 (%d)]" \
+ % (lcore_tim0, lcore_tim3)
+
+ return 0, "Success"
+
def ring_autotest(child, test_name):
- child.sendline(test_name)
- index = child.expect(["Test OK", "Test Failed",
- pexpect.TIMEOUT], timeout = 2)
- if index == 1:
- return -1, "Fail"
- elif index == 2:
- return -1, "Fail [Timeout]"
-
- child.sendline("set_watermark test 100")
- child.sendline("dump_ring test")
- index = child.expect([" watermark=100",
- pexpect.TIMEOUT], timeout = 1)
- if index != 0:
- return -1, "Fail [Bad watermark]"
-
- return 0, "Success"
+ child.sendline(test_name)
+ index = child.expect(["Test OK", "Test Failed",
+ pexpect.TIMEOUT], timeout=2)
+ if index == 1:
+ return -1, "Fail"
+ elif index == 2:
+ return -1, "Fail [Timeout]"
+
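+ # set a watermark on the test ring and check that it shows up in
+ # the ring dump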
+ child.sendline("set_watermark test 100")
+ child.sendline("dump_ring test")
+ index = child.expect([" watermark=100",
+ pexpect.TIMEOUT], timeout=1)
+ if index != 0:
+ return -1, "Fail [Bad watermark]"
+
+ return 0, "Success"
html_show_copyright = False
highlight_language = 'none'
-version = subprocess.check_output(['make', '-sRrC', '../../', 'showversion']).decode('utf-8').rstrip()
+version = subprocess.check_output(['make', '-sRrC', '../../', 'showversion'])
+version = version.decode('utf-8').rstrip()
release = version
master_doc = 'index'
'preamble': latex_preamble
}
+
# Override the default Latex formatter in order to modify the
# code/verbatim blocks.
class CustomLatexFormatter(LatexFormatter):
("tools/devbind", "dpdk-devbind",
"check device status and bind/unbind them from drivers", "", 8)]
-######## :numref: fallback ########
+
+# ####### :numref: fallback ########
# The following hook functions add some simple handling for the :numref:
# directive for Sphinx versions prior to 1.3.1. The functions replace the
# :numref: reference with a link to the target (for all Sphinx doc types).
# It doesn't try to label figures/tables.
-
def numref_role(reftype, rawtext, text, lineno, inliner):
"""
Add a Sphinx role to handle numref references. Note, we can't convert
internal=True)
return [newnode], []
+
def process_numref(app, doctree, from_docname):
"""
Process the numref nodes once the doctree has been built and prior to
# the DPDK ip_pipeline application.
#
# The input configuration file is translated to an output file in DOT syntax,
-# which is then used to create the image file using graphviz (www.graphviz.org).
+# which is then used to create the image file using graphviz
+# (www.graphviz.org).
#
from __future__ import print_function
# SOURCEx | SOURCEx | SOURCEx | PIPELINEy | SOURCEx
# SINKx | SINKx | PIPELINEy | SINKx | SINKx
+
#
# Parse the input configuration file to detect the graph nodes and edges
#
#
print('Creating image file "%s" ...' % imgfile)
if os.system('which dot > /dev/null'):
- print('Error: Unable to locate "dot" executable.' \
- 'Please install the "graphviz" package (www.graphviz.org).')
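+ # a non-zero exit status from "which" means dot is not on the PATH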
+ print('Error: Unable to locate "dot" executable. '
+ 'Please install the "graphviz" package (www.graphviz.org).')
return
os.system(dot_cmd)
if __name__ == '__main__':
- parser = argparse.ArgumentParser(description=\
- 'Create diagram for IP pipeline configuration file.')
+ parser = argparse.ArgumentParser(description='Create diagram for IP '
+ 'pipeline configuration '
+ 'file.')
parser.add_argument(
'-f',
#
from __future__ import print_function
-import sys
-import errno
-import os
-import re
+from collections import namedtuple
+import argparse
import array
+import errno
import itertools
+import os
import re
-import argparse
-from collections import namedtuple
+import sys
# default values
enable_stage0_traceout = 1
cores = []
core_map = {}
-fd=open("/proc/cpuinfo")
+fd = open("/proc/cpuinfo")
lines = fd.readlines()
fd.close()
core_details = []
core_lines = {}
for line in lines:
- if len(line.strip()) != 0:
- name, value = line.split(":", 1)
- core_lines[name.strip()] = value.strip()
- else:
- core_details.append(core_lines)
- core_lines = {}
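+ # /proc/cpuinfo has one "name: value" block per logical CPU;
+ # a blank line ends a block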
+ if len(line.strip()) != 0:
+ name, value = line.split(":", 1)
+ core_lines[name.strip()] = value.strip()
+ else:
+ core_details.append(core_lines)
+ core_lines = {}
for core in core_details:
- for field in ["processor", "core id", "physical id"]:
- if field not in core:
- print "Error getting '%s' value from /proc/cpuinfo" % field
- sys.exit(1)
- core[field] = int(core[field])
+ for field in ["processor", "core id", "physical id"]:
+ if field not in core:
+ print "Error getting '%s' value from /proc/cpuinfo" % field
+ sys.exit(1)
+ core[field] = int(core[field])
- if core["core id"] not in cores:
- cores.append(core["core id"])
- if core["physical id"] not in sockets:
- sockets.append(core["physical id"])
- key = (core["physical id"], core["core id"])
- if key not in core_map:
- core_map[key] = []
- core_map[key].append(core["processor"])
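+ # group logical CPU (processor) numbers by their
+ # (physical id, core id) pair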
+ if core["core id"] not in cores:
+ cores.append(core["core id"])
+ if core["physical id"] not in sockets:
+ sockets.append(core["physical id"])
+ key = (core["physical id"], core["core id"])
+ if key not in core_map:
+ core_map[key] = []
+ core_map[key].append(core["processor"])
print "============================================================"
print "Core and Socket Information (as reported by '/proc/cpuinfo')"
print "============================================================\n"
-print "cores = ",cores
+print "cores = ", cores
print "sockets = ", sockets
print ""
print " ".ljust(max_core_id_len + len('Core ')),
for s in sockets:
- print "Socket %s" % str(s).ljust(max_core_map_len - len('Socket ')),
+ print "Socket %s" % str(s).ljust(max_core_map_len - len('Socket ')),
print ""
+
print " ".ljust(max_core_id_len + len('Core ')),
for s in sockets:
- print "--------".ljust(max_core_map_len),
+ print "--------".ljust(max_core_map_len),
print ""
for c in cores:
- print "Core %s" % str(c).ljust(max_core_id_len),
- for s in sockets:
- print str(core_map[(s,c)]).ljust(max_core_map_len),
- print ""
+ print "Core %s" % str(c).ljust(max_core_id_len),
+ for s in sockets:
+ print str(core_map[(s, c)]).ljust(max_core_map_len),
+ print ""
Unbind a device (Equivalent to \"-b none\")
--force:
- By default, network devices which are used by Linux - as indicated by having
- routes in the routing table - cannot be modified. Using the --force
- flag overrides this behavior, allowing active links to be forcibly
- unbound.
+ By default, network devices which are used by Linux - as indicated by
+ having routes in the routing table - cannot be modified. Using the
+ --force flag overrides this behavior, allowing active links to be
+ forcibly unbound.
WARNING: This can lead to loss of network connection and should be used
with caution.
# check for a copy based off current path
tools_dir = dirname(abspath(sys.argv[0]))
- if (tools_dir.endswith("tools")):
+ if tools_dir.endswith("tools"):
base_dir = dirname(tools_dir)
find_out = check_output(["find", base_dir, "-name", mod + ".ko"])
if len(find_out) > 0: # something matched
dev = {}
dev_lines = check_output(["lspci", "-Dvmmn"]).splitlines()
for dev_line in dev_lines:
- if (len(dev_line) == 0):
+ if len(dev_line) == 0:
if dev["Class"][0:2] == NETWORK_BASE_CLASS:
# convert device and vendor ids to numbers, then add to global
dev["Vendor"] = int(dev["Vendor"], 16)
dev = {}
dev_lines = check_output(["lspci", "-Dvmmn"]).splitlines()
for dev_line in dev_lines:
- if (len(dev_line) == 0):
- if (dev["Class"][0:2] == CRYPTO_BASE_CLASS):
+ if len(dev_line) == 0:
+ if dev["Class"][0:2] == CRYPTO_BASE_CLASS:
# convert device and vendor ids to numbers, then add to global
dev["Vendor"] = int(dev["Vendor"], 16)
dev["Device"] = int(dev["Device"], 16)
for dev in dev_list:
if extra_params is not None:
strings.append("%s '%s' %s" % (dev["Slot"],
- dev["Device_str"], extra_params % dev))
+ dev["Device_str"],
+ extra_params % dev))
else:
strings.append("%s '%s'" % (dev["Slot"], dev["Device_str"]))
# sort before printing, so that the entries appear in PCI order
# split our list of network devices into the three categories above
for d in devices.keys():
- if (NETWORK_BASE_CLASS in devices[d]["Class"]):
+ if NETWORK_BASE_CLASS in devices[d]["Class"]:
if not has_driver(d):
no_drv.append(devices[d])
continue
no_drv = []
for d in devices.keys():
- if (CRYPTO_BASE_CLASS in devices[d]["Class"]):
+ if CRYPTO_BASE_CLASS in devices[d]["Class"]:
if not has_driver(d):
no_drv.append(devices[d])
continue
# Utility to dump PMD_INFO_STRING support from an object file
#
# -------------------------------------------------------------------------
+import json
import os
+import platform
+import string
import sys
+from elftools.common.exceptions import ELFError
+from elftools.common.py3compat import (byte2int, bytes2str, str2bytes)
+from elftools.elf.elffile import ELFFile
from optparse import OptionParser
-import string
-import json
-import platform
# For running from development directory. It should take precedence over the
# installed pyelftools.
sys.path.insert(0, '.')
-
-from elftools import __version__
-from elftools.common.exceptions import ELFError
-from elftools.common.py3compat import (
- ifilter, byte2int, bytes2str, itervalues, str2bytes)
-from elftools.elf.elffile import ELFFile
-from elftools.elf.dynamic import DynamicSection, DynamicSegment
-from elftools.elf.enums import ENUM_D_TAG
-from elftools.elf.segments import InterpSegment
-from elftools.elf.sections import SymbolTableSection
-from elftools.elf.gnuversions import (
- GNUVerSymSection, GNUVerDefSection,
- GNUVerNeedSection,
-)
-from elftools.elf.relocation import RelocationSection
-from elftools.elf.descriptions import (
- describe_ei_class, describe_ei_data, describe_ei_version,
- describe_ei_osabi, describe_e_type, describe_e_machine,
- describe_e_version_numeric, describe_p_type, describe_p_flags,
- describe_sh_type, describe_sh_flags,
- describe_symbol_type, describe_symbol_bind, describe_symbol_visibility,
- describe_symbol_shndx, describe_reloc_type, describe_dyn_tag,
- describe_ver_flags,
-)
-from elftools.elf.constants import E_FLAGS
-from elftools.dwarf.dwarfinfo import DWARFInfo
-from elftools.dwarf.descriptions import (
- describe_reg_name, describe_attr_value, set_global_machine_arch,
- describe_CFI_instructions, describe_CFI_register_rule,
- describe_CFI_CFA_rule,
-)
-from elftools.dwarf.constants import (
- DW_LNS_copy, DW_LNS_set_file, DW_LNE_define_file)
-from elftools.dwarf.callframe import CIE, FDE
-
raw_output = False
pcidb = None
for i in optional_pmd_info:
try:
print("%s: %s" % (i['tag'], pmdinfo[i['id']]))
- except KeyError as e:
+ except KeyError:
continue
if (len(pmdinfo["pci_ids"]) != 0):
with open(library, 'rb') as file:
try:
libelf = ReadElf(file, sys.stdout)
- except ELFError as e:
+ except ELFError:
print("%s is no an ELF file" % library)
continue
libelf.process_dt_needed_entries()
try:
dirs = os.listdir(autoload_path)
- except OSError as e:
+ except OSError:
# Couldn't read the directory, give up
return
try:
file = open(dpath, 'rb')
readelf = ReadElf(file, sys.stdout)
- except ELFError as e:
+ except ELFError:
# this is likely not an elf file, skip it
continue
- except IOError as e:
+ except IOError:
# No permission to read the file, skip it
continue
file = open(dpdk_path, 'rb')
try:
readelf = ReadElf(file, sys.stdout)
- except ElfError as e:
+ except ELFError:
if raw_output is False:
print("Unable to parse %s" % file)
return
global raw_output
global pcidb
- pcifile_default = "./pci.ids" # for unknown OS's assume local file
+ pcifile_default = "./pci.ids" # For unknown OS's assume local file
if platform.system() == 'Linux':
pcifile_default = "/usr/share/hwdata/pci.ids"
elif platform.system() == 'FreeBSD':
"to get vendor names from",
default=pcifile_default, metavar="FILE")
optparser.add_option("-t", "--table", dest="tblout",
- help="output information on hw support as a hex table",
+ help="output information on hw support as a "
+ "hex table",
action='store_true')
optparser.add_option("-p", "--plugindir", dest="pdir",
help="scan dpdk for autoload plugins",