# This file is part of cloud-init. See LICENSE file for license information.

"""Verify test results."""

import os
import unittest

from tests.cloud_tests import (config, LOG, util, testcases)


def verify_data(data_dir, platform, os_name, tests):
    """Verify test data is correct.

    @param data_dir: top level directory for all tests
    @param platform: the platform name for this test data (e.g. lxd)
    @param os_name: the operating system under test (xenial, artful, etc.)
    @param tests: list of test names
    @return_value: {<test_name>: {passed: True/False, failures: []}}
    """
    base_dir = os.sep.join((data_dir, platform, os_name))
    runner = unittest.TextTestRunner(verbosity=util.current_verbosity())
    res = {}
    for test_name in tests:
        LOG.debug('verifying test data for %s', test_name)

        # get cloudconfig for test
        test_conf = config.load_test_config(test_name)
        test_module = config.name_to_module(test_name)
        cloud_conf = test_conf['cloud_config']

        # load script outputs
        data = {'platform': platform, 'os_name': os_name}
        test_dir = os.path.join(base_dir, test_name)
        for script_name in os.listdir(test_dir):
            with open(os.path.join(test_dir, script_name), 'rb') as fp:
                data[script_name] = fp.read()

        # get test suite and launch tests
        suite = testcases.get_suite(test_module, data, cloud_conf)
        suite_results = runner.run(suite)
        res[test_name] = {
            'passed': suite_results.wasSuccessful(),
            'failures': [{'module': type(test_class).__base__.__module__,
                          'class': type(test_class).__base__.__name__,
                          'function': str(test_class).split()[0],
                          'error': trace.splitlines()[-1],
                          'traceback': trace, }
                         for test_class, trace in suite_results.failures]
        }

        for failure in res[test_name]['failures']:
            LOG.warning('test case: %s failed %s.%s with: %s',
                        test_name, failure['class'], failure['function'],
                        failure['error'])

    return res
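
# Illustrative sketch of the structure returned by verify_data(); the test
# name, module, class and error text below are made-up placeholders, not
# output from a real run:
#
#   {'example/test_name': {
#       'passed': False,
#       'failures': [{'module': 'tests.cloud_tests.testcases.example',
#                     'class': 'ExampleTestCase',
#                     'function': 'test_something',
#                     'error': 'AssertionError: ...',
#                     'traceback': 'Traceback (most recent call last): ...'}]}}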


def verify(args):
    """Verify test data.

    @param args: parsed command line arguments; uses args.data_dir (top level
                 directory of collected test data) and args.result (optional
                 file to merge results into)
    @return_value: 0 for success, or number of failed tests
    """
    failed = 0
    res = {}

    # find test data
    tests = util.list_test_data(args.data_dir)

    for platform in tests.keys():
        res[platform] = {}
        for os_name in tests[platform].keys():
            test_name = "platform='{}', os='{}'".format(platform, os_name)
            LOG.info('test: %s verifying test data', test_name)

            # run test
            res[platform][os_name] = verify_data(
                args.data_dir, platform, os_name,
                tests[platform][os_name])

            # handle results
            fail_list = [k for k, v in res[platform][os_name].items()
                         if not v.get('passed')]
            if len(fail_list) == 0:
                LOG.info('test: %s passed all tests', test_name)
            else:
                LOG.warning('test: %s failed %s tests', test_name,
                            len(fail_list))
            failed += len(fail_list)

    # dump results
    LOG.debug('verify results: %s', res)
    if args.result:
        util.merge_results({'verify': res}, args.result)

    return failed
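
# Minimal usage sketch (normally verify() is driven by the cloud_tests CLI,
# which builds 'args' with argparse); the attribute names follow the code
# above, and the data directory path is only a placeholder:
#
#   from argparse import Namespace
#   failed = verify(Namespace(data_dir='/tmp/cloud_test_data', result=None))
#   print('failed tests:', failed)
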
# vi: ts=4 expandtab