author     Egor Tensin <Egor.Tensin@gmail.com>  2016-02-13 01:47:40 +0300
committer  Egor Tensin <Egor.Tensin@gmail.com>  2016-02-13 01:47:40 +0300
commit     a299d7c377b52b7efac383b3840b11372cd63478 (patch)
tree       6296833e561c5968409df2dcbea9b0e357d3ecb1 /test
parent     test: use Enum (diff)
download   aes-tools-a299d7c377b52b7efac383b3840b11372cd63478.tar.gz
           aes-tools-a299d7c377b52b7efac383b3840b11372cd63478.zip
test: code style
Diffstat (limited to 'test')
-rw-r--r--  test/cavp.py             | 50
-rw-r--r--  test/file.py             | 53
-rw-r--r--  test/nist-sp-800-38a.py  |  8
3 files changed, 56 insertions, 55 deletions
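
The core of this commit is replacing the ad-hoc integer-constant classes in cavp.py and file.py with enum.Enum, mirroring what the parent commit ("test: use Enum") did for nist-sp-800-38a.py. A minimal standalone sketch of the before/after pattern; the Enum definitions match the hunks below, while the demo lines at the end are illustrative only:

    from enum import Enum

    # Before: plain class attributes; the "exit codes" are bare integers.
    class _TestExitCode:
        SUCCESS, FAILURE, ERROR, SKIPPED = range(4)

    # After: a real Enum; members are distinct, named objects, so they
    # print readably in logs and still work with list.count().
    class TestExitCode(Enum):
        SUCCESS, FAILURE, ERROR, SKIPPED = range(1, 5)

    codes = [TestExitCode.SUCCESS, TestExitCode.FAILURE, TestExitCode.SUCCESS]
    print(codes.count(TestExitCode.SUCCESS))  # 2
    print(TestExitCode.FAILURE)               # TestExitCode.FAILURE
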
diff --git a/test/cavp.py b/test/cavp.py
index 00fe715..fcc5341 100644
--- a/test/cavp.py
+++ b/test/cavp.py
@@ -5,6 +5,7 @@
from collections import OrderedDict
import configparser
from datetime import datetime
+from enum import Enum
import logging
import os.path
import sys
@@ -29,7 +30,7 @@ def _split_into_chunks(expected_output, inputs, max_len=100):
for i in range(0, len(inputs), max_len):
yield expected_output[i:i+max_len], inputs[i:i+max_len]
-def _assert_output(actual, expected):
+def verify_test_output(actual, expected):
if len(actual) != len(expected):
logging.error('Unexpected output length {0} (expected {1})'.format(len(actual), len(expected)))
return False
@@ -38,8 +39,8 @@ def _assert_output(actual, expected):
return False
return True
-class _TestExitCode:
- SUCCESS, FAILURE, ERROR, SKIPPED = range(4)
+class TestExitCode(Enum):
+ SUCCESS, FAILURE, ERROR, SKIPPED = range(1, 5)
class _TestVectorsFile:
def __init__(self, path, archive):
@@ -78,9 +79,9 @@ class _TestVectorsFile:
def _run_tests(self, tool, inputs, expected_output, use_boxes=False):
for expected_output_chunk, input_chunk in _split_into_chunks(expected_output, list(inputs)):
actual_output = tool(self.algorithm(), self.mode(), input_chunk, use_boxes=use_boxes)
- if not _assert_output(actual_output, expected_output_chunk):
- return _TestExitCode.FAILURE
- return _TestExitCode.SUCCESS
+ if not verify_test_output(actual_output, expected_output_chunk):
+ return TestExitCode.FAILURE
+ return TestExitCode.SUCCESS
def run_encryption_tests(self, tools, use_boxes=False):
logging.info('Running encryption tests...')
@@ -152,26 +153,30 @@ def _parse_archive_and_run_tests(tools, archive_path, use_boxes=False):
except Exception as e:
logging.error('Encountered an exception!')
logging.exception(e)
- exit_codes.append(_TestExitCode.ERROR)
+ exit_codes.append(TestExitCode.ERROR)
try:
exit_codes.append(member.run_decryption_tests(tools, use_boxes))
except Exception as e:
logging.error('Encountered an exception!')
logging.exception(e)
- exit_codes.append(_TestExitCode.ERROR)
+ exit_codes.append(TestExitCode.ERROR)
else:
- exit_codes.append(_TestExitCode.SKIPPED)
+ exit_codes.append(TestExitCode.SKIPPED)
logging.info('Test exit codes:')
- logging.info('\tSkipped: {0}'.format(exit_codes.count(_TestExitCode.SKIPPED)))
- logging.info('\tError(s): {0}'.format(exit_codes.count(_TestExitCode.ERROR)))
- logging.info('\tSucceeded: {0}'.format(exit_codes.count(_TestExitCode.SUCCESS)))
- logging.info('\tFailed: {0}'.format(exit_codes.count(_TestExitCode.FAILURE)))
- if (exit_codes.count(_TestExitCode.ERROR) == 0 and
- exit_codes.count(_TestExitCode.FAILURE) == 0):
+ logging.info('\tSkipped: {}'.format(exit_codes.count(TestExitCode.SKIPPED)))
+ logging.info('\tError(s): {}'.format(exit_codes.count(TestExitCode.ERROR)))
+ logging.info('\tSucceeded: {}'.format(exit_codes.count(TestExitCode.SUCCESS)))
+ logging.info('\tFailed: {}'.format(exit_codes.count(TestExitCode.FAILURE)))
+ if (exit_codes.count(TestExitCode.ERROR) == 0 and
+ exit_codes.count(TestExitCode.FAILURE) == 0):
sys.exit()
else:
sys.exit(1)
+def _build_default_log_path():
+ return datetime.now().strftime('{}_%Y-%m-%d_%H-%M-%S.log').format(
+ os.path.splitext(os.path.basename(__file__))[0])
+
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
@@ -183,18 +188,13 @@ if __name__ == '__main__':
help='use the "boxes" interface')
parser.add_argument('--archive', '-a', default='KAT_AES.zip',
help='set path of the archive with the test vectors')
- parser.add_argument('--log', '-l', help='set log file path')
+ parser.add_argument('--log', '-l', default=_build_default_log_path(),
+ help='set log file path')
args = parser.parse_args()
- logging_options = {
- 'format': '%(asctime)s | %(module)s | %(levelname)s | %(message)s',
- 'level': logging.DEBUG }
-
- if args.log is None:
- logging_options['filename'] = datetime.now().strftime('cavp_%Y-%m-%d_%H-%M-%S.log')
- else:
- logging_options['filename'] = args.log
- logging.basicConfig(**logging_options)
+ logging.basicConfig(filename=args.log,
+ format='%(asctime)s | %(module)s | %(levelname)s | %(message)s',
+ level=logging.DEBUG)
tools = Tools(args.path, use_sde=args.sde)
_parse_archive_and_run_tests(tools, args.archive, use_boxes=args.use_boxes)
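
The cavp.py changes above also fold the logging setup into a single basicConfig() call, with the timestamped default log name computed up front by _build_default_log_path() and handed to argparse. A self-contained sketch of the same pattern, using only the standard library; the log-file stem here is simply whatever script runs the snippet, not necessarily cavp.py:

    import logging
    import os.path
    from datetime import datetime

    def build_default_log_path(script_path):
        # e.g. 'cavp_2016-02-13_01-47-40.log' when run from test/cavp.py
        stem = os.path.splitext(os.path.basename(script_path))[0]
        return datetime.now().strftime('{}_%Y-%m-%d_%H-%M-%S.log').format(stem)

    if __name__ == '__main__':
        logging.basicConfig(filename=build_default_log_path(__file__),
                            format='%(asctime)s | %(module)s | %(levelname)s | %(message)s',
                            level=logging.DEBUG)
        logging.info('logging configured')
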
diff --git a/test/file.py b/test/file.py
index 3f07aaa..a918984 100644
--- a/test/file.py
+++ b/test/file.py
@@ -3,6 +3,7 @@
# See LICENSE.txt for details.
from datetime import datetime
+from enum import Enum
from glob import iglob as glob
import filecmp
import logging
@@ -13,8 +14,8 @@ from tempfile import TemporaryDirectory
from toolkit import *
-class _TestExitCode:
- SUCCESS, FAILURE, ERROR, SKIPPED = range(4)
+class TestExitCode(Enum):
+ SUCCESS, FAILURE, ERROR, SKIPPED = range(1, 5)
_KEY_EXT = 'key'
_IV_EXT = 'iv'
@@ -31,12 +32,12 @@ def _run_encryption_test(tools, tmp_dir, algorithm, mode, key, plain_path, ciphe
if force:
logging.warn('Overwriting expected ciphertext file')
shutil.copy(tmp_path, cipher_path)
- return _TestExitCode.SKIPPED
+ return TestExitCode.SKIPPED
if filecmp.cmp(cipher_path, tmp_path):
- return _TestExitCode.SUCCESS
+ return TestExitCode.SUCCESS
else:
logging.error('The encrypted file doesn\'t match the ciphertext file')
- return _TestExitCode.FAILURE
+ return TestExitCode.FAILURE
def _run_decryption_test(tools, tmp_dir, algorithm, mode, key, cipher_path, plain_path, iv=None):
logging.info('Running decryption test...')
@@ -46,10 +47,10 @@ def _run_decryption_test(tools, tmp_dir, algorithm, mode, key, cipher_path, plai
logging.info('\tDecrypted file path: ' + tmp_path)
tools.run_decrypt_file(algorithm, mode, key, cipher_path, tmp_path, iv)
if filecmp.cmp(tmp_path, plain_path):
- return _TestExitCode.SUCCESS
+ return TestExitCode.SUCCESS
else:
logging.error('The decrypted file doesn\'t match the plaintext file')
- return _TestExitCode.FAILURE
+ return TestExitCode.FAILURE
def _list_dirs(root_path):
xs = map(lambda x: os.path.join(root_path, x), os.listdir(root_path))
@@ -97,7 +98,7 @@ def _run_tests(tools, suite_dir, force=False):
maybe_algorithm = Algorithm.try_parse(algorithm)
if maybe_algorithm is None:
logging.warn('Unknown or unsupported algorithm: ' + algorithm)
- exit_codes.append(_TestExitCode.SKIPPED)
+ exit_codes.append(TestExitCode.SKIPPED)
continue
algorithm = maybe_algorithm
logging.info('Algorithm: {}'.format(algorithm))
@@ -106,7 +107,7 @@ def _run_tests(tools, suite_dir, force=False):
maybe_mode = Mode.try_parse(mode)
if maybe_mode is None:
logging.warn('Unknown or unsupported mode: ' + mode)
- exit_codes.append(_TestExitCode.SKIPPED)
+ exit_codes.append(TestExitCode.SKIPPED)
continue
mode = maybe_mode
logging.info('Mode: {}'.format(mode))
@@ -129,7 +130,7 @@ def _run_tests(tools, suite_dir, force=False):
except Exception as e:
logging.error('Encountered an exception!')
logging.exception(e)
- exit_codes.append(_TestExitCode.ERROR)
+ exit_codes.append(TestExitCode.ERROR)
if not force:
try:
exit_codes.append(_run_decryption_test(
@@ -138,18 +139,22 @@ def _run_tests(tools, suite_dir, force=False):
except Exception as e:
logging.error('Encountered an exception!')
logging.exception(e)
- exit_codes.append(_TestExitCode.ERROR)
+ exit_codes.append(TestExitCode.ERROR)
logging.info('Test exit codes:')
- logging.info('\tSkipped: {0}'.format(exit_codes.count(_TestExitCode.SKIPPED)))
- logging.info('\tError(s): {0}'.format(exit_codes.count(_TestExitCode.ERROR)))
- logging.info('\tSucceeded: {0}'.format(exit_codes.count(_TestExitCode.SUCCESS)))
- logging.info('\tFailed: {0}'.format(exit_codes.count(_TestExitCode.FAILURE)))
- if (exit_codes.count(_TestExitCode.ERROR) == 0 and
- exit_codes.count(_TestExitCode.FAILURE) == 0):
+ logging.info('\tSkipped: {}'.format(exit_codes.count(TestExitCode.SKIPPED)))
+ logging.info('\tError(s): {}'.format(exit_codes.count(TestExitCode.ERROR)))
+ logging.info('\tSucceeded: {}'.format(exit_codes.count(TestExitCode.SUCCESS)))
+ logging.info('\tFailed: {}'.format(exit_codes.count(TestExitCode.FAILURE)))
+ if (exit_codes.count(TestExitCode.ERROR) == 0 and
+ exit_codes.count(TestExitCode.FAILURE) == 0):
sys.exit()
else:
sys.exit(1)
+def _build_default_log_path():
+ return datetime.now().strftime('{}_%Y-%m-%d_%H-%M-%S.log').format(
+ os.path.splitext(os.path.basename(__file__))[0])
+
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
@@ -157,21 +162,17 @@ if __name__ == '__main__':
help='set path to file encryption utilities')
parser.add_argument('--sde', '-e', action='store_true',
help='use Intel SDE to run *.exe files')
- parser.add_argument('--log', '-l', help='set log file path')
+ parser.add_argument('--log', '-l', default=_build_default_log_path(),
+ help='set log file path')
parser.add_argument('--force', '-f', action='store_true',
help='overwrite ciphertext files')
parser.add_argument('--suite', '-s', default='file',
help='set test suite directory path')
args = parser.parse_args()
- logging_options = {
- 'format': '%(asctime)s | %(module)s | %(levelname)s | %(message)s',
- 'level': logging.DEBUG }
- if args.log is None:
- logging_options['filename'] = datetime.now().strftime('file_%Y-%m-%d_%H-%M-%S.log')
- else:
- logging_options['filename'] = args.log
- logging.basicConfig(**logging_options)
+ logging.basicConfig(filename=args.log,
+ format='%(asctime)s | %(module)s | %(levelname)s | %(message)s',
+ level=logging.DEBUG)
tools = Tools(args.path, use_sde=args.sde)
_run_tests(tools, args.suite, args.force)
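
Both cavp.py and file.py end with the same summary-and-exit block: each TestExitCode is counted in a list with list.count(), and the process exits non-zero if anything failed or errored. A sketch of that logic, written here with collections.Counter purely for illustration (the repository itself sticks to list.count()):

    import logging
    import sys
    from collections import Counter
    from enum import Enum

    class TestExitCode(Enum):
        SUCCESS, FAILURE, ERROR, SKIPPED = range(1, 5)

    def summarize_and_exit(exit_codes):
        tally = Counter(exit_codes)  # missing members count as 0
        logging.info('Test exit codes:')
        logging.info('\tSkipped: {}'.format(tally[TestExitCode.SKIPPED]))
        logging.info('\tError(s): {}'.format(tally[TestExitCode.ERROR]))
        logging.info('\tSucceeded: {}'.format(tally[TestExitCode.SUCCESS]))
        logging.info('\tFailed: {}'.format(tally[TestExitCode.FAILURE]))
        if tally[TestExitCode.ERROR] == 0 and tally[TestExitCode.FAILURE] == 0:
            sys.exit()  # exit status 0
        sys.exit(1)

    if __name__ == '__main__':
        logging.basicConfig(level=logging.INFO)
        summarize_and_exit([TestExitCode.SUCCESS, TestExitCode.SKIPPED])
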
diff --git a/test/nist-sp-800-38a.py b/test/nist-sp-800-38a.py
index c0ad9e6..1bbd3bc 100644
--- a/test/nist-sp-800-38a.py
+++ b/test/nist-sp-800-38a.py
@@ -240,10 +240,10 @@ if __name__ == '__main__':
exit_codes.extend(_run_tests(tools, algorithm, mode, use_boxes=args.use_boxes))
logging.info('Test exit codes:')
- logging.info('\tSkipped: {0}'.format(exit_codes.count(TestExitCode.SKIPPED)))
- logging.info('\tError(s): {0}'.format(exit_codes.count(TestExitCode.ERROR)))
- logging.info('\tSucceeded: {0}'.format(exit_codes.count(TestExitCode.SUCCESS)))
- logging.info('\tFailed: {0}'.format(exit_codes.count(TestExitCode.FAILURE)))
+ logging.info('\tSkipped: {}'.format(exit_codes.count(TestExitCode.SKIPPED)))
+ logging.info('\tError(s): {}'.format(exit_codes.count(TestExitCode.ERROR)))
+ logging.info('\tSucceeded: {}'.format(exit_codes.count(TestExitCode.SUCCESS)))
+ logging.info('\tFailed: {}'.format(exit_codes.count(TestExitCode.FAILURE)))
if (exit_codes.count(TestExitCode.ERROR) == 0 and
exit_codes.count(TestExitCode.FAILURE) == 0):
sys.exit()
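
The nist-sp-800-38a.py hunk only touches format-string style, but the other two files also move the log-file fallback from a post-parse if/else into an argparse default. A small sketch of that change, with a hypothetical placeholder default ('example.log') standing in for the timestamped name used above:

    import argparse

    def make_parser(default_log):
        parser = argparse.ArgumentParser()
        # Supplying the default here replaces the old
        # 'if args.log is None: ...' branch and shows up in --help.
        parser.add_argument('--log', '-l', default=default_log,
                            help='set log file path')
        return parser

    if __name__ == '__main__':
        args = make_parser('example.log').parse_args()
        print(args.log)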