mirror of https://github.com/tc39/test262.git
commit e0e5c203e9
@@ -62,6 +62,18 @@ jobs:
   #   - run:
   #       name: "Run deploy"
   #       command: ./tools/scripts/deploy.sh
+  "Project lint unit tests on Python 3":
+    docker:
+      - image: circleci/python:3.7.4
+    working_directory: ~/test262
+    steps:
+      - checkout
+      - run:
+          name: "Install requirements for linter tool"
+          command: python -m pip install --user --requirement tools/lint/requirements.txt
+      - run:
+          name: "Test the lint tool"
+          command: ./tools/lint/test/run.py
   "V8: New or modified tests execution":
     docker:
       - image: *node_latest
@@ -125,6 +137,7 @@ workflows:
   Tools:
     jobs:
       - "Project lint, generation tests and build"
+      - "Project lint unit tests on Python 3"
   Tests execution:
     jobs:
       - "ChakraCore: New or modified tests execution"
@@ -55,9 +55,10 @@ class CheckHarnessFeatures(Check):
             return
 
         if len(result['missing']) > 0:
+            missing = ', '.join(sorted(result['missing']))
             if len(result['features']) == 0:
-                return 'Missing: `features: [%s]`' % ', '.join(list(result['missing']))
+                return 'Missing: `features: [%s]`' % missing
             else:
-                return 'Missing from `features`: %s' % ', '.join(list(result['missing']))
+                return 'Missing from `features`: %s' % missing
         else:
             return
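As an aside, a minimal standalone sketch of the idea behind the hunk above: joining a sorted copy of the missing-feature set produces a deterministic lint message, since set iteration order is otherwise unstable between runs. The variable names and sample data below are illustrative, not taken from the tool.

# Illustrative sketch only; mirrors the message formatting of the harness-features check.
missing = {'Symbol.toPrimitive', 'BigInt'}   # stand-in for result['missing']
declared = []                                # stand-in for result['features']

joined = ', '.join(sorted(missing))          # always "BigInt, Symbol.toPrimitive"
if len(declared) == 0:
    print('Missing: `features: [%s]`' % joined)
else:
    print('Missing from `features`: %s' % joined)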
@@ -5,7 +5,7 @@ _THROW_STMT = re.compile(
     r'^\$DONOTEVALUATE\(\);$',
     re.MULTILINE)
 
-_THROW_STMT_LEGACY = re.compile(
+_THROW_STMT_RAW = re.compile(
     r'^throw "Test262: This statement should not be evaluated\.";$',
     re.MULTILINE)
 
@@ -32,7 +32,7 @@ class CheckNegative(Check):
 
         if negative["phase"] in ["parse", "resolution"]:
             if meta.get('flags') and 'raw' in meta['flags']:
-                if not _THROW_STMT_LEGACY.search(source):
+                if not _THROW_STMT_RAW.search(source):
                     return 'Negative tests of type "early" must include a `throw` statement'
             elif not _THROW_STMT.search(source):
                 return 'Negative tests of type "early" must include a $DONOTEVALUATE() call'
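For reference, a small self-contained sketch (assuming the test source is available as a single string, as in the check above) of how the two patterns divide the work: negative parse tests with the raw flag must contain the literal throw statement, while all other negative parse tests must call $DONOTEVALUATE().

import re

# Same two patterns as the check above.
_THROW_STMT = re.compile(r'^\$DONOTEVALUATE\(\);$', re.MULTILINE)
_THROW_STMT_RAW = re.compile(
    r'^throw "Test262: This statement should not be evaluated\.";$',
    re.MULTILINE)

raw_source = 'throw "Test262: This statement should not be evaluated.";\n!!!\n'
normal_source = '$DONOTEVALUATE();\n!!!\n'

assert _THROW_STMT_RAW.search(raw_source)     # raw tests: literal throw required
assert _THROW_STMT.search(normal_source)      # non-raw tests: $DONOTEVALUATE() required
assert not _THROW_STMT.search(raw_source)     # the two forms are not interchangeable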
@@ -2,6 +2,8 @@
 # Copyright (C) 2017 Mike Pennisi. All rights reserved.
 # This code is governed by the BSD license found in the LICENSE file.
 
+from __future__ import print_function
+
 import argparse
 import inflect
 import os
@@ -91,25 +93,25 @@ if __name__ == '__main__':
 
     files = [path for _path in args.path for path in collect_files(_path)]
     file_count = len(files)
-    print 'Linting %s %s' % (file_count, ie.plural('file', file_count))
+    print('Linting %s %s' % (file_count, ie.plural('file', file_count)))
 
     all_errors = lint(files)
     unexpected_errors = dict(all_errors)
 
-    for file_name, failures in all_errors.iteritems():
+    for file_name, failures in all_errors.items():
         if file_name not in exceptions:
             continue
         if set(failures.keys()) == exceptions[file_name]:
             del unexpected_errors[file_name]
 
     error_count = len(unexpected_errors)
-    print 'Linting complete. %s %s found.' % (error_count, ie.plural('error', error_count))
+    print('Linting complete. %s %s found.' % (error_count, ie.plural('error', error_count)))
 
     if error_count == 0:
         sys.exit(0)
 
-    for file_name, failures in iter(sorted(unexpected_errors.iteritems())):
-        for ID, message in failures.iteritems():
+    for file_name, failures in iter(sorted(unexpected_errors.items())):
+        for ID, message in failures.items():
             eprint('%s: %s - %s' % (os.path.abspath(file_name), ID, message))
 
     sys.exit(1)
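As context for the hunk above, a minimal sketch (not this repository's code) of the two portability idioms it relies on: the __future__ import makes print(...) a function under Python 2 as well, and dict.items() exists in both major versions, whereas dict.iteritems() is Python 2 only.

from __future__ import print_function  # no-op on Python 3, enables print() on Python 2

errors = {'test/a.js': {'HARNESS_FEATURES': 'Missing: `features: [BigInt]`'}}

# .items() works on Python 2 and 3; .iteritems() raises AttributeError on Python 3.
for file_name, failures in sorted(errors.items()):
    for check_id, message in failures.items():
        print('%s: %s - %s' % (file_name, check_id, message))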
@@ -1,4 +1,4 @@
-HARNESS_FEATURES - Missing: `features: [Symbol.toPrimitive, BigInt]`
+HARNESS_FEATURES - Missing: `features: [BigInt, Symbol.toPrimitive]`
 ^ expected errors | v input
 // Copyright (C) 2017 Rick Waldron. All rights reserved.
 // This code is governed by the BSD license found in the LICENSE file.
@@ -78,11 +78,12 @@ def create_file_test(name, fspath):
         result = self.lint([tmp_file])
         if len(expected) == 0:
             self.assertEqual(result['returncode'], 0)
-            self.assertEqual(result['stderr'], '')
+            self.assertEqual(result['stderr'], b'')
         else:
             self.assertNotEqual(result['returncode'], 0)
+            stderr = result['stderr'].decode("utf-8")
             for err in expected:
-                self.assertIn(err, result['stderr'])
+                self.assertIn(err, stderr)
 
     return test
 
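Finally, a hedged sketch of why the test helper decodes before asserting: when subprocess output is captured without text mode under Python 3, stdout and stderr arrive as bytes, so the empty case compares against b'' and substring checks need a decoded str. The command below is a placeholder, not the lint tool's real invocation.

import subprocess

# Placeholder command standing in for running the lint tool on a fixture file.
proc = subprocess.Popen(
    ['python', '-c', 'import sys; sys.stderr.write("HARNESS_FEATURES - Missing")'],
    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()

assert isinstance(stderr, bytes)                       # captured output is bytes on Python 3
assert 'HARNESS_FEATURES' in stderr.decode('utf-8')    # decode before substring assertions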