Module data_request_api.tests.test_optimize
Test data_request.py
Functions
def add_profiling(func)
def add_profiling(func):
    def do_profiling(self, *args, **kwargs):
        # Profile the wrapped test only when the test instance enables it.
        if self.profiling:
            pr = cProfile.Profile()
            pr.enable()
        rep = func(self, *args, **kwargs)
        if self.profiling:
            pr.disable()
            stdout = sys.stdout
            # Derive the test name from str(self), e.g. "test_x (module.Class)" -> "test_x".
            test_name = str(self)
            test_name = re.sub(r"(?P<name>.*) .*", r"\g<name>", test_name)
            file_name = filepath(f"profiling_{test_name}.txt")
            if os.path.isfile(file_name):
                os.remove(file_name)
            # Write the cumulative-time statistics to profiling_<test name>.txt.
            with codecs.open(file_name, "w", encoding="utf-8") as statsfile:
                sys.stdout = statsfile
                s = io.StringIO()
                sortby = "cumulative"
                ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
                ps.print_stats()
                print(s.getvalue())
            sys.stdout = stdout
        return rep
    return do_profiling
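A minimal usage sketch, assuming the module is importable under data_request_api.tests.test_optimize (the test class and assertion below are hypothetical, added only for illustration): the decorator reads self.profiling on the test instance and, when it is true, writes a cProfile report sorted by cumulative time to profiling_<test name>.txt.

    import unittest

    from data_request_api.tests.test_optimize import add_profiling  # assumed import path

    class ProfiledSketch(unittest.TestCase):  # hypothetical test case
        def setUp(self):
            # The wrapper checks this flag before enabling cProfile.
            self.profiling = True

        @add_profiling
        def test_something(self):
            # After the test body runs, a cumulative-time report is written to
            # profiling_test_something.txt (path built by the module's filepath() helper).
            self.assertEqual(sum(range(10)), 45)

    if __name__ == "__main__":
        unittest.main()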
Classes
class TestDataRequest11 (methodName='runTest')
class TestDataRequest11(unittest.TestCase):
    def setUp(self):
        self.profiling = True
        self.version = "v1.1"
        export_version = "release"
        content = get_transformed_content(version=self.version, export=export_version)
        self.vs_dict = content["VS_input"]
        self.input_database = content["DR_input"]
        self.single = f"{_dreq_res}/{self.version}/dreq_{export_version}_export.json"
        self.single_content = read_json_input_file_content(self.single)
        self.single_format = correct_dictionaries(self.single_content)

    @unittest.skip
    @add_profiling
    def test_from_separated_inputs(self):
        obj = DataRequest.from_separated_inputs(DR_input=self.input_database, VS_input=self.vs_dict)

    @unittest.skip
    @add_profiling
    def test_from_single_input(self):
        obj = DataRequest.from_input(self.single, version=self.version)

    @unittest.skip
    @add_profiling
    def test_correct_dictionaries(self):
        content = correct_dictionaries(self.single_content)

    @unittest.skip
    @add_profiling
    def test_transform_to_one(self):
        content = transform_content_inner(self.single_format, get_transform_settings(self.version))

    @unittest.skip
    @add_profiling
    def test_filter_variables(self):
        DR = DataRequest.from_separated_inputs(DR_input=self.input_database, VS_input=self.vs_dict)
        content = DR.find_variables(operation="all", skip_if_missing=False, max_priority_level="Core")

    @unittest.skip
    @add_profiling
    def test_export_summary(self):
        DR = DataRequest.from_separated_inputs(DR_input=self.input_database, VS_input=self.vs_dict)
        with tempfile.TemporaryDirectory() as output_dir:
            DR.find_variables_per_opportunity(DR.get_opportunities()[0])
            DR.export_summary("variables", "opportunities", os.sep.join([output_dir, "var_per_op.csv"]))
            DR.export_summary("variables", "experiments", os.sep.join([output_dir, "var_per_exp.csv"]))
            DR.export_summary("variables", "experiments", os.sep.join([output_dir, "var_per_exp_prio1.csv"]),
                              filtering_requests=dict(max_priority_level="Core"))

    @unittest.skip
    @add_profiling
    def test_export_data(self):
        DR = DataRequest.from_separated_inputs(DR_input=self.input_database, VS_input=self.vs_dict)
        with tempfile.TemporaryDirectory() as output_dir:
            DR.export_data("opportunities", os.sep.join([output_dir, "op.csv"]),
                           export_columns_request=["name", "lead_theme", "description"])
A class whose instances are single test cases.
By default, the test code itself should be placed in a method named 'runTest'.
If the fixture may be used for many test cases, create as many test methods as are needed. When instantiating such a TestCase subclass, specify in the constructor arguments the name of the test method that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction and deconstruction of the test's environment ('fixture') can be implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class __init__ method must always be called. It is important that subclasses should not change the signature of their __init__ method, since instances of the classes are instantiated automatically by parts of the framework in order to be run.
When subclassing TestCase, you can set these attributes:
* failureException: determines which exception will be raised when the instance's assertion methods fail; test methods raising this exception will be deemed to have 'failed' rather than 'errored'.
* longMessage: determines whether long messages (including repr of objects used in assert methods) will be printed on failure in addition to any explicit message passed.
* maxDiff: sets the maximum length of a diff in failure messages by assert methods using difflib. It is looked up as an instance attribute so can be configured by individual tests if required.
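As a brief illustration of those attributes (the subclass below is hypothetical and unrelated to this module):

    import unittest

    class AttrSketch(unittest.TestCase):  # hypothetical subclass, for illustration only
        # failureException defaults to AssertionError and is rarely overridden.
        longMessage = True   # append repr-based detail to any explicit failure message
        maxDiff = None       # never truncate difflib diffs in failure output

        def test_compare(self):
            self.assertEqual([1, 2, 3], [1, 2, 3], "sequences should match")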
Create an instance of the class that will use the named test method when executed. Raises a ValueError if the instance does not have a method with the specified name.
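For example, a single test from this class could be instantiated by name and run programmatically; a sketch assuming the import path below, and noting that every test method above carries @unittest.skip, so it is reported as skipped unless that decorator is removed locally:

    import unittest

    from data_request_api.tests.test_optimize import TestDataRequest11  # assumed import path

    # Bind an instance to one named test method; unittest raises ValueError
    # if no method with that name exists.
    case = TestDataRequest11(methodName="test_from_single_input")
    unittest.TextTestRunner(verbosity=2).run(case)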
Ancestors
- unittest.case.TestCase
Methods
def setUp(self)
def setUp(self):
    self.profiling = True
    self.version = "v1.1"
    export_version = "release"
    content = get_transformed_content(version=self.version, export=export_version)
    self.vs_dict = content["VS_input"]
    self.input_database = content["DR_input"]
    self.single = f"{_dreq_res}/{self.version}/dreq_{export_version}_export.json"
    self.single_content = read_json_input_file_content(self.single)
    self.single_format = correct_dictionaries(self.single_content)
Hook method for setting up the test fixture before exercising it.
def test_correct_dictionaries(self, *args, **kwargs)
Profiled via add_profiling (see above); defined in the class as:

    @unittest.skip
    @add_profiling
    def test_correct_dictionaries(self):
        content = correct_dictionaries(self.single_content)
def test_export_data(self, *args, **kwargs)
Profiled via add_profiling (see above); defined in the class as:

    @unittest.skip
    @add_profiling
    def test_export_data(self):
        DR = DataRequest.from_separated_inputs(DR_input=self.input_database, VS_input=self.vs_dict)
        with tempfile.TemporaryDirectory() as output_dir:
            DR.export_data("opportunities", os.sep.join([output_dir, "op.csv"]),
                           export_columns_request=["name", "lead_theme", "description"])
def test_export_summary(self, *args, **kwargs)
Profiled via add_profiling (see above); defined in the class as:

    @unittest.skip
    @add_profiling
    def test_export_summary(self):
        DR = DataRequest.from_separated_inputs(DR_input=self.input_database, VS_input=self.vs_dict)
        with tempfile.TemporaryDirectory() as output_dir:
            DR.find_variables_per_opportunity(DR.get_opportunities()[0])
            DR.export_summary("variables", "opportunities", os.sep.join([output_dir, "var_per_op.csv"]))
            DR.export_summary("variables", "experiments", os.sep.join([output_dir, "var_per_exp.csv"]))
            DR.export_summary("variables", "experiments", os.sep.join([output_dir, "var_per_exp_prio1.csv"]),
                              filtering_requests=dict(max_priority_level="Core"))
def test_filter_variables(self, *args, **kwargs)
Profiled via add_profiling (see above); defined in the class as:

    @unittest.skip
    @add_profiling
    def test_filter_variables(self):
        DR = DataRequest.from_separated_inputs(DR_input=self.input_database, VS_input=self.vs_dict)
        content = DR.find_variables(operation="all", skip_if_missing=False, max_priority_level="Core")
def test_from_separated_inputs(self, *args, **kwargs)
Profiled via add_profiling (see above); defined in the class as:

    @unittest.skip
    @add_profiling
    def test_from_separated_inputs(self):
        obj = DataRequest.from_separated_inputs(DR_input=self.input_database, VS_input=self.vs_dict)
def test_from_single_input(self, *args, **kwargs)
Profiled via add_profiling (see above); defined in the class as:

    @unittest.skip
    @add_profiling
    def test_from_single_input(self):
        obj = DataRequest.from_input(self.single, version=self.version)
def test_transform_to_one(self, *args, **kwargs)
Profiled via add_profiling (see above); defined in the class as:

    @unittest.skip
    @add_profiling
    def test_transform_to_one(self):
        content = transform_content_inner(self.single_format, get_transform_settings(self.version))