diff --git a/kmip/services/server/monitor.py b/kmip/services/server/monitor.py
new file mode 100644
index 0000000..5239765
--- /dev/null
+++ b/kmip/services/server/monitor.py
@@ -0,0 +1,162 @@
+# Copyright (c) 2018 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import multiprocessing
+import os
+import signal
+import time
+
+from kmip.core import policy as operation_policy
+
+
+def get_json_files(p):
+    """
+    Scan the provided policy directory for all JSON policy files.
+    """
+    f = [os.path.join(p, x) for x in os.listdir(p) if x.endswith(".json")]
+    return sorted(f)
+
+
+class PolicyDirectoryMonitor(multiprocessing.Process):
+    """
+    A file monitor that tracks modifications made within the policy directory.
+    """
+
+    def __init__(self, policy_directory, policy_store):
+        """
+        Set up the file monitor with the policy directory to track.
+
+        Args:
+            policy_directory (string): The system path of the policy directory
+                that should be monitored. Required.
+            policy_store (DictProxy): A dictionary proxy created by the server
+                multiprocessing resource manager. Used to store and share the
+                policy information across server processes and threads.
+                Required.
+        """
+        super(PolicyDirectoryMonitor, self).__init__()
+
+        self.halt_trigger = multiprocessing.Event()
+        self.policy_directory = policy_directory
+
+        self.file_timestamps = None
+        self.policy_cache = None
+        self.policy_files = None
+        self.policy_map = None
+        self.policy_store = policy_store
+
+        self.reserved_policies = ['default', 'public']
+
+        def interrupt_handler(trigger, frame):
+            self.stop()
+        signal.signal(signal.SIGINT, interrupt_handler)
+        signal.signal(signal.SIGTERM, interrupt_handler)
+
+        self.logger = logging.getLogger("kmip.server.monitor")
+        self.initialize_tracking_structures()
+
+    def stop(self):
+        self.halt_trigger.set()
+
+    def run(self):
+        """
+        Start monitoring operation policy files.
+        """
+        self.initialize_tracking_structures()
+
+        self.logger.info("Starting up the operation policy file monitor.")
+        while not self.halt_trigger.is_set():
+            time.sleep(1)
+
+            policy_files = get_json_files(self.policy_directory)
+            for f in set(policy_files) - set(self.policy_files):
+                self.file_timestamps[f] = 0
+            for f in set(self.policy_files) - set(policy_files):
+                self.logger.info("Removing policies for file: {}".format(f))
+                self.file_timestamps.pop(f, None)
+                for p in self.policy_cache.keys():
+                    self.disassociate_policy_and_file(p, f)
+                for p in [k for k, v in self.policy_map.items() if v == f]:
+                    self.restore_or_delete_policy(p)
+            self.policy_files = policy_files
+
+            for f in sorted(self.file_timestamps.keys()):
+                t = os.path.getmtime(f)
+                if t > self.file_timestamps[f]:
+                    self.logger.info("Loading policies for file: {}".format(f))
+                    self.file_timestamps[f] = t
+                    old_p = [k for k, v in self.policy_map.items() if v == f]
+                    try:
+                        new_p = operation_policy.read_policy_from_file(f)
+                    except ValueError:
+                        self.logger.error("Failure loading file: {}".format(f))
+                        self.logger.debug("", exc_info=True)
+                        continue
+                    for p in new_p.keys():
+                        self.logger.info("Loading policy: {}".format(p))
+                        if p in self.reserved_policies:
+                            self.logger.warning(
+                                "Policy '{}' overwrites a reserved policy and "
+                                "will be thrown out.".format(p)
+                            )
+                            continue
+                        if p in sorted(self.policy_store.keys()):
+                            self.logger.debug(
+                                "Policy '{}' overwrites an existing "
+                                "policy.".format(p)
+                            )
+                            if f != self.policy_map.get(p):
+                                self.policy_cache.get(p).append(
+                                    (
+                                        time.time(),
+                                        self.policy_map.get(p),
+                                        self.policy_store.get(p)
+                                    )
+                                )
+                        else:
+                            self.policy_cache[p] = []
+                        self.policy_store[p] = new_p.get(p)
+                        self.policy_map[p] = f
+                    for p in set(old_p) - set(new_p.keys()):
+                        self.disassociate_policy_and_file(p, f)
+                        self.restore_or_delete_policy(p)
+        self.logger.info("Stopping the operation policy file monitor.")
+
+    def initialize_tracking_structures(self):
+        self.file_timestamps = {}
+        self.policy_cache = {}
+        self.policy_files = []
+        self.policy_map = {}
+
+        for k in self.policy_store.keys():
+            self.policy_store.pop(k, None)
+
+    def disassociate_policy_and_file(self, policy, file_name):
+        c = self.policy_cache.get(policy, [])
+        for i in [c.index(e) for e in c if e[1] == file_name][::-1]:
+            c.pop(i)
+
+    def restore_or_delete_policy(self, policy):
+        c = self.policy_cache.get(policy, [])
+        if len(c) == 0:
+            self.logger.info("Removing policy: {}".format(policy))
+            self.policy_store.pop(policy, None)
+            self.policy_map.pop(policy, None)
+            self.policy_cache.pop(policy, None)
+        else:
+            e = c.pop()
+            self.policy_store[policy] = e[2]
+            self.policy_map[policy] = e[1]
diff --git a/kmip/tests/unit/services/server/test_monitor.py b/kmip/tests/unit/services/server/test_monitor.py
new file mode 100644
index 0000000..23aba0b
--- /dev/null
+++ b/kmip/tests/unit/services/server/test_monitor.py
@@ -0,0 +1,2070 @@
+# Copyright (c) 2018 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import mock +import multiprocessing +import os +import shutil +import signal +import tempfile +import testtools + +from kmip.core import enums +from kmip.services.server import monitor + + +class TestMonitorUtilities(testtools.TestCase): + + def setUp(self): + super(TestMonitorUtilities, self).setUp() + + self.tmp_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.tmp_dir) + + def test_get_json_files(self): + """ + Test that all files ending in .json can be collected from a directory. + """ + with open(os.path.join(self.tmp_dir, "policy_1.json"), "w") as f: + f.write('{"policy_1": {}}\n') + with open(os.path.join(self.tmp_dir, "policy_2.json"), "w") as f: + f.write('{"policy_2": {}}\n') + with open(os.path.join(self.tmp_dir, "policy_3.txt"), "w") as f: + f.write('{"policy_3": {}}\n') + + result = monitor.get_json_files(self.tmp_dir) + + self.assertIsInstance(result, list) + self.assertEqual(2, len(result)) + self.assertIn(os.path.join(self.tmp_dir, "policy_1.json"), result) + self.assertIn(os.path.join(self.tmp_dir, "policy_2.json"), result) + + +POLICY_1 = """ +{ + "policy_A": { + "groups": { + "group_A": { + "SYMMETRIC_KEY": { + "GET": "ALLOW_ALL", + "DESTROY": "ALLOW_ALL" + } + } + } + } +} +""" +POLICY_2 = """ +{ + "policy_B": { + "groups": { + "group_B": { + "SYMMETRIC_KEY": { + "GET": "ALLOW_ALL", + "LOCATE": "ALLOW_ALL", + "DESTROY": "ALLOW_ALL" + } + } + } + }, + "policy_C": { + "groups": { + "group_C": { + "SYMMETRIC_KEY": { + "GET": "ALLOW_ALL", + "DESTROY": "DISALLOW_ALL" + } + } + } + } +} +""" +POLICY_3 = """ +{ + "policy_B": { + "groups": { + "group_B": { + "SYMMETRIC_KEY": { + "GET": "DISALLOW_ALL", + "LOCATE": "DISALLOW_ALL", + "DESTROY": "DISALLOW_ALL" + } + } + } + } +} +""" +POLICY_4 = """ +{ + "default": { + "groups": { + "group_B": { + "SYMMETRIC_KEY": { + "GET": "DISALLOW_ALL", + "LOCATE": "DISALLOW_ALL", + "DESTROY": "DISALLOW_ALL" + } + } + } + } +} +""" +POLICY_5 = """ +{ + "policy_B": { + "groups": { + "group_B": { + "SYMMETRIC_KEY": { + "GET": "ALLOW_ALL", + "LOCATE": "ALLOW_ALL", + "DESTROY": "ALLOW_ALL" + } + } + } + }, + "policy_D": { + "groups": { + "group_D": { + "SYMMETRIC_KEY": { + "GET": "ALLOW_ALL", + "DESTROY": "DISALLOW_ALL" + } + } + } + } +} +""" +POLICY_6 = """ +{ + "policy_A": { + "groups": { + "group_A": { + "SYMMETRIC_KEY": { + "GET": "ALLOW_ALL", + "DESTROY": "ALLOW_ALL" + } + } + } + }, + "policy_E": { + "groups": { + "group_E": { + "SYMMETRIC_KEY": { + "GET": "ALLOW_ALL", + "CHECK": "ALLOW_OWNER", + "DESTROY": "ALLOW_ALL" + } + } + } + } +} +""" +POLICY_7 = """ +{ + "policy_D": { + "groups": { + "group_D": { + "SYMMETRIC_KEY": { + "GET": "DISALLOW_ALL", + "LOCATE": "DISALLOW_ALL", + "DESTROY": "DISALLOW_ALL" + } + } + } + } +} +""" + + +def write_file(path, file_name, content): + with open(os.path.join(path, file_name), "w") as f: + f.write("{}\n".format(content)) + + +def side_effects(effects): + for effect in effects: + if isinstance(effect, bool): + yield effect + else: + effect() + yield False + + +def build_write_effect(path, file_name, content): + def side_effect(): + write_file(path, file_name, content) + return side_effect + + +def build_delete_effect(path, file_name): + def side_effect(): + os.remove(os.path.join(path, file_name)) + return side_effect + + +class TestPolicyDirectoryMonitor(testtools.TestCase): + + def setUp(self): + super(TestPolicyDirectoryMonitor, self).setUp() + + self.tmp_dir = 
tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.tmp_dir) + + def test_init(self): + """ + Test that the PolicyDirectoryMonitor can be instantiated without error. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + + self.assertIsInstance( + m.halt_trigger, + multiprocessing.synchronize.Event + ) + self.assertEqual(self.tmp_dir, m.policy_directory) + self.assertEqual({}, m.file_timestamps) + self.assertEqual({}, m.policy_cache) + self.assertEqual([], m.policy_files) + self.assertEqual({}, m.policy_map) + self.assertEqual([], m.policy_store.keys()) + self.assertEqual(['default', 'public'], m.reserved_policies) + self.assertIsInstance(m.logger, logging.Logger) + + def test_signal_handler(self): + """ + Test that the signal handler for SIGINT and SIGTERM correctly stops + the monitor. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + m.stop = mock.MagicMock() + handler = signal.getsignal(signal.SIGINT) + + m.stop.assert_not_called() + handler(None, None) + m.stop.assert_called() + + def test_stop(self): + """ + Test that the PolicyDirectoryMonitor processes stop calls correctly. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + + self.assertFalse(m.halt_trigger.is_set()) + + m.stop() + + self.assertTrue(m.halt_trigger.is_set()) + + def test_run(self): + """ + Test that the PolicyDirectoryMonitor can load policy files and track + them properly. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + m.logger = mock.MagicMock(logging.Logger) + m.halt_trigger = mock.MagicMock(multiprocessing.synchronize.Event) + m.halt_trigger.is_set.side_effect = [False, True] + + write_file(self.tmp_dir, "policy_1.json", POLICY_1) + write_file(self.tmp_dir, "policy_2.json", POLICY_2) + + self.assertEqual({}, m.file_timestamps) + self.assertEqual({}, m.policy_cache) + self.assertEqual([], m.policy_files) + self.assertEqual({}, m.policy_map) + self.assertEqual([], m.policy_store.keys()) + + m.run() + + m.logger.info.assert_any_call( + "Starting up the operation policy file monitor." + ) + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_1.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_A") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_2.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.info.assert_any_call("Loading policy: policy_C") + m.logger.info.assert_any_call( + "Stopping the operation policy file monitor." 
+ ) + + self.assertEqual(2, len(m.policy_files)) + path = os.path.join(self.tmp_dir, "policy_1.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_A", None)) + + path = os.path.join(self.tmp_dir, "policy_2.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_B", None)) + self.assertEqual(path, m.policy_map.get("policy_C", None)) + + self.assertEqual( + { + "policy_A": [], + "policy_B": [], + "policy_C": [] + }, + m.policy_cache + ) + + self.assertEqual(3, len(m.policy_store.keys())) + self.assertEqual( + { + "groups": { + "group_A": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_A", None) + ) + self.assertEqual( + { + "groups": { + "group_B": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.LOCATE: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_B", None) + ) + self.assertEqual( + { + "groups": { + "group_C": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.DISALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_C", None) + ) + + def test_run_with_policy_overloading(self): + """ + Test that the PolicyDirectoryMonitor can load policy files and track + them properly, even when one policy overloads another existing policy. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + m.logger = mock.MagicMock(logging.Logger) + m.halt_trigger = mock.MagicMock(multiprocessing.synchronize.Event) + m.halt_trigger.is_set.side_effect = [False, True] + + write_file(self.tmp_dir, "policy_1.json", POLICY_1) + write_file(self.tmp_dir, "policy_2.json", POLICY_2) + write_file(self.tmp_dir, "policy_3.json", POLICY_3) + + self.assertEqual({}, m.file_timestamps) + self.assertEqual({}, m.policy_cache) + self.assertEqual([], m.policy_files) + self.assertEqual({}, m.policy_map) + self.assertEqual([], m.policy_store.keys()) + + m.run() + + m.logger.info.assert_any_call( + "Starting up the operation policy file monitor." + ) + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_1.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_A") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_2.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.info.assert_any_call("Loading policy: policy_C") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_3.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.debug.assert_any_call( + "Policy 'policy_B' overwrites an existing policy." + ) + m.logger.info.assert_any_call( + "Stopping the operation policy file monitor." 
+ ) + + self.assertEqual(3, len(m.policy_files)) + path = os.path.join(self.tmp_dir, "policy_1.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_A", None)) + + path = os.path.join(self.tmp_dir, "policy_2.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_C", None)) + + path = os.path.join(self.tmp_dir, "policy_3.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_B", None)) + + cache = m.policy_cache.get("policy_A") + self.assertEqual(0, len(cache)) + cache = m.policy_cache.get("policy_B") + self.assertEqual(1, len(cache)) + self.assertEqual( + os.path.join(self.tmp_dir, "policy_2.json"), + cache[0][1] + ) + self.assertEqual( + { + 'groups': { + 'group_B': { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: + enums.Policy.ALLOW_ALL, + enums.Operation.LOCATE: + enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: + enums.Policy.ALLOW_ALL + } + } + } + }, + cache[0][2] + ) + # self.assertEqual( + # { + # 'policy_A': [], + # 'policy_B': [ + # ( + # 1480043060.870089, + # os.path.join(self.tmp_dir, "policy_2.json"), + # { + # 'groups': { + # 'group_B': { + # enums.ObjectType.SYMMETRIC_KEY: { + # enums.Operation.GET: + # enums.Policy.ALLOW_ALL, + # enums.Operation.LOCATE: + # enums.Policy.ALLOW_ALL, + # enums.Operation.DESTROY: + # enums.Policy.ALLOW_ALL + # } + # } + # } + # } + # ) + # ], + # 'policy_C': [] + # }, + # m.policy_cache + # ) + + self.assertEqual(3, len(m.policy_store.keys())) + self.assertEqual( + { + "groups": { + "group_A": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_A", None) + ) + self.assertEqual( + { + "groups": { + "group_B": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.DISALLOW_ALL, + enums.Operation.LOCATE: enums.Policy.DISALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.DISALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_B", None) + ) + self.assertEqual( + { + "groups": { + "group_C": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.DISALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_C", None) + ) + + def test_run_with_policy_load_failure(self): + """ + Test that the PolicyDirectoryMonitor can load policy files and track + them properly, even when one policy can't be loaded properly. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + m.logger = mock.MagicMock(logging.Logger) + m.halt_trigger = mock.MagicMock(multiprocessing.synchronize.Event) + m.halt_trigger.is_set.side_effect = [False, True] + + write_file(self.tmp_dir, "policy_1.json", POLICY_1) + write_file(self.tmp_dir, "policy_2.json", "not a JSON blob") + + self.assertEqual({}, m.file_timestamps) + self.assertEqual({}, m.policy_cache) + self.assertEqual([], m.policy_files) + self.assertEqual({}, m.policy_map) + self.assertEqual([], m.policy_store.keys()) + + m.run() + + m.logger.info.assert_any_call( + "Starting up the operation policy file monitor." 
+ ) + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_1.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_A") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_2.json") + ) + ) + m.logger.error.assert_any_call( + "Failure loading file: {}".format( + os.path.join(self.tmp_dir, "policy_2.json") + ) + ) + m.logger.debug.assert_called() + m.logger.info.assert_any_call( + "Stopping the operation policy file monitor." + ) + + self.assertEqual(2, len(m.policy_files)) + path = os.path.join(self.tmp_dir, "policy_1.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_A", None)) + + path = os.path.join(self.tmp_dir, "policy_2.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + + self.assertEqual( + { + "policy_A": [] + }, + m.policy_cache + ) + + self.assertEqual(1, len(m.policy_store.keys())) + self.assertEqual( + { + "groups": { + "group_A": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_A", None) + ) + + def test_run_with_policy_load_failure_and_fix(self): + """ + Test that the PolicyDirectoryMonitor can load policy files and track + them properly, even when one policy can't be loaded properly and is + then fixed while tracking is active. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + m.logger = mock.MagicMock(logging.Logger) + m.halt_trigger = mock.MagicMock(multiprocessing.synchronize.Event) + m.halt_trigger.is_set.side_effect = side_effects( + [ + False, + build_write_effect( + self.tmp_dir, + "policy_2.json", + "invalid JSON" + ), + False, + build_write_effect(self.tmp_dir, "policy_2.json", POLICY_2), + False, + True + ] + ) + + write_file(self.tmp_dir, "policy_1.json", POLICY_1) + + self.assertEqual({}, m.file_timestamps) + self.assertEqual({}, m.policy_cache) + self.assertEqual([], m.policy_files) + self.assertEqual({}, m.policy_map) + self.assertEqual([], m.policy_store.keys()) + + m.run() + + m.logger.info.assert_any_call( + "Starting up the operation policy file monitor." + ) + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_1.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_A") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_2.json") + ) + ) + m.logger.error.assert_any_call( + "Failure loading file: {}".format( + os.path.join(self.tmp_dir, "policy_2.json") + ) + ) + m.logger.debug.assert_called() + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_2.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.info.assert_any_call("Loading policy: policy_C") + m.logger.info.assert_any_call( + "Stopping the operation policy file monitor." 
+ ) + + self.assertEqual(2, len(m.policy_files)) + path = os.path.join(self.tmp_dir, "policy_1.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_A", None)) + + path = os.path.join(self.tmp_dir, "policy_2.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_B", None)) + self.assertEqual(path, m.policy_map.get("policy_C", None)) + + self.assertEqual( + { + "policy_A": [], + "policy_B": [], + "policy_C": [] + }, + m.policy_cache + ) + + self.assertEqual(3, len(m.policy_store.keys())) + self.assertEqual( + { + "groups": { + "group_A": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_A", None) + ) + self.assertEqual( + { + "groups": { + "group_B": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.LOCATE: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_B", None) + ) + self.assertEqual( + { + "groups": { + "group_C": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.DISALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_C", None) + ) + + def test_run_with_policy_overloading_reserved(self): + """ + Test that the PolicyDirectoryMonitor can load policy files and track + them properly, even when one policy can't be loaded properly. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + m.logger = mock.MagicMock(logging.Logger) + m.halt_trigger = mock.MagicMock(multiprocessing.synchronize.Event) + m.halt_trigger.is_set.side_effect = [False, True] + + write_file(self.tmp_dir, "policy_3.json", POLICY_3) + write_file(self.tmp_dir, "policy_4.json", POLICY_4) + + self.assertEqual({}, m.file_timestamps) + self.assertEqual({}, m.policy_cache) + self.assertEqual([], m.policy_files) + self.assertEqual({}, m.policy_map) + self.assertEqual([], m.policy_store.keys()) + + m.run() + + m.logger.info.assert_any_call( + "Starting up the operation policy file monitor." + ) + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_3.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_4.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: default") + m.logger.warning.assert_any_call( + "Policy 'default' overwrites a reserved policy and will be " + "thrown out." + ) + m.logger.info.assert_any_call( + "Stopping the operation policy file monitor." 
+ ) + + self.assertEqual(2, len(m.policy_files)) + path = os.path.join(self.tmp_dir, "policy_3.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_B", None)) + + path = os.path.join(self.tmp_dir, "policy_4.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + + self.assertEqual( + { + "policy_B": [] + }, + m.policy_cache + ) + + self.assertEqual(1, len(m.policy_store.keys())) + self.assertEqual( + { + "groups": { + "group_B": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.DISALLOW_ALL, + enums.Operation.LOCATE: enums.Policy.DISALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.DISALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_B", None) + ) + + def test_run_with_edit_modifying_existing_file(self): + """ + Test that the PolicyDirectoryMonitor can load policy files and track + them properly, even when an existing policy file is modified while + tracking is active. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + m.logger = mock.MagicMock(logging.Logger) + m.halt_trigger = mock.MagicMock(multiprocessing.synchronize.Event) + m.halt_trigger.is_set.side_effect = side_effects( + [ + False, + build_write_effect(self.tmp_dir, "policy_2.json", POLICY_5), + True + ] + ) + + write_file(self.tmp_dir, "policy_1.json", POLICY_1) + write_file(self.tmp_dir, "policy_2.json", POLICY_2) + + self.assertEqual({}, m.file_timestamps) + self.assertEqual({}, m.policy_cache) + self.assertEqual([], m.policy_files) + self.assertEqual({}, m.policy_map) + self.assertEqual([], m.policy_store.keys()) + + m.run() + + m.logger.info.assert_any_call( + "Starting up the operation policy file monitor." + ) + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_1.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_A") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_2.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.info.assert_any_call("Loading policy: policy_C") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_2.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.info.assert_any_call("Loading policy: policy_D") + m.logger.info.assert_any_call("Removing policy: policy_C") + m.logger.info.assert_any_call( + "Stopping the operation policy file monitor." 
+ ) + + self.assertEqual(2, len(m.policy_files)) + path = os.path.join(self.tmp_dir, "policy_1.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_A", None)) + + path = os.path.join(self.tmp_dir, "policy_2.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_B", None)) + self.assertEqual(path, m.policy_map.get("policy_D", None)) + + self.assertEqual( + { + "policy_A": [], + "policy_B": [], + "policy_D": [] + }, + m.policy_cache + ) + + self.assertEqual(3, len(m.policy_store.keys())) + self.assertEqual( + { + "groups": { + "group_A": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_A", None) + ) + self.assertEqual( + { + "groups": { + "group_B": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.LOCATE: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_B", None) + ) + self.assertEqual( + { + "groups": { + "group_D": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.DISALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_D", None) + ) + + def test_run_with_edit_adding_to_existing_file(self): + """ + Test that the PolicyDirectoryMonitor can load policy files and track + them properly, even when an existing policy file is added to while + tracking is active. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + m.logger = mock.MagicMock(logging.Logger) + m.halt_trigger = mock.MagicMock(multiprocessing.synchronize.Event) + m.halt_trigger.is_set.side_effect = side_effects( + [ + False, + build_write_effect(self.tmp_dir, "policy_1.json", POLICY_6), + True + ] + ) + + write_file(self.tmp_dir, "policy_1.json", POLICY_1) + write_file(self.tmp_dir, "policy_2.json", POLICY_2) + + self.assertEqual({}, m.file_timestamps) + self.assertEqual({}, m.policy_cache) + self.assertEqual([], m.policy_files) + self.assertEqual({}, m.policy_map) + self.assertEqual([], m.policy_store.keys()) + + m.run() + + m.logger.info.assert_any_call( + "Starting up the operation policy file monitor." + ) + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_1.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_A") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_2.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.info.assert_any_call("Loading policy: policy_C") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_1.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_A") + m.logger.info.assert_any_call("Loading policy: policy_E") + m.logger.info.assert_any_call( + "Stopping the operation policy file monitor." 
+ ) + + self.assertEqual(2, len(m.policy_files)) + path = os.path.join(self.tmp_dir, "policy_1.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_A", None)) + self.assertEqual(path, m.policy_map.get("policy_E", None)) + + path = os.path.join(self.tmp_dir, "policy_2.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_B", None)) + self.assertEqual(path, m.policy_map.get("policy_C", None)) + + self.assertEqual( + { + "policy_A": [], + "policy_B": [], + "policy_C": [], + "policy_E": [] + }, + m.policy_cache + ) + + self.assertEqual(4, len(m.policy_store.keys())) + self.assertEqual( + { + "groups": { + "group_A": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_A", None) + ) + self.assertEqual( + { + "groups": { + "group_B": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.LOCATE: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_B", None) + ) + self.assertEqual( + { + "groups": { + "group_C": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.DISALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_C", None) + ) + self.assertEqual( + { + "groups": { + "group_E": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.CHECK: enums.Policy.ALLOW_OWNER, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_E", None) + ) + + def test_run_with_edit_deleting_from_existing_file(self): + """ + Test that the PolicyDirectoryMonitor can load policy files and track + them properly, even when an existing policy file has content removed + while tracking is active. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + m.logger = mock.MagicMock(logging.Logger) + m.halt_trigger = mock.MagicMock(multiprocessing.synchronize.Event) + m.halt_trigger.is_set.side_effect = side_effects( + [ + False, + build_write_effect(self.tmp_dir, "policy_1.json", POLICY_1), + True + ] + ) + + write_file(self.tmp_dir, "policy_1.json", POLICY_6) + write_file(self.tmp_dir, "policy_2.json", POLICY_2) + + self.assertEqual({}, m.file_timestamps) + self.assertEqual({}, m.policy_cache) + self.assertEqual([], m.policy_files) + self.assertEqual({}, m.policy_map) + self.assertEqual([], m.policy_store.keys()) + + m.run() + + m.logger.info.assert_any_call( + "Starting up the operation policy file monitor." 
+ ) + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_1.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_A") + m.logger.info.assert_any_call("Loading policy: policy_E") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_2.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.info.assert_any_call("Loading policy: policy_C") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_1.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_A") + m.logger.info.assert_any_call("Removing policy: policy_E") + m.logger.info.assert_any_call( + "Stopping the operation policy file monitor." + ) + + self.assertEqual(2, len(m.policy_files)) + path = os.path.join(self.tmp_dir, "policy_1.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_A", None)) + + path = os.path.join(self.tmp_dir, "policy_2.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_B", None)) + self.assertEqual(path, m.policy_map.get("policy_C", None)) + + self.assertEqual( + { + "policy_A": [], + "policy_B": [], + "policy_C": [] + }, + m.policy_cache + ) + + self.assertEqual(3, len(m.policy_store.keys())) + self.assertEqual( + { + "groups": { + "group_A": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_A", None) + ) + self.assertEqual( + { + "groups": { + "group_B": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.LOCATE: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_B", None) + ) + self.assertEqual( + { + "groups": { + "group_C": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.DISALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_C", None) + ) + + def test_run_with_deleting_existing_file(self): + """ + Test that the PolicyDirectoryMonitor can load policy files and track + them properly, even when an existing policy file is removed while + tracking is active. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + m.logger = mock.MagicMock(logging.Logger) + m.halt_trigger = mock.MagicMock(multiprocessing.synchronize.Event) + m.halt_trigger.is_set.side_effect = side_effects( + [ + False, + build_delete_effect(self.tmp_dir, "policy_1.json"), + True + ] + ) + + write_file(self.tmp_dir, "policy_1.json", POLICY_6) + write_file(self.tmp_dir, "policy_2.json", POLICY_2) + + self.assertEqual({}, m.file_timestamps) + self.assertEqual({}, m.policy_cache) + self.assertEqual([], m.policy_files) + self.assertEqual({}, m.policy_map) + self.assertEqual([], m.policy_store.keys()) + + m.run() + + m.logger.info.assert_any_call( + "Starting up the operation policy file monitor." 
+ ) + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_1.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_A") + m.logger.info.assert_any_call("Loading policy: policy_E") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_2.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.info.assert_any_call("Loading policy: policy_C") + m.logger.info.assert_any_call( + "Removing policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_1.json") + ) + ) + m.logger.info.assert_any_call("Removing policy: policy_A") + m.logger.info.assert_any_call("Removing policy: policy_E") + m.logger.info.assert_any_call( + "Stopping the operation policy file monitor." + ) + + self.assertEqual(1, len(m.policy_files)) + path = os.path.join(self.tmp_dir, "policy_2.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(2, len(m.policy_map.keys())) + self.assertEqual(path, m.policy_map.get("policy_B", None)) + self.assertEqual(path, m.policy_map.get("policy_C", None)) + + self.assertEqual( + { + "policy_B": [], + "policy_C": [] + }, + m.policy_cache + ) + + self.assertEqual(2, len(m.policy_store.keys())) + self.assertEqual( + { + "groups": { + "group_B": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.LOCATE: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_B", None) + ) + self.assertEqual( + { + "groups": { + "group_C": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.DISALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_C", None) + ) + + def test_run_with_adding_new_file(self): + """ + Test that the PolicyDirectoryMonitor can load policy files and track + them properly, even when a new policy file is added while tracking is + active. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + m.logger = mock.MagicMock(logging.Logger) + m.halt_trigger = mock.MagicMock(multiprocessing.synchronize.Event) + m.halt_trigger.is_set.side_effect = side_effects( + [ + False, + build_write_effect(self.tmp_dir, "policy_2.json", POLICY_2), + True + ] + ) + + write_file(self.tmp_dir, "policy_1.json", POLICY_1) + + self.assertEqual({}, m.file_timestamps) + self.assertEqual({}, m.policy_cache) + self.assertEqual([], m.policy_files) + self.assertEqual({}, m.policy_map) + self.assertEqual([], m.policy_store.keys()) + + m.run() + + m.logger.info.assert_any_call( + "Starting up the operation policy file monitor." + ) + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_1.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_A") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_2.json") + ) + ) + m.logger.debug.assert_not_called() + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.info.assert_any_call("Loading policy: policy_C") + m.logger.info.assert_any_call( + "Stopping the operation policy file monitor." 
+ ) + + self.assertEqual(2, len(m.policy_files)) + path = os.path.join(self.tmp_dir, "policy_1.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_A", None)) + + path = os.path.join(self.tmp_dir, "policy_2.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_B", None)) + self.assertEqual(path, m.policy_map.get("policy_C", None)) + + self.assertEqual( + { + "policy_A": [], + "policy_B": [], + "policy_C": [] + }, + m.policy_cache + ) + + self.assertEqual(3, len(m.policy_store.keys())) + self.assertEqual( + { + "groups": { + "group_A": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_A", None) + ) + self.assertEqual( + { + "groups": { + "group_B": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.LOCATE: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_B", None) + ) + self.assertEqual( + { + "groups": { + "group_C": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.DISALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_C", None) + ) + + def test_run_with_adding_new_file_overloading(self): + """ + Test that the PolicyDirectoryMonitor can load policy files and track + them properly, even when new policy files are added overwritting + existing policies while tracking is active. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + m.logger = mock.MagicMock(logging.Logger) + m.halt_trigger = mock.MagicMock(multiprocessing.synchronize.Event) + m.halt_trigger.is_set.side_effect = side_effects( + [ + False, + build_write_effect(self.tmp_dir, "policy_3.json", POLICY_2), + build_write_effect(self.tmp_dir, "policy_4.json", POLICY_3), + True + ] + ) + + write_file(self.tmp_dir, "policy_1.json", POLICY_1) + write_file(self.tmp_dir, "policy_2.json", POLICY_2) + + self.assertEqual({}, m.file_timestamps) + self.assertEqual({}, m.policy_cache) + self.assertEqual([], m.policy_files) + self.assertEqual({}, m.policy_map) + self.assertEqual([], m.policy_store.keys()) + + m.run() + + m.logger.info.assert_any_call( + "Starting up the operation policy file monitor." + ) + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_1.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_A") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_2.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.info.assert_any_call("Loading policy: policy_C") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_3.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.debug.assert_any_call( + "Policy 'policy_B' overwrites an existing policy." + ) + m.logger.info.assert_any_call("Loading policy: policy_C") + m.logger.debug.assert_any_call( + "Policy 'policy_C' overwrites an existing policy." 
+ ) + + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_4.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.debug.assert_any_call( + "Policy 'policy_B' overwrites an existing policy." + ) + + m.logger.info.assert_any_call( + "Stopping the operation policy file monitor." + ) + + self.assertEqual(4, len(m.policy_files)) + path = os.path.join(self.tmp_dir, "policy_1.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_A", None)) + + path = os.path.join(self.tmp_dir, "policy_2.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + + path = os.path.join(self.tmp_dir, "policy_3.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_C", None)) + + path = os.path.join(self.tmp_dir, "policy_4.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_B", None)) + + self.assertEqual([], m.policy_cache.get("policy_A")) + cache = m.policy_cache.get("policy_B") + self.assertEqual(2, len(cache)) + self.assertEqual( + os.path.join(self.tmp_dir, "policy_2.json"), + cache[0][1] + ) + self.assertEqual( + { + 'groups': { + 'group_B': { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: + enums.Policy.ALLOW_ALL, + enums.Operation.LOCATE: + enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: + enums.Policy.ALLOW_ALL + } + } + } + }, + cache[0][2] + ) + self.assertEqual( + os.path.join(self.tmp_dir, "policy_3.json"), + cache[1][1] + ) + self.assertEqual( + { + 'groups': { + 'group_B': { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: + enums.Policy.ALLOW_ALL, + enums.Operation.LOCATE: + enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: + enums.Policy.ALLOW_ALL + } + } + } + }, + cache[1][2] + ) + cache = m.policy_cache.get("policy_C") + self.assertEqual(1, len(cache)) + self.assertEqual( + os.path.join(self.tmp_dir, "policy_2.json"), + cache[0][1] + ) + self.assertEqual( + { + 'groups': { + 'group_C': { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: + enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: + enums.Policy.DISALLOW_ALL + } + } + } + }, + cache[0][2] + ) + + self.assertEqual(3, len(m.policy_store.keys())) + self.assertEqual( + { + "groups": { + "group_A": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_A", None) + ) + self.assertEqual( + { + "groups": { + "group_B": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.DISALLOW_ALL, + enums.Operation.LOCATE: enums.Policy.DISALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.DISALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_B", None) + ) + self.assertEqual( + { + "groups": { + "group_C": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.DISALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_C", None) + ) + + def test_run_with_adding_new_file_editing_overloading(self): + """ + Test that the PolicyDirectoryMonitor can load policy files and track + 
them properly, even when new policy files are added overwritting + existing policies while tracking is active. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + m.logger = mock.MagicMock(logging.Logger) + m.halt_trigger = mock.MagicMock(multiprocessing.synchronize.Event) + m.halt_trigger.is_set.side_effect = side_effects( + [ + False, + build_write_effect(self.tmp_dir, "policy_3.json", POLICY_2), + build_write_effect(self.tmp_dir, "policy_4.json", POLICY_3), + build_delete_effect(self.tmp_dir, "policy_2.json"), + build_write_effect(self.tmp_dir, "policy_4.json", POLICY_7), + True + ] + ) + + write_file(self.tmp_dir, "policy_1.json", POLICY_1) + write_file(self.tmp_dir, "policy_2.json", POLICY_2) + + self.assertEqual({}, m.file_timestamps) + self.assertEqual({}, m.policy_cache) + self.assertEqual([], m.policy_files) + self.assertEqual({}, m.policy_map) + self.assertEqual([], m.policy_store.keys()) + + m.run() + + m.logger.info.assert_any_call( + "Starting up the operation policy file monitor." + ) + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_1.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_A") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_2.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.info.assert_any_call("Loading policy: policy_C") + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_3.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.debug.assert_any_call( + "Policy 'policy_B' overwrites an existing policy." + ) + m.logger.info.assert_any_call("Loading policy: policy_C") + m.logger.debug.assert_any_call( + "Policy 'policy_C' overwrites an existing policy." + ) + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_4.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_B") + m.logger.debug.assert_any_call( + "Policy 'policy_B' overwrites an existing policy." + ) + m.logger.info.assert_any_call( + "Removing policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_2.json") + ) + ) + m.logger.info.assert_any_call( + "Loading policies for file: {}".format( + os.path.join(self.tmp_dir, "policy_4.json") + ) + ) + m.logger.info.assert_any_call("Loading policy: policy_D") + m.logger.info.assert_any_call( + "Stopping the operation policy file monitor." 
+ ) + + self.assertEqual(3, len(m.policy_files)) + path = os.path.join(self.tmp_dir, "policy_1.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_A", None)) + + path = os.path.join(self.tmp_dir, "policy_3.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_B", None)) + self.assertEqual(path, m.policy_map.get("policy_C", None)) + + path = os.path.join(self.tmp_dir, "policy_4.json") + self.assertEqual( + os.path.getmtime(path), + m.file_timestamps.get(path, None) + ) + self.assertIn(path, m.policy_files) + self.assertEqual(path, m.policy_map.get("policy_D", None)) + + self.assertEqual([], m.policy_cache.get("policy_A")) + self.assertEqual([], m.policy_cache.get("policy_B")) + self.assertEqual([], m.policy_cache.get("policy_C")) + + self.assertEqual(4, len(m.policy_store.keys())) + self.assertEqual( + { + "groups": { + "group_A": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_A", None) + ) + self.assertEqual( + { + "groups": { + "group_B": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.LOCATE: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.ALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_B", None) + ) + self.assertEqual( + { + "groups": { + "group_C": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.ALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.DISALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_C", None) + ) + self.assertEqual( + { + "groups": { + "group_D": { + enums.ObjectType.SYMMETRIC_KEY: { + enums.Operation.GET: enums.Policy.DISALLOW_ALL, + enums.Operation.LOCATE: enums.Policy.DISALLOW_ALL, + enums.Operation.DESTROY: enums.Policy.DISALLOW_ALL + } + } + } + }, + m.policy_store.get("policy_D", None) + ) + + def test_initialize_tracking_structures(self): + """ + Test that the PolicyDirectoryMonitor can correctly initialize/reset the + various tracking structures used for file monitoring. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + + m.file_timestamps["a"] = 1234 + m.policy_cache["a"] = (123.12, "b", {"c": 2}) + m.policy_files = ["a", "b"] + m.policy_map["a"] = "b" + m.policy_store["a"] = {"c": 2} + + m.initialize_tracking_structures() + + self.assertEqual({}, m.file_timestamps) + self.assertEqual({}, m.policy_cache) + self.assertEqual([], m.policy_files) + self.assertEqual({}, m.policy_map) + self.assertEqual([], m.policy_store.keys()) + + def test_disassociate_policy_and_file(self): + """ + Test that the PolicyDirectoryMonitor can correctly unlink a policy and + a policy file in its tracking structures. 
+ """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + + m.policy_cache = { + "policy_A": [ + ( + 1480043060.870089, + os.path.join(self.tmp_dir, "policy_1.json"), + {} + ), + ( + 1480043062.02171, + os.path.join(self.tmp_dir, "policy_2.json"), + {} + ), + ( + 1480043062.645776, + os.path.join(self.tmp_dir, "policy_1.json"), + {} + ), + ( + 1480043063.453713, + os.path.join(self.tmp_dir, "policy_3.json"), + {} + ) + ], + "policy_B": [ + ( + 1480043123.65311, + os.path.join(self.tmp_dir, "policy_1.json"), + {} + ) + ] + } + + m.disassociate_policy_and_file( + "policy_A", + os.path.join(self.tmp_dir, "policy_1.json") + ) + + self.assertEqual( + [ + ( + 1480043062.02171, + os.path.join(self.tmp_dir, "policy_2.json"), + {} + ), + ( + 1480043063.453713, + os.path.join(self.tmp_dir, "policy_3.json"), + {} + ) + ], + m.policy_cache.get("policy_A", []) + ) + self.assertEqual( + [ + ( + 1480043123.65311, + os.path.join(self.tmp_dir, "policy_1.json"), + {} + ) + ], + m.policy_cache.get("policy_B", []) + ) + + def test_restore_or_delete_policy_restore(self): + """ + Test that the PolicyDirectoryMonitor can correctly restore policy data + upon a policy file change. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + m.logger = mock.MagicMock(logging.Logger) + + m.policy_cache = { + "policy_A": [ + ( + 1480043060.870089, + os.path.join(self.tmp_dir, "policy_1.json"), + {'{"policy_1"}'} + ), + ( + 1480043062.02171, + os.path.join(self.tmp_dir, "policy_2.json"), + {'{"policy_2"}'} + ), + ( + 1480043063.453713, + os.path.join(self.tmp_dir, "policy_3.json"), + {'{"policy_3"}'} + ) + ] + } + m.policy_store["policy_A"] = {'{"policy_4"}'} + m.policy_map["policy_A"] = os.path.join(self.tmp_dir, "policy_4.json") + + m.restore_or_delete_policy("policy_A") + + m.logger.info.assert_not_called() + self.assertEqual( + [ + ( + 1480043060.870089, + os.path.join(self.tmp_dir, "policy_1.json"), + {'{"policy_1"}'} + ), + ( + 1480043062.02171, + os.path.join(self.tmp_dir, "policy_2.json"), + {'{"policy_2"}'} + ) + ], + m.policy_cache.get("policy_A", []) + ) + self.assertEqual( + {'{"policy_3"}'}, + m.policy_store.get("policy_A", {}) + ) + self.assertEqual( + os.path.join(self.tmp_dir, "policy_3.json"), + m.policy_map.get("policy_A", None) + ) + + def test_restore_or_delete_policy_delete(self): + """ + Test that the PolicyDirectoryMonitor can correctly delete policy data + upon a policy file change. + """ + m = monitor.PolicyDirectoryMonitor( + self.tmp_dir, + multiprocessing.Manager().dict() + ) + m.logger = mock.MagicMock(logging.Logger) + + m.policy_cache = { + "policy_A": [] + } + m.policy_store["policy_A"] = {'{"policy_4"}'} + m.policy_map["policy_A"] = os.path.join(self.tmp_dir, "policy_4.json") + + m.restore_or_delete_policy("policy_A") + + m.logger.info.assert_called_once_with("Removing policy: policy_A") + self.assertNotIn("policy_A", m.policy_cache.keys()) + self.assertNotIn("policy_A", m.policy_store.keys()) + self.assertNotIn("policy_A", m.policy_map.keys())