diff --git a/ipsframework/ips.py b/ipsframework/ips.py
index d8ef476f..efe49b9d 100755
--- a/ipsframework/ips.py
+++ b/ipsframework/ips.py
@@ -539,6 +539,30 @@ def run(self):
         self.event_service._print_stats()
         return True
 
+    def initiate_new_simulation(self, sim_name):
+        '''
+        This is to be called by the configuration manager as part of dynamically creating
+        a new simulation. The purpose here is to initiate the method invocations for the
+        framework-visible components in the new simulation
+        '''
+        comp_list = self.config_manager.get_simulation_components(sim_name)
+        msg_list = []
+        self._send_monitor_event(sim_name, 'IPS_START', 'Starting IPS Simulation', ok=True)
+        self._send_dynamic_sim_event(sim_name=sim_name, event_type='IPS_START')
+        for comp_id in comp_list:
+            for method in ['init', 'step', 'finalize']:
+                req_msg = ServiceRequestMessage(self.component_id,
+                                                self.component_id, comp_id,
+                                                'init_call', method, 0)
+                msg_list.append(req_msg)
+
+        # send off first round of invocations...
+        msg = msg_list.pop(0)
+        self.debug('Framework sending message %s ', msg.__dict__)
+        call_id = self.task_manager.init_call(msg, manage_return=False)
+        self.call_queue_map[call_id] = msg_list
+        self.outstanding_calls_list.append(call_id)
+
     def _send_monitor_event(self, sim_name='', eventType='', comment='', ok='True'):
         """
         Publish a portal monitor event to the *_IPS_MONITOR* event topic.
diff --git a/ipsframework/ips_dakota_client.py b/ipsframework/ips_dakota_client.py
index ef472ead..3ecacaac 100755
--- a/ipsframework/ips_dakota_client.py
+++ b/ipsframework/ips_dakota_client.py
@@ -101,8 +101,8 @@ def run(self):
 
 
 def main(argv=None):
-    in_file = argv[1]
-    out_file = argv[2]
+    in_file = sys.argv[1]
+    out_file = sys.argv[2]
     debug = False
     log_file_name = None
     try:
@@ -126,4 +126,4 @@ def main(argv=None):
 
 if __name__ == "__main__":
     sys.stdout.flush()
-    sys.exit(main(sys.argv))
+    sys.exit(main())
diff --git a/ipsframework/ips_dakota_dynamic.py b/ipsframework/ips_dakota_dynamic.py
index 27629eb8..dd830244 100755
--- a/ipsframework/ips_dakota_dynamic.py
+++ b/ipsframework/ips_dakota_dynamic.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 # -------------------------------------------------------------------------------
 # Copyright 2006-2021 UT-Battelle, LLC. See LICENSE for more information.
 # -------------------------------------------------------------------------------
@@ -89,10 +89,6 @@ def run(self):  # noqa: C901
                 if k not in list(self.template_conf.keys()) and not any(x in v for x in '{}()$'):
                     self.template_conf[k] = v
 
-        alt_paths.append(self.template_conf['IPS_ROOT'])
-        alt_paths.append(os.path.join(self.template_conf['IPS_ROOT'], 'bin'))
-        alt_paths.append(os.path.join(self.template_conf['IPS_ROOT'], 'framework/src'))
-
         new_dakota_config = self.dakota_cfg + '.resolved'
         comp_vars = {}
         for line in self.dakota_conf:
@@ -113,7 +109,7 @@ def run(self):  # noqa: C901
             elif tokens[0] == 'analysis_driver':
                 raw_prog = line.split('=')[1]
                 prog = raw_prog.strip(' "\'')
-                exec_prog = which(prog, alt_paths)
+                exec_prog = which(prog)
                 if not exec_prog:
                     raise Exception('Error: analysis driver %s not found in path' % prog)
                 line.replace(prog, exec_prog)
@@ -136,23 +132,13 @@ def run(self):  # noqa: C901
         driver_conf['SUB_CLASS'] = 'BRIDGE'
         driver_conf['NAME'] = 'Driver'
         driver_conf['NPROC'] = 1
-        driver_conf['BIN_PATH'] = os.path.join(self.template_conf['IPS_ROOT'], 'bin')
+        driver_conf['BIN_PATH'] = ''
         driver_conf['BIN_DIR'] = driver_conf['BIN_PATH']
         driver_conf['INPUT_DIR'] = '/dev/null'
         driver_conf['INPUT_FILES'] = ''
         driver_conf['OUTPUT_FILES'] = ''
-        script = os.path.join(self.template_conf['IPS_ROOT'],
-                              'bin', 'dakota_bridge.py')
-        if os.path.isfile(script):
-            driver_conf['SCRIPT'] = script
-        else:
-            script = os.path.join(self.template_conf['IPS_ROOT'], 'framework',
-                                  'src', 'dakota_bridge.py')
-            if os.path.isfile(script):
-                driver_conf['SCRIPT'] = script
-            else:
-                raise Exception('Error: unable to locate dakota_bridge.py in \
-IPS_ROOT/bin or IPS_ROOT/framework/src')
+        driver_conf['SCRIPT'] = ""
+        driver_conf['MODULE'] = 'ipsframework.dakota_bridge'
         self.master_conf['DAKOTA_BRIDGE'] = driver_conf
 
         for comp in comp_vars:
@@ -200,8 +186,8 @@ def run(self):  # noqa: C901
             if not os.path.isfile(self.restart_file):
                 raise Exception("Error accessing DAKOTA restart file %s" % (self.restart_file))
 
-        cmd = '%s --all --simulation=%s --platform=%s --verbose' % (ips, self.master_conf.filename,
-                                                                    os.environ['IPS_DAKOTA_platform'])
+        cmd = '%s --simulation=%s --platform=%s --verbose' % (ips, self.master_conf.filename,
+                                                              os.environ['IPS_DAKOTA_platform'])
         if self.log_file:
             cmd += ' --log=' + self.log_file
 
@@ -209,7 +195,7 @@ def run(self):  # noqa: C901
             cmd += ' --debug'
         print('cmd =', cmd)
 
-        ips_server_proc = subprocess.Popen(cmd)
+        ips_server_proc = subprocess.Popen(cmd.split())
         print('%s Launched IPS' % (time.strftime("%b %d %Y %H:%M:%S", time.localtime())))
         sys.stdout.flush()
         msg = {'SIMSTATUS': 'START'}
@@ -240,7 +226,7 @@ def run(self):  # noqa: C901
         else:
             command = 'dakota %s ' % new_dakota_config
         dakota_logfile = open('dakota_%s.log' % (str(os.getpid())), 'w')
-        proc = subprocess.Popen(command, stdout=dakota_logfile, stderr=subprocess.STDOUT)
+        proc = subprocess.Popen(command.split(), stdout=dakota_logfile, stderr=subprocess.STDOUT)
         print('%s Launched DAKOTA' % (time.strftime("%b %d %Y %H:%M:%S", time.localtime())))
         sys.stdout.flush()
         proc.wait()
diff --git a/setup.cfg b/setup.cfg
index fbcf2ced..24af6d0b 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -22,7 +22,6 @@ omit =
     */configobj.py
     */six.py
     */_version.py
-    */*dakota*
     */utils/HTML.py
 
 [versioneer]
diff --git a/setup.py b/setup.py
index 71df4a25..1150cc0f 100644
--- a/setup.py
+++ b/setup.py
@@ -44,7 +44,9 @@
     packages=find_packages(),
     entry_points={
         'console_scripts': [
-            'ips.py = ipsframework.ips:main'
+            'ips.py = ipsframework.ips:main',
+            'ips_dakota_dynamic.py = ipsframework.ips_dakota_dynamic:main',
+            'ips_dakota_client.py = ipsframework.ips_dakota_client:main'
         ]
     },
     classifiers=[
diff --git a/tests/dakota/README b/tests/dakota/README
new file mode 100644
index 00000000..03d902f8
--- /dev/null
+++ b/tests/dakota/README
@@ -0,0 +1,2 @@
+To run this test manually, do:
+ips_dakota_dynamic.py --dakotaconfig=dakota_test_Rosenbrock.in --simulation=dakota_test_Rosenbrock.ips --platform=workstation.conf
diff --git a/tests/dakota/dakota_test_Rosenbrock.in b/tests/dakota/dakota_test_Rosenbrock.in
new file mode 100644
index 00000000..5ef7ebdf
--- /dev/null
+++ b/tests/dakota/dakota_test_Rosenbrock.in
@@ -0,0 +1,36 @@
+# DAKOTA INPUT FILE - dakota_rosenbrock_syscall.in
+
+strategy
+  single_method
+    graphics
+    tabular_graphics_data
+
+method
+  conmin_frcg
+    max_iterations = 100
+    convergence_tolerance = 1e-4
+    speculative
+
+model
+  single
+
+variables
+  continuous_design = 2
+    initial_point    0.5   1.0
+    lower_bounds     0.0   0.0
+    upper_bounds     2.0   2.0
+    descriptors      'ROSE__X1'   "ROSE__X2"
+
+interface
+  fork asynchronous evaluation_concurrency = 10, file_tag, file_save
+    analysis_driver = 'ips_dakota_client.py'
+    parameters_file = 'params.in'
+    results_file = 'results.out'
+
+responses
+  num_objective_functions = 1
+  numerical_gradients
+    method_source dakota
+    interval_type forward
+    fd_gradient_step_size = 1.e-5
+  no_hessians
diff --git a/tests/dakota/dakota_test_Rosenbrock.ips b/tests/dakota/dakota_test_Rosenbrock.ips
new file mode 100644
index 00000000..a241cc36
--- /dev/null
+++ b/tests/dakota/dakota_test_Rosenbrock.ips
@@ -0,0 +1,68 @@
+RUN_ID = DAKOTA_Rosenbrock    # Identifier for this simulation run
+TOKAMAK_ID = TEST
+SHOT_NUMBER = 1               # Numerical identifier for specific case
+
+SIM_NAME = ${RUN_ID}_${TOKAMAK_ID}_${SHOT_NUMBER}    # Name of current simulation
+SIM_ROOT = $PWD/${SIM_NAME}                          # Where to put results from this simulation
+
+LOG_FILE = $SIM_ROOT/${RUN_ID}.log
+LOG_LEVEL = DEBUG
+
+SIMULATION_MODE = NORMAL
+
+# A run comment picked up by the portal
+RUN_COMMENT = Testing dakota
+
+# Specification of plasma state files
+
+# Where to put plasma state files as the simulation evolves
+PLASMA_STATE_WORK_DIR = $SIM_ROOT/work/plasma_state
+
+# Specify what files constitute the plasma state - N.B. not all components need all files
+PLASMA_STATE_FILES =
+
+# Names of ports to be used.  An implementation and configuration must be specified for
+# each port
+
+[PORTS]
+   NAMES = DRIVER
+
+# DRIVER port is called by the framework.  It is required; the framework raises an exception if it is missing.
+
+   [[DRIVER]]      # REQUIRED Port section
+      IMPLEMENTATION = ROSE
+
+# INIT port is called by the framework.  It typically produces the very first set of
+# plasma state files for SIMULATION_MODE = NORMAL.  It does not raise an exception
+# if missing.
+
+   [[INIT]]
+      IMPLEMENTATION =
+
+# Specification of IMPLEMENTATION for each physics port called out in PORTS list.
+# Additional specifications may be present that are not in the PORTS list
+
+# Specification of configuration for each port called out in PORTS list.
+# Additional specifications may be present that are not in the PORTS list
+# NAME variable MUST match the name of the python class that implements the component
+
+[ROSE]
+   CLASS = DAKOTA
+   SUB_CLASS = TEST
+   NAME = ResenbrockDriver
+   NPROC = 1
+   BIN_PATH =
+   INPUT_DIR =
+   INPUT_FILES =
+   OUTPUT_FILES =
+   SCRIPT = $PWD/dakota_test_Rosenbrock.py
+
+# Time loop specification (two modes for now) EXPLICIT | REGULAR
+# For MODE = REGULAR, the framework uses the variables START, FINISH, and NSTEP
+# For MODE = EXPLICIT, the framework uses the variable VALUES (space separated list of time values)
+
+[TIME_LOOP]
+   MODE = REGULAR
+   START = 0
+   FINISH = 10
+   NSTEP = 10
diff --git a/tests/dakota/dakota_test_Rosenbrock.py b/tests/dakota/dakota_test_Rosenbrock.py
new file mode 100755
index 00000000..48f21776
--- /dev/null
+++ b/tests/dakota/dakota_test_Rosenbrock.py
@@ -0,0 +1,29 @@
+# -------------------------------------------------------------------------------
+# Copyright 2006-2012 UT-Battelle, LLC. See LICENSE for more information.
+# -------------------------------------------------------------------------------
+import os
+from ipsframework import Component
+
+
+class ResenbrockDriver(Component):
+
+    def __init__(self, services, config):
+        Component.__init__(self, services, config)
+
+    def init(self, timestamp=0):
+        print('init from dakota test driver')
+
+    def step(self, timestamp=0):
+        print('step from dakota test driver')
+        services = self.services
+        services.stage_input_files(self.INPUT_FILES)
+        x1 = float(self.X1)
+        x2 = float(self.X2)
+        sim_root = services.get_config_param('SIM_ROOT')
+        result = 100.0 * (x2 - x1 * x1) * (x2 - x1 * x1) + (1. - x1) * (1. - x1)
+        out_file = os.path.join(sim_root, 'RESULT')
+        open(out_file, 'w').write('%.9f f' % (result))
+        return
+
+    def finalize(self, timestamp=0):
+        print('finalize from dakota test driver')
diff --git a/tests/dakota/test_dakota.py b/tests/dakota/test_dakota.py
new file mode 100644
index 00000000..568559f4
--- /dev/null
+++ b/tests/dakota/test_dakota.py
@@ -0,0 +1,47 @@
+from ipsframework.ips_dakota_dynamic import DakotaDynamic
+import os
+import shutil
+import pytest
+import glob
+
+
+def copy_config_and_replace(infile, outfile, tmpdir):
+    with open(infile, "r") as fin:
+        with open(outfile, "w") as fout:
+            for line in fin:
+                if "SCRIPT" in line:
+                    fout.write(line.replace("$PWD", str(tmpdir)))
+                elif line.startswith("SIM_ROOT"):
+                    fout.write(f"SIM_ROOT = {tmpdir}/$SIM_NAME\n")
+                else:
+                    fout.write(line)
+
+
+@pytest.mark.skipif(shutil.which('dakota') is None,
+                    reason="Requires dakota to run this test")
+def test_dakota(tmpdir):
+    data_dir = os.path.dirname(__file__)
+    copy_config_and_replace(os.path.join(data_dir, "dakota_test_Rosenbrock.ips"), tmpdir.join("dakota_test_Rosenbrock.ips"), tmpdir)
+    shutil.copy(os.path.join(data_dir, "workstation.conf"), tmpdir)
+    shutil.copy(os.path.join(data_dir, "dakota_test_Rosenbrock.in"), tmpdir)
+    shutil.copy(os.path.join(data_dir, "dakota_test_Rosenbrock.py"), tmpdir)
+
+    os.chdir(tmpdir)
+
+    sweep = DakotaDynamic(dakota_cfg=os.path.join(tmpdir, "dakota_test_Rosenbrock.in"),
+                          log_file=str(tmpdir.join('test.log')),
+                          platform_filename=os.path.join(tmpdir, "workstation.conf"),
+                          debug=False,
+                          ips_config_template=os.path.join(tmpdir, "dakota_test_Rosenbrock.ips"),
+                          restart_file=None)
+    sweep.run()
+
+    # check dakota log
+    log_file = glob.glob(str(tmpdir.join("dakota_*.log")))[0]
+    with open(log_file, 'r') as f:
+        lines = f.readlines()
+
+    X1, X2 = lines[-22].split()[1:]
+
+    assert float(X1) == pytest.approx(1, rel=1e-3)
+    assert float(X2) == pytest.approx(1, rel=1e-3)
diff --git a/tests/dakota/workstation.conf b/tests/dakota/workstation.conf
new file mode 100644
index 00000000..8fb05faf
--- /dev/null
+++ b/tests/dakota/workstation.conf
@@ -0,0 +1,30 @@
+HOST = my_laptop
+MPIRUN = eval
+
+#######################################
+# resource detection method
+#######################################
+NODE_DETECTION = manual # checkjob | qstat | pbs_env | slurm_env
+
+#######################################
+# manual allocation description
+#######################################
+TOTAL_PROCS = 2
+NODES = 1
+PROCS_PER_NODE = 2
+
+#######################################
+# node topology description
+#######################################
+CORES_PER_NODE = 2
+SOCKETS_PER_NODE = 1
+
+#######################################
+# framework setting for node allocation
+#######################################
+# MUST ADHERE TO THE PLATFORM'S CAPABILITIES
+#   * EXCLUSIVE : only one task per node
+#   * SHARED : multiple tasks may share a node
+# For single node jobs, this can be overridden allowing multiple
+# tasks per node.
+NODE_ALLOCATION_MODE = SHARED # SHARED | EXCLUSIVE
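
Note on the new initiate_new_simulation() hook in ips.py (not part of the patch): it builds an init/step/finalize ServiceRequestMessage for every framework-visible component of the new simulation, dispatches only the first one through the task manager, and parks the remainder in call_queue_map keyed by the issued call id, with outstanding_calls_list tracking the call in flight; presumably the framework's main loop dispatches the next queued message when that call completes. A minimal, self-contained sketch of that bookkeeping pattern, using plain Python in place of the IPS task manager and message classes (start_chain and on_call_finished are illustrative names, not framework API):

    from collections import deque
    from itertools import count

    _call_ids = count(1)
    call_queue_map = {}        # call_id -> deque of messages still to send
    outstanding_calls = []     # call_ids whose completion we are waiting on

    def start_chain(component, methods=('init', 'step', 'finalize')):
        """Queue one call per method, send the first, park the rest."""
        pending = deque((component, m) for m in methods)
        first = pending.popleft()
        call_id = next(_call_ids)          # stands in for task_manager.init_call()
        call_queue_map[call_id] = pending
        outstanding_calls.append(call_id)
        print('sent', first, 'as call', call_id)
        return call_id

    def on_call_finished(call_id):
        """When a call returns, send the next queued message under a new call id."""
        outstanding_calls.remove(call_id)
        pending = call_queue_map.pop(call_id)
        if pending:
            nxt = pending.popleft()
            new_id = next(_call_ids)
            call_queue_map[new_id] = pending
            outstanding_calls.append(new_id)
            print('sent', nxt, 'as call', new_id)

    cid = start_chain('ROSE_driver')   # sends ('ROSE_driver', 'init')
    on_call_finished(cid)              # then sends ('ROSE_driver', 'step')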
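
Note on the assertions in tests/dakota/test_dakota.py (not part of the patch): the driver added in dakota_test_Rosenbrock.py writes the Rosenbrock objective 100*(x2 - x1^2)^2 + (1 - x1)^2 to the results file, and the conmin_frcg method configured in dakota_test_Rosenbrock.in minimizes it; the global minimum is 0 at (x1, x2) = (1, 1), which is why the test expects the best point reported in the DAKOTA log to be approximately (1, 1). A small standalone check of that expectation, with no framework or DAKOTA dependencies (illustrative only):

    import pytest

    def rosenbrock(x1, x2):
        # Same expression that ResenbrockDriver.step() writes to the RESULT file
        return 100.0 * (x2 - x1 * x1) * (x2 - x1 * x1) + (1. - x1) * (1. - x1)

    def test_rosenbrock_minimum():
        assert rosenbrock(1.0, 1.0) == 0.0
        # Points within the test's rel=1e-3 tolerance of (1, 1) still give a
        # near-zero objective, so the assertions are consistent with convergence.
        assert rosenbrock(1.001, 1.001) == pytest.approx(0.0, abs=1e-3)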