diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index ecf00822..10ad186e 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -18,7 +18,6 @@ jobs: uses: "./.github/workflows/lint" Test: - needs: Lint strategy: fail-fast: false matrix: diff --git a/gudpy/core/container.py b/gudpy/core/container.py index e401a42b..e1ac277c 100644 --- a/gudpy/core/container.py +++ b/gudpy/core/container.py @@ -71,7 +71,7 @@ def __init__(self, config=None): """ self.name = "" self.periodNumber = 1 - self.dataFiles = DataFiles([], "CONTAINER") + self.dataFiles = DataFiles([], "CONTAINER", True) self.composition = Composition("CONTAINER") self.geometry = Geometry.SameAsBeam self.upstreamThickness = 0.1 @@ -97,6 +97,7 @@ def __init__(self, config=None): self.grBroadening = 0. self.powerForBroadening = 0.0 self.stepSize = 0.0 + self.outputFolder = "" self.yamlignore = { "runAsSample", diff --git a/gudpy/core/data_files.py b/gudpy/core/data_files.py index f8a9b1ff..0c82174f 100644 --- a/gudpy/core/data_files.py +++ b/gudpy/core/data_files.py @@ -1,4 +1,56 @@ +import os + from core import config +from core import utils +from core.gud_file import GudFile + + +class DataFile: + def __init__(self, filename, isSampleDataFile: bool = False): + self.filename = filename + self.name = utils.replace_unwanted_chars( + os.path.splitext(filename)[0] + ) + self.ext = os.path.splitext(filename)[1] + self.outputFolder = "" + self._outputs = {} + self.isSampleDataFile = isSampleDataFile + + self.yamlignore = { + "str", + "yamlignore" + } + + def addOutput(self, path): + ext = os.path.splitext(path)[1] + self._outputs[ext] = path + + def outputs(self, ext): + return self._outputs[ext] + + def __str__(self): + return self.filename + + @property + def gudFile(self): + if not self.isSampleDataFile: + return None + gudPath = self._outputs.get(".gud", None) + if not gudPath: + return None + return GudFile(gudPath) + + @property + def mintFile(self): + if not 
self.isSampleDataFile: + return None + return self._outputs.get(".mint01", None) + + @property + def msubwFile(self): + if not self.isSampleDataFile: + return None + return self._outputs.get(".msubw01", None) class DataFiles: @@ -17,7 +69,7 @@ class DataFiles: ------- """ - def __init__(self, dataFiles, name): + def __init__(self, dataFiles, name, isSampleDataFile=False): """ Constructs all the necessary attributes for the DataFiles object. @@ -28,7 +80,11 @@ def __init__(self, dataFiles, name): name : str Name of the parent of the data files, e.g. Sample Background """ - self.dataFiles = dataFiles + + self._dataFiles: DataFile = [] + self.isSampleDataFile = isSampleDataFile + self.setFiles(dataFiles) + self.name = name self.yamlignore = { @@ -36,6 +92,16 @@ def __init__(self, dataFiles, name): "yamlignore" } + self.outputFolders = {} + self._outputs = {} + + def setFiles(self, dataFilenames): + self._dataFiles.clear() + for dataFile in dataFilenames: + if not dataFile: + continue + self._dataFiles.append(DataFile(dataFile, self.isSampleDataFile)) + def __str__(self): """ Returns the string representation of the DataFiles object. @@ -50,11 +116,20 @@ def __str__(self): String representation of DataFiles. """ self.str = [ - df + config.spc10 + self.name + " data files" - for df in self.dataFiles + df.filename + config.spc10 + self.name + " data files" + for df in self._dataFiles ] return """\n""".join(self.str) + @property + def dataFilenames(self): + dfNames = [df.filename for df in self._dataFiles] + return dfNames + + @property + def dataFiles(self): + return self._dataFiles + def __len__(self): """ Returns the length of the dataFiles list member. 
@@ -68,15 +143,15 @@ def __len__(self): int Number of data files, """ - return len(self.dataFiles) + return len(self._dataFiles) def __getitem__(self, n): - return self.dataFiles[n] + return self._dataFiles[n] def __setitem__(self, n, item): if n >= len(self): - self.dataFiles.extend(n+1) - self.dataFiles[n] = item + self._dataFiles.extend(n+1) + self._dataFiles[n] = DataFile(item) def __iter__(self): - return iter(self.dataFiles) + return iter(self._dataFiles) diff --git a/gudpy/core/file_library.py b/gudpy/core/file_library.py index 77d7acda..041e6202 100644 --- a/gudpy/core/file_library.py +++ b/gudpy/core/file_library.py @@ -1,9 +1,9 @@ import os from zipfile import ZipFile, ZIP_DEFLATED -from pathlib import Path from core import utils from core.enums import CrossSectionSource +from core.io.gudpy_io import GudPyIO class GudPyFileLibrary: @@ -64,8 +64,8 @@ def __init__(self, gudrunFile): } self.dataFiles = [ - *gudrunFile.normalisation.dataFiles.dataFiles, - *gudrunFile.normalisation.dataFilesBg.dataFiles, + *gudrunFile.normalisation.dataFiles.dataFilenames, + *gudrunFile.normalisation.dataFilesBg.dataFilenames, ] # If NXS files are being used @@ -90,15 +90,15 @@ def __init__(self, gudrunFile): # that file too. 
for sampleBackground in gudrunFile.sampleBackgrounds: - self.dataFiles.extend(sampleBackground.dataFiles.dataFiles) + self.dataFiles.extend(sampleBackground.dataFiles.dataFilenames) for sample in sampleBackground.samples: - self.dataFiles.extend(sample.dataFiles.dataFiles) + self.dataFiles.extend(sample.dataFiles.dataFilenames) if sample.totalCrossSectionSource == CrossSectionSource.FILE: self.files[sample.name] = sample.crossSectionFilename for container in sample.containers: - self.dataFiles.extend(container.dataFiles.dataFiles) + self.dataFiles.extend(container.dataFiles.dataFilenames) if container.totalCrossSectionSource == ( CrossSectionSource.FILE ): @@ -166,21 +166,16 @@ def checkFilesExist(self): def exportMintData( self, samples, + exportTo="", renameDataFiles=False, - exportTo=None, includeParams=False, ): - if not exportTo: - exportTo = os.path.join( - self.gudrunFile.projectDir, - Path(self.gudrunFile.path()).stem + ".zip", - ) with ZipFile(exportTo, "w", ZIP_DEFLATED) as zipFile: for sample in samples: if len(sample.dataFiles.dataFiles): path = os.path.join( self.gudrunFile.projectDir, - sample.dataFiles.dataFiles[0].replace( + sample.dataFiles.dataFilenames[0].replace( self.gudrunFile.instrument.dataFileType, "mint01" ), ) @@ -199,10 +194,10 @@ def exportMintData( self.gudrunFile.projectDir, safeSampleName + ".sample", ) + sample.sampleFile = path + if not os.path.exists(path): - sample.write_out( - self.gudrunFile.projectDir - ) + GudPyIO.writeObject(sample, path) zipFile.write(path, arcname=os.path.basename(path)) return zipFile.filename diff --git a/gudpy/core/gud_file.py b/gudpy/core/gud_file.py index 8a430dfa..53c4e3e3 100644 --- a/gudpy/core/gud_file.py +++ b/gudpy/core/gud_file.py @@ -71,13 +71,6 @@ class GudFile: Contents of the .gud file. output : str Output for use in the GUI. - Methods - ------- - parse(): - Parses the GudFile from path, assigning values - to each of the attributes. 
- write_out(overwrite=False) - Writes out the string representation of the GudFile to a file. """ def __init__(self, path): @@ -350,22 +343,3 @@ def __str__(self): f'{self.suggestedTweakFactor}\n' ) - - def write_out(self, path): - """ - Writes out the string representation of the GudFile. - If 'overwrite' is True, then the initial file is overwritten. - Otherwise, it is written to 'gudpy_{initial filename}.gud'. - - Parameters - ---------- - overwrite : bool, optional - Overwrite the initial file? (default is False). - - Returns - ------- - None - """ - f = open(path, "w", encoding="utf-8") - f.write(str(self)) - f.close() diff --git a/gudpy/core/gudpy.py b/gudpy/core/gudpy.py index 1072d5ea..7f463946 100644 --- a/gudpy/core/gudpy.py +++ b/gudpy/core/gudpy.py @@ -16,6 +16,7 @@ import core.output_file_handler as handlers from core.file_library import GudPyFileLibrary from core import data_files +from core.io.gudpy_io import GudPyIO SUFFIX = ".exe" if os.name == "nt" else "" @@ -25,8 +26,8 @@ def __init__( self ): self.originalGudrunFile: GudrunFile = None - self.gudrunFile: GudrunFile = None - self.purgeFile = None + self._gudrunFile: GudrunFile = None + self.io = GudPyIO() self.purge: Purge = None self.gudrun: Gudrun = None @@ -36,13 +37,21 @@ def __init__( self.gudrunOutput = None - self.projectDir = "" - self.autosaveLocation = "" + @property + def gudrunFile(self): + return self._gudrunFile - def loadFromFile( + @gudrunFile.setter + def gudrunFile(self, gudrunFile: GudrunFile): + self._gudrunFile = gudrunFile + + @property + def projectDir(self): + return self.io.projectDir + + def loadFromGudrunFile( self, loadFile: str, - format: enums.Format, config: bool = False ): """Loads GudPy from an input file @@ -51,8 +60,6 @@ def loadFromFile( ---------- loadFile : str Path of input file to load from - format : enums.Format - Format of the input file (YAML or TXT) config : bool, optional If loading from preset config, by default False @@ -62,19 +69,30 @@ def 
loadFromFile( Raised if input file does not exist """ if not os.path.exists(loadFile): - raise FileNotFoundError("Input file does not exist.") + raise FileNotFoundError(f"Input file '{loadFile}' does not exist.") - self.gudrunFile = GudrunFile( - loadFile=loadFile, - format=format, - config=config - ) + self.gudrunFile = self.io.importGudrunFile(loadFile, config) + + def loadFromYamlFile( + self, + loadFile: str, + ): + """Loads GudPy from a YAML input file + + Parameters + ---------- + loadFile : str + Path of input file to load from - self.originalGudrunFile = copy.deepcopy(self.gudrunFile) - self.originalGudrunFile.filename = "original" + Raises + ------ + FileNotFoundError + Raised if input file does not exist + """ + if not os.path.exists(loadFile): + raise FileNotFoundError("Input file does not exist.") - self.projectDir == "" - self.autosaveLocation = "" + self.gudrunFile = self.io.importFromYamlFile(loadFile) def loadFromProject(self, projectDir: str): """Loads GudPy from a project directory @@ -90,29 +108,10 @@ def loadFromProject(self, projectDir: str): Raised if there is no YAML input file in the project directory """ - loadFile = "" - - if os.path.exists(os.path.join( - projectDir, - f"{os.path.basename(projectDir)}.yaml" - )): - # If default file exists - loadFile = os.path.join( - projectDir, - f"{os.path.basename(projectDir)}.yaml" - ) - else: - # Try to find yaml files - for f in os.listdir(projectDir): - if os.path.splitext(f)[1] == ".yaml": - # If file is yaml - loadFile = os.path.join(projectDir, f) - if not loadFile: - raise FileNotFoundError( - "Could not find GudPy input file within the project") + if not os.path.exists(projectDir): + raise FileNotFoundError("Directory does not exist.") - self.loadFromFile(loadFile=loadFile, format=enums.Format.YAML) - self.setSaveLocation(projectDir) + self.gudrunFile = self.io.importProject(projectDir) def checkSaveLocation(self): """Checks if user has set the save location @@ -122,7 +121,7 @@ def 
checkSaveLocation(self): bool If the save location is set or not """ - return bool(self.projectDir) + return self.io.checkSaveLocation() def setSaveLocation(self, projectDir: str): """Sets the save location @@ -132,28 +131,12 @@ def setSaveLocation(self, projectDir: str): projectDir : str Path to the desired save location of project """ - self.projectDir = projectDir - self.gudrunFile.filename = f"{os.path.basename(projectDir)}.yaml" - self.gudrunFile.projectDir = projectDir - self.originalGudrunFile.projectDir = projectDir - self.autosaveLocation = ( - f"{os.path.basename(projectDir)}.autosave" - ) + self.io.setSaveLocation(projectDir) - def save(self, path: str = "", format: enums.Format = enums.Format.YAML): + def save(self): """Saves current GudPy input file - - Parameters - ---------- - path : str, optional - Path to desired save location, by default "" - format : enums.Format, optional - Desired format of save file, by default enums.Format.YAML """ - if not path: - path = self.gudrunFile.path() - self.originalGudrunFile.save(path=path) - self.gudrunFile.save(path=path, format=format) + self.io.save(self.gudrunFile) def saveAs(self, targetDir: str): """Save GudPy project to a new location, set current @@ -169,26 +152,7 @@ def saveAs(self, targetDir: str): IsADirectoryError Raised if the requested save location already exists """ - if os.path.exists(targetDir): - raise IsADirectoryError("Cannot be an existing directory") - - oldDir = self.projectDir - self.setSaveLocation(targetDir) - os.makedirs(targetDir) - - if os.path.exists(os.path.join(oldDir, "Purge")): - shutil.copytree( - os.path.join(oldDir, "Purge"), - os.path.join(targetDir, "Purge") - ) - if os.path.exists(os.path.join(oldDir, "Gudrun")): - shutil.copytree( - os.path.join(oldDir, "Gudrun"), - os.path.join(targetDir, "Gudrun") - ) - self.gudrunFile.filename = os.path.basename(targetDir) - self.gudrunFile.save(path=self.gudrunFile.path(), - format=enums.Format.YAML) + 
self.io.exportProject(self.gudrunFile, targetDir) self.loadFromProject(projectDir=targetDir) def checkFilesExist(self): @@ -231,9 +195,9 @@ def runPurge(self): """ self.prepareRun() self.purge = Purge() - self.purgeFile = PurgeFile(self.gudrunFile) - exitcode = self.purge.purge(self.purgeFile) - self.gudrunFile.save() + purgeFile = PurgeFile(self.gudrunFile) + exitcode = self.purge.purge(purgeFile) + self.save() if exitcode: raise exc.PurgeException( "Purge failed to run with the following output:\n" @@ -257,6 +221,7 @@ def runGudrun(self, gudrunFile: GudrunFile = None): if not gudrunFile: gudrunFile = self.gudrunFile + self.save() self.gudrun = Gudrun() exitcode = self.gudrun.gudrun(gudrunFile=gudrunFile) if exitcode: @@ -265,6 +230,7 @@ def runGudrun(self, gudrunFile: GudrunFile = None): f"{self.gudrun.error}" ) self.gudrunOutput = self.gudrun.gudrunOutput + self.save() def iterateGudrun(self, iterator: iterators.Iterator): """Runs gudrun_dcs iteratively while tweaking parameters @@ -437,7 +403,7 @@ def organiseOutput(self, procDir: str, projectDir: str): def purge(self, purgeFile: PurgeFile): self.checkBinary() with tempfile.TemporaryDirectory() as tmp: - purgeFile.write_out(os.path.join( + GudPyIO.writeObject(purgeFile, os.path.join( tmp, f"{self.PROCESS}.dat" )) @@ -460,7 +426,7 @@ def purge(self, purgeFile: PurgeFile): return self.exitcode self.purgeLocation = self.organiseOutput( - tmp, purgeFile.gudrunFile.projectDir) + tmp, GudPyIO.projectDir) self.exitcode = 0 return self.exitcode @@ -485,12 +451,18 @@ def organiseOutput( gudrunOutput = outputHandler.organiseOutput(exclude=exclude) return gudrunOutput + def obtainDependencies( + fileSelfScattering, + gudrunFile: GudrunFile + ): + if gudrunFile.instrument.subWavelengthBinnedData: + return + def gudrun( self, gudrunFile: GudrunFile, purge: Purge = None, iterator: iterators.Iterator = None, - save: bool = True ) -> int: self.checkBinary() if not purge: @@ -510,7 +482,7 @@ def gudrun( tmp, gudrunFile.OUTPATH 
) - gudrunFile.write_out(path) + GudPyIO.writeGudrunFile(gudrunFile, tmp) with subprocess.Popen( [self.BINARY_PATH, path], cwd=tmp, stdout=subprocess.PIPE, @@ -532,14 +504,6 @@ def gudrun( else: self.gudrunOutput = self.organiseOutput( gudrunFile, exclude=purgeFiles) - if save: - gudrunFile.save( - path=os.path.join( - gudrunFile.projectDir, - f"{gudrunFile.filename}" - ), - format=enums.Format.YAML - ) gudrunFile.setGudrunDir(self.gudrunOutput.path) self.exitcode = 0 @@ -575,22 +539,81 @@ def singleIteration( gudrun: Gudrun, purge: Purge, prevOutput: handlers.GudrunOutput, - save=True ) -> typ.Tuple[int, str]: # (exitcode, error) modGfFile = self.iterator.performIteration(gudrunFile, prevOutput) - exitcode = gudrun.gudrun(modGfFile, purge, self.iterator, save=save) + exitcode = gudrun.gudrun(modGfFile, purge, self.iterator) + if exitcode: + return exitcode + self.gudrunOutput = gudrun.gudrunOutput + return 0 + + def iterate(self, purge) -> typ.Tuple[int, str]: + prevOutput = None + + # If the iterator requires a prelimenary run + if self.iterator.requireDefault: + exitcode = self.gudrunObjects[0].gudrun( + self.gudrunFile, purge, self.iterator) + if exitcode: # An exit code != 0 indicates failure + self.exitcode = (exitcode, self.gudrunObjects[0].error) + return self.exitcode + prevOutput = self.gudrunObjects[0].gudrunOutput + + # Iterate through gudrun objects + for gudrun in self.gudrunObjects: + if gudrun.output: + # If object has already been run, skip + continue + exitcode = self.singleIteration( + self.gudrunFile, gudrun, purge, prevOutput) + if exitcode: # An exit code != 0 indicates failure + self.exitcode = (exitcode, gudrun.error) + return self.exitcode + + prevOutput = gudrun.gudrunOutput + + self.result = self.iterator.result + + self.exitcode = (0, "") + return self.exitcode + + +class InelasiticityIterator(GudrunIterator): + def __init__( + self, + gudrunFile: GudrunFile, + iterator: iterators.Iterator, + ): + + super().__init__( + 
gudrunFile=gudrunFile, + iterator=iterator + ) + + def copyDCS(self, prevOutput): + return + + def singleIteration( + self, + gudrunFile: GudrunFile, + gudrun: Gudrun, + purge: Purge, + prevOutput: handlers.GudrunOutput, + ) -> typ.Tuple[int, str]: # (exitcode, error) + modGfFile = self.iterator.performIteration(gudrunFile, prevOutput) + exitcode = gudrun.gudrun(modGfFile, purge, self.iterator) if exitcode: return exitcode self.gudrunOutput = gudrun.gudrunOutput return 0 - def iterate(self, purge, save=True) -> typ.Tuple[int, str]: + def iterate(self, purge) -> typ.Tuple[int, str]: prevOutput = None # If the iterator requires a prelimenary run if self.iterator.requireDefault: exitcode = self.gudrunObjects[0].gudrun( - self.gudrunFile, purge, self.iterator, save) + self.gudrunFile, purge, self.iterator) if exitcode: # An exit code != 0 indicates failure self.exitcode = (exitcode, self.gudrunObjects[0].error) return self.exitcode @@ -602,7 +625,7 @@ def iterate(self, purge, save=True) -> typ.Tuple[int, str]: # If object has already been run, skip continue exitcode = self.singleIteration( - self.gudrunFile, gudrun, purge, prevOutput, save) + self.gudrunFile, gudrun, purge, prevOutput) if exitcode: # An exit code != 0 indicates failure self.exitcode = (exitcode, gudrun.error) return self.exitcode @@ -876,14 +899,14 @@ def bactchProcess( batchSize: int, iterator: iterators.Iterator = None, ): - self.batchedGudrunFile.projectDir = (os.path.join( - self.batchedGudrunFile.projectDir, + self.batchedGudrunFile.outputFolder = (os.path.join( + GudPyIO.projectDir, f"BATCH_PROCESSING_BATCH_SIZE{batchSize}" )) exitcode = gudrun.gudrun(GudrunFile, purge, iterator) self.writeDiagnosticsFile( os.path.join( - self.batchedGudrunFile.path(), + self.batchedGudrunFile.outputFolder, "batch_processing_diagnostics.txt", ), self.batchedGudrunFile, diff --git a/gudpy/core/gudpy_yaml.py b/gudpy/core/gudpy_yaml.py deleted file mode 100644 index 4682db6c..00000000 --- a/gudpy/core/gudpy_yaml.py 
+++ /dev/null @@ -1,253 +0,0 @@ -from abc import abstractmethod -from enum import Enum -from ruamel.yaml import YAML as yaml -from ruamel.yaml import YAMLError -import os - -from core.composition import ( - Component, Components, Composition, WeightedComponent -) -from core.data_files import DataFiles -from core.element import Element -from core.exception import YAMLException -from core.gui_config import GUIConfig -from core import utils - -from core.instrument import Instrument -from core.beam import Beam -from core.normalisation import Normalisation -from core.sample_background import SampleBackground -from core.sample import Sample -from core.container import Container -from core import config - - -class YAML: - - def __init__(self): - self.yaml = self.getYamlModule() - - def getYamlModule(self): - yaml_ = yaml() - yaml_.preserve_quotes = True - yaml_.default_flow_style = None - yaml_.encoding = 'utf-8' - return yaml_ - - def parseYaml(self, path): - self.path = path - try: - parsedYAML = self.constructClasses(self.yamlToDict(path)) - except YAMLError as e: - # Exception caused by yaml parsing library - raise YAMLException(f"Invalid YAML file: {str(e)}") - except YAMLException as e: - # Exception caused by invalid arguments - raise YAMLException(e) - else: - return parsedYAML - - def yamlToDict(self, path): - # Read the input stream into our attribute. 
- with open(path, encoding=self.yaml.encoding) as fp: - return self.yaml.load(fp) - - def constructClasses(self, yamldict): - instrument = Instrument() - if "Instrument" in yamldict: - self.maskYAMLDicttoClass(instrument, yamldict["Instrument"]) - instrument.GudrunInputFileDir = os.path.dirname( - os.path.abspath( - self.path - ) - ) - - beam = Beam() - if "Beam" in yamldict: - self.maskYAMLDicttoClass(beam, yamldict["Beam"]) - - components = Components() - if "Components" in yamldict: - self.maskYAMLSeqtoClass(components, yamldict["Components"]) - - normalisation = Normalisation() - if "Normalisation" in yamldict: - self.maskYAMLDicttoClass(normalisation, yamldict["Normalisation"]) - - sampleBackgrounds = [] - if "SampleBackgrounds" in yamldict: - for sbyaml in yamldict["SampleBackgrounds"]: - sampleBackground = SampleBackground() - self.maskYAMLDicttoClass(sampleBackground, sbyaml) - sampleBackgrounds.append(sampleBackground) - - GUI = GUIConfig() - if "GUI" in yamldict: - self.maskYAMLDicttoClass(GUI, yamldict["GUI"]) - - return ( - instrument, beam, components, - normalisation, sampleBackgrounds, GUI - ) - - @abstractmethod - def maskYAMLDicttoClass(self, cls, yamldict): - for k, v in yamldict.items(): - if not hasattr(cls, k): - # If attribute is not valid - raise YAMLException( - f"Invalid attribute '{k}' given to '{type(cls).__name__}'") - - if isinstance(cls.__dict__[k], Enum): - setattr(cls, k, type(cls.__dict__[k])[v]) - - elif isinstance(cls.__dict__[k], DataFiles): - setattr( - cls, k, - DataFiles( - [v_ for v_ in v["dataFiles"]], v["name"]) - ) - - elif ( - isinstance( - cls, - (Component, Composition) - ) - and k == "elements" - ): - elements = [] - for idx, element in enumerate(v): - # Ensuring correct arguements are provided - if ( - "atomicSymbol" not in element - or "massNo" not in element - or "abundance" not in element - ): - raise YAMLException( - "Insufficient arguments given to element" - + f" {idx + 1}. 
Expects 'atomicSymbol', 'massNo'" - + " and 'abundance'" - ) - - # Setting element properties - try: - element_ = Element( - **{ - "atomicSymbol": element["atomicSymbol"], - "massNo": float(element["massNo"]), - "abundance": float(element["abundance"]) - } - ) - elements.append(element_) - except ValueError: - raise YAMLException( - f"Invalid number given to element {idx + 1}" - + f" in '{type(cls).__name__}") - setattr(cls, k, elements) - - elif isinstance(cls, Composition) and k == "weightedComponents": - weightedComponents = [] - for weightedComponent in v: - if ( - "component" not in weightedComponent - or "ratio" not in weightedComponent - ): - raise YAMLException( - "Weighted Component expects 'component' and" - + " 'ratio' to be provided") - component = Component() - self.maskYAMLDicttoClass( - component, weightedComponent["component"] - ) - ratio = weightedComponent["ratio"] - try: - weightedComponents.append( - WeightedComponent( - component, float(ratio)) - ) - except ValueError: - raise YAMLException( - "Invalid ratio given to Weighted Component") - setattr(cls, k, weightedComponents) - - elif ( - isinstance( - cls, - (Normalisation, Sample, Container) - ) - and k == "composition" - ): - self.maskYAMLDicttoClass(cls.__dict__[k], v) - - elif isinstance(cls, SampleBackground) and k == "samples": - for sampleyaml in yamldict[k]: - sample = Sample() - self.maskYAMLDicttoClass(sample, sampleyaml) - sample.name = utils.replace_unwanted_chars(sample.name) - cls.samples.append(sample) - - elif isinstance(cls, Sample) and k == "containers": - for contyaml in yamldict[k]: - container = Container() - self.maskYAMLDicttoClass(container, contyaml) - cls.containers.append(container) - - else: - setattr(cls, k, type(cls.__dict__[k])(self.toBuiltin(v))) - - def maskYAMLSeqtoClass(self, cls, yamlseq): - if isinstance(cls, Components): - components = [] - for component in yamlseq: - component_ = Component() - self.maskYAMLDicttoClass(component_, component) - 
components.append(component_) - setattr(cls, "components", components) - - def writeYAML(self, base, path): - with open(path, "wb") as fp: - outyaml = { - "Instrument": base.instrument, - "Beam": base.beam, - "Components": base.components.components, - "Normalisation": base.normalisation, - "SampleBackgrounds": base.sampleBackgrounds, - "GUI": config.GUI - } - self.yaml.dump( - {k: self.toYaml(v) for k, v in outyaml.items()}, - fp - ) - - @abstractmethod - def toYaml(self, var): - if var.__class__.__module__ == "ruamel.yaml.scalarfloat": - return float(var) - if var.__class__.__module__ == "builtins": - if isinstance(var, (list, tuple)): - return type(var)([self.toYaml(v) for v in var]) - else: - return var - elif isinstance(var, Enum): - return type(var)(var.value).name - elif isinstance(var, ( - Instrument, Beam, Components, Normalisation, - SampleBackground, Sample, Container, WeightedComponent, - Component, Composition, Element, DataFiles, GUIConfig - )): - return { - k: self.toYaml(v) - for k, v in var.__dict__.items() - if k not in var.yamlignore - } - - @abstractmethod - def toBuiltin(self, yamlvar): - if isinstance(yamlvar, (list, tuple)): - return [self.toBuiltin(v) for v in yamlvar] - elif yamlvar.__class__.__module__ == "builtins": - return yamlvar - elif yamlvar.__class__.__module__ == "ruamel.yaml.scalarfloat": - return float(yamlvar) - elif yamlvar.__class__.__module__ == "ruamel.yaml.scalarstring": - return str(yamlvar) diff --git a/gudpy/core/gudrun_file.py b/gudpy/core/gudrun_file.py index 0a50f4d6..0ee890ed 100644 --- a/gudpy/core/gudrun_file.py +++ b/gudpy/core/gudrun_file.py @@ -1,43 +1,20 @@ import os import time -import re from copy import deepcopy -from core.utils import ( - extract_nums_from_string, - firstword, boolifyNum, - extract_ints_from_string, - extract_floats_from_string, - firstNFloats, - firstNInts, - nthfloat, - nthint, - uniquifyName -) from core.instrument import Instrument from core.beam import Beam from core.normalisation 
import Normalisation -from core.sample import Sample from core.sample_background import SampleBackground -from core.container import Container -from core.composition import Component, Components, Composition -from core.element import Element -from core.data_files import DataFiles -from core.enums import ( - CrossSectionSource, Format, Instruments, FTModes, UnitsOfDensity, - MergeWeights, Scales, NormalisationType, OutputUnits, - Geometry -) -from core import utils +from core.composition import Components from core import config as cfg -from core.gudpy_yaml import YAML -from core.exception import ParserException, YAMLException from core.gud_file import GudFile SUFFIX = ".exe" if os.name == "nt" else "" class GudrunFile: + OUTPATH = "gudpy.txt" """ Class to represent a GudFile (files with .gud extension). .gud files are outputted by gudrun_dcs, via merge_routines @@ -47,10 +24,6 @@ class GudrunFile: Attributes ---------- - path : str - Path to the file. - OUTPATH : str - Path to write to, when not overwriting the initial file. instrument : Instrument Instrument object extracted from the input file. beam : Beam @@ -59,11 +32,6 @@ class GudrunFile: Normalisation object extracted from the input file. sampleBackgrounds : SampleBackground[] List of SampleBackgrounds extracted from the input file. - purged : bool - Have the detectors been purged? - stream : str[] - List of strings, where each item represents a line - in the input stream. Methods ------- getNextToken(): @@ -109,21 +77,15 @@ class GudrunFile: sampleBackgroundHelper(): Parses the SampleBackground, its Samples and their Containers. Returns the SampleBackground object. - parse(): - Parse the GudrunFile from its path. - Assign objects from the file to the attributes of the class. - write_out(overwrite=False) - Writes out the string representation of the GudrunFile to a file. - purge(): - Create a PurgeFile from the GudrunFile, and run purge_det on it. 
""" def __init__( self, - projectDir=None, - loadFile=None, - format=Format.YAML, - config=False + instrument=Instrument(), + beam=Beam(), + normalisation=Normalisation(), + sampleBackgrounds: list[SampleBackground] = [], + components=Components() ): """ Constructs all the necessary attributes for the GudrunFile object. @@ -141,37 +103,13 @@ def __init__( config : bool If a new input file should be constructed from a config """ - - self.yaml = YAML() - self.format = format - - # Construct the outpath of generated input file - self.OUTPATH = "gudpy.txt" - self.gudrunOutput = None - self.projectDir = projectDir - self.loadFile = loadFile - self.filename = None - self.stream = None - - self.instrument = Instrument() - self.normalisation = Normalisation() - self.beam = Beam() - self.sampleBackgrounds = [] - self.components = Components() - - if not projectDir and not loadFile: - raise RuntimeError( - "GudrunFile needs to be initialised with either" - " a project directory or load file specified" - ) - - if loadFile: - self.setGudrunDir(os.path.dirname(loadFile)) - - if not config: - self.setGudrunDir(os.path.dirname(loadFile)) - - self.parse(loadFile, config=config) + self.instrument = instrument + self.normalisation = normalisation + self.beam = beam + self.sampleBackgrounds = sampleBackgrounds + self.components = components + self.purged = False + self.outputFolder = "" def __deepcopy__(self, memo): result = self.__class__.__new__(self.__class__) @@ -182,1230 +120,48 @@ def __deepcopy__(self, memo): setattr(result, k, deepcopy(v, memo)) return result - def path(self): - if not self.projectDir: - return None - else: - return os.path.join(self.projectDir, self.filename) - - def checkNormDataFiles(self): - return (len(self.normalisation.dataFiles) - and len(self.normalisation.dataFilesBg)) - - def getNextToken(self): - """ - Pops the 'next token' from the stream and returns it. - Essentially removes the first line in the stream and returns it. 
- - Parameters - ---------- - None - Returns - ------- - str | None - """ - return self.stream.pop(0) if self.stream else None - - def peekNextToken(self): - """ - Returns the next token in the input stream, without removing it. - - Parameters - ---------- - None - Returns - ------- - str | None - """ - return self.stream[0] if self.stream else None - - def consumeTokens(self, n): - """ - Consume n tokens from the input stream. - - Parameters - ---------- - None - Returns - ------- - None - """ - for _ in range(n): - self.getNextToken() - - def consumeUpToDelim(self, delim): - """ - Consume tokens iteratively, until a delimiter is reached. - - Parameters - ---------- - None - Returns - ------- - None - """ - line = self.getNextToken() - while line[0] != delim: - line = self.getNextToken() - - def consumeWhitespace(self): - """ - Consume tokens iteratively, while they are whitespace. - - Parameters - ---------- - None - Returns - ------- - None - """ - line = self.peekNextToken() - if line and line.isspace(): - self.getNextToken() - line = self.peekNextToken() - - def parseInstrument(self): - """ - Intialises an Instrument object and assigns it to the - instrument attribute. - Parses the attributes of the Instrument from the input stream. - Raises a ParserException if any mandatory attributes are missing. - - - Parameters - ---------- - None - Returns - ------- - None - """ - try: - self.consumeWhitespace() - - # For string attributes, - # we simply extract the firstword in the line. - self.instrument.name = Instruments[firstword(self.getNextToken())] - self.consumeTokens(1) - self.instrument.dataFileDir = os.path.abspath( - firstword(self.getNextToken())) + os.path.sep - self.instrument.dataFileType = firstword(self.getNextToken()) - self.instrument.detectorCalibrationFileName = ( - firstword(self.getNextToken()) - ) - - # For single integer attributes, - # we extract the zeroth int from the line. 
- self.instrument.columnNoPhiVals = nthint(self.getNextToken(), 0) - self.instrument.groupFileName = firstword(self.getNextToken()) - self.instrument.deadtimeConstantsFileName = ( - firstword(self.getNextToken()) - ) - - # For N integer attributes, - # we extract the first N integers from the line. - self.instrument.spectrumNumbersForIncidentBeamMonitor = ( - extract_ints_from_string(self.getNextToken()) - ) - - # For integer pair attributes, - # we extract the first 2 integers from the line. - self.instrument.wavelengthRangeForMonitorNormalisation = ( - firstNFloats(self.getNextToken(), 2) - ) - - if all( - self.instrument.wavelengthRangeForMonitorNormalisation - ) == 0.0: - self.instrument.wavelengthRangeForMonitorNormalisation = [ - 0, 0 - ] - - self.instrument.spectrumNumbersForTransmissionMonitor = ( - extract_ints_from_string(self.getNextToken()) - ) - - # For single float attributes, - # we extract the zeroth float from the line. - self.instrument.incidentMonitorQuietCountConst = ( - nthfloat(self.getNextToken(), 0) - ) - self.instrument.transmissionMonitorQuietCountConst = ( - nthfloat(self.getNextToken(), 0) - ) - - self.instrument.channelNosSpikeAnalysis = ( - firstNInts(self.getNextToken(), 2) - ) - self.instrument.spikeAnalysisAcceptanceFactor = ( - nthfloat(self.getNextToken(), 0) - ) - - # Extract wavelength range - # Which consists of the first 3 floats - # (min, max, step) in the line. - wavelengthRange = firstNFloats(self.getNextToken(), 3) - self.instrument.wavelengthMin = wavelengthRange[0] - self.instrument.wavelengthMax = wavelengthRange[1] - self.instrument.wavelengthStep = wavelengthRange[2] - - self.instrument.NoSmoothsOnMonitor = nthint(self.getNextToken(), 0) - - # Extract X range - # Which consists of the first 3 floats - # (min, max, step) in the line. 
- XRange = firstNFloats(self.getNextToken(), 3) - - self.instrument.XMin = XRange[0] - self.instrument.XMax = XRange[1] - self.instrument.XStep = XRange[2] - - # Extract the grouping parameter panel. - # Each row in the panel consists of the first 4 ints - # (Group, XMin, XMax, Background Factor) in the line. - # If the marker line is encountered, - # then the panel has been parsed. - - line = self.getNextToken() - while "to end input of specified values" not in line: - group = nthint(line, 0) - xMin = nthfloat(line, 1) - xMax = nthfloat(line, 2) - backgroundFactor = nthfloat(line, 3) - self.instrument.groupingParameterPanel.append( - [group, xMin, xMax, backgroundFactor] - ) - line = self.getNextToken() - - self.instrument.groupsAcceptanceFactor = ( - nthfloat(self.getNextToken(), 0) - ) - self.instrument.mergePower = nthint(self.getNextToken(), 0) - - # For boolean attributes, we convert the first - # integer in the line to its boolean value. - self.instrument.subSingleAtomScattering = ( - boolifyNum(nthint(self.getNextToken(), 0)) - ) - - # For enumerated attributes, where the value of the attribute is - # the first integer in the line, and we must get the member, - # we do this: Enum[Enum(value).name] - self.instrument.mergeWeights = ( - MergeWeights[MergeWeights(nthint(self.getNextToken(), 0)).name] - ) - self.instrument.incidentFlightPath = ( - nthfloat(self.getNextToken(), 0) - ) - self.instrument.spectrumNumberForOutputDiagnosticFiles = ( - nthint(self.getNextToken(), 0) - ) - - self.instrument.neutronScatteringParametersFile = ( - firstword(self.getNextToken()) - - ) - self.instrument.scaleSelection = ( - Scales[Scales(nthint(self.getNextToken(), 0)).name] - ) - self.instrument.subWavelengthBinnedData = ( - boolifyNum(nthint(self.getNextToken(), 0)) - ) - self.consumeTokens(2) - self.instrument.logarithmicStepSize = ( - nthfloat(self.getNextToken(), 0) - ) - self.instrument.hardGroupEdges = ( - boolifyNum(nthint(self.getNextToken(), 0)) - ) - - # If NeXus 
files are being used, then we expect a NeXus definition - # file to be present, and extract it. - if ( - self.instrument.dataFileType == "NXS" - or self.instrument.dataFileType == "nxs" - ): - self.instrument.nxsDefinitionFile = ( - firstword(self.getNextToken()) - ) - - if self.config: - self.instrument.goodDetectorThreshold = nthint( - self.getNextToken(), 0 - ) - - # Consume whitespace and the closing brace. - self.consumeUpToDelim("}") - - # Resolve the paths, to make them relative. - # First construct the regular expression to match against. - pattern = re.compile(r"StartupFiles\S*") - - match = re.search( - pattern, - self.instrument.detectorCalibrationFileName - ) - - if match: - self.instrument.detectorCalibrationFileName = match.group() - - match = re.search( - pattern, - self.instrument.groupFileName - ) - - if match: - self.instrument.groupFileName = match.group() - - match = re.search( - pattern, - self.instrument.deadtimeConstantsFileName - ) - - if match: - self.instrument.deadtimeConstantsFileName = match.group() - - match = re.search( - pattern, - self.instrument.neutronScatteringParametersFile - ) - - if match: - self.instrument.neutronScatteringParametersFile = match.group() - - match = re.search( - pattern, - self.instrument.neutronScatteringParametersFile - ) - - if match: - self.instrument.neutronScatteringParametersFile = match.group() - - except Exception as e: - raise ParserException( - "Whilst parsing Instrument, an exception occured." - " The input file is most likely of an incorrect format, " - "and some attributes were missing." - f"{str(e)}" - ) from e - - def parseBeam(self): - """ - Intialises a Beam object and assigns it to the - beam attribute. - Parses the attributes of the Beam from the input stream. - Raises a ParserException if any mandatory attributes are missing. - - - Parameters - ---------- - None - Returns - ------- - None - """ - - try: - # Initialise beam attribute to a new instance of Beam. 
- self.beam = Beam() - - self.consumeWhitespace() - - # For enumerated attributes, - # where the member name of the attribute is - # the first 'word' in the line, and we must get the member, - # we do this: Enum[memberName]. - self.beam.sampleGeometry = Geometry[firstword(self.getNextToken())] - - # Set the global geometry. - cfg.geometry = self.beam.sampleGeometry - - # Ignore the number of beam values. - self.consumeTokens(1) - - # For N float attributes, - # we extract the first N floats from the line. - self.beam.beamProfileValues = ( - extract_floats_from_string(self.getNextToken()) - ) - - # For single float attributes, - # we extract the zeroth float from the line. - range = self.getNextToken() - self.beam.stepSizeAbsorption = nthfloat(range, 0) - self.beam.stepSizeMS = nthfloat(range, 1) - self.beam.noSlices = nthint(range, 2) - self.beam.angularStepForCorrections = ( - nthint(self.getNextToken(), 0) - ) - - # Extract the incident beam edges - # relative to the centroid of the sample. - incidentBeamEdges = self.getNextToken() - self.beam.incidentBeamLeftEdge = nthfloat(incidentBeamEdges, 0) - self.beam.incidentBeamRightEdge = nthfloat(incidentBeamEdges, 1) - self.beam.incidentBeamBottomEdge = nthfloat(incidentBeamEdges, 2) - self.beam.incidentBeamTopEdge = nthfloat(incidentBeamEdges, 3) - - # Extract the scattered beam edges - # relative to the centroid of the sample. - scatteredBeamEdges = self.getNextToken() - self.beam.scatteredBeamLeftEdge = nthfloat(scatteredBeamEdges, 0) - self.beam.scatteredBeamRightEdge = nthfloat(scatteredBeamEdges, 1) - self.beam.scatteredBeamBottomEdge = nthfloat(scatteredBeamEdges, 2) - self.beam.scatteredBeamTopEdge = nthfloat(scatteredBeamEdges, 3) - - # For string attributes, - # we simply extract the firstword in the line. - self.beam.filenameIncidentBeamSpectrumParams = ( - firstword(self.getNextToken()) - ) - - # Now match it against a pattern, - # to resolve the path to be relative. 
- pattern = re.compile(r"StartupFiles\S*") - - match = re.search( - pattern, - self.beam.filenameIncidentBeamSpectrumParams - ) - - if match: - self.beam.filenameIncidentBeamSpectrumParams = match.group() - - self.beam.overallBackgroundFactor = ( - nthfloat(self.getNextToken(), 0) - ) - self.beam.sampleDependantBackgroundFactor = ( - nthfloat(self.getNextToken(), 0) - ) - self.beam.shieldingAttenuationCoefficient = ( - nthfloat(self.getNextToken(), 0) - ) - - # Consume whitespace and the closing brace. - self.consumeUpToDelim("}") - - except Exception as e: - raise ParserException( - "Whilst parsing Beam, an exception occured." - " The input file is most likely of an incorrect format, " - "and some attributes were missing." - ) from e - - def parseNormalisation(self): - """ - Intialises a Normalisation object and assigns it to the - normalisation attribute. - Parses the attributes of the Normalisation from the input stream. - Raises a ParserException if any mandatory attributes are missing. - - - Parameters - ---------- - None - Returns - ------- - None - """ - - try: - # Initialise normalisation attribute - # to a new instance of Normalisation. - self.normalisation = Normalisation() - - self.consumeWhitespace() - - # The number of files and period number are both stored - # on the same line. - # So we extract the 0th integer for the number of files, - # and the 1st integer for the period number. - dataFileInfo = self.getNextToken() - numberOfFiles = nthint(dataFileInfo, 0) - self.normalisation.periodNumber = nthint(dataFileInfo, 1) - - # Extract data files - dataFiles = [] - for _ in range(numberOfFiles): - dataFiles.append(firstword(self.getNextToken())) - # Sorts list so that it is in ascending order - dataFiles.sort() - - # Create a DataFiles object from the dataFiles list constructed. 
- self.normalisation.dataFiles = ( - DataFiles(dataFiles, "NORMALISATION") - ) - - # The number of background files and - # background period number are both stored - # on the same line. - # So we extract the 0th integer for the number of background files, - # and the 1st integer for the background riod number. - dataFileInfoBg = self.getNextToken() - numberOfFilesBg = nthint(dataFileInfoBg, 0) - self.normalisation.periodNumberBg = nthint(dataFileInfoBg, 1) - - # Extract background data files - dataFilesBg = [] - for j in range(numberOfFilesBg): - dataFilesBg.append(firstword(self.getNextToken())) - - # Sorts list so that it is in ascending order - dataFilesBg.sort() - - # Create a DataFiles object from the dataFiles list constructed. - self.normalisation.dataFilesBg = ( - DataFiles(dataFilesBg, "NORMALISATION BACKGROUND") - ) - - # For boolean attributes, we convert the first - # integer in the line to its boolean value. - self.normalisation.forceCalculationOfCorrections = ( - boolifyNum(nthint(self.getNextToken(), 0)) - ) - - # Construct composition - composition = [] - line = self.getNextToken() - # Extract the composition. - # Each element in the composition consists of the first 'word', - # integer at the second position, and float at the third position, - # (Atomic Symbol, MassNo, Abundance) in the line. - # If the marker line is encountered, - # then the panel has been parsed. - while "end of composition input" not in line: - atomicSymbol = firstword(line) - massNo = nthfloat(line, 1) - abundance = nthfloat(line, 2) - - # Create an Element object and append to the composition list. - composition.append( - Element(atomicSymbol, massNo, abundance) - ) - line = self.getNextToken() - - # Create a Composition object from the dataFiles list constructed. 
- self.normalisation.composition = ( - Composition("Normalisation", elements=composition) - ) - - # For enumerated attributes, - # where the member name of the attribute is - # the first 'word' in the line, and we must get the member, - # we do this: Enum[memberName]. - self.normalisation.geometry = ( - Geometry[firstword(self.getNextToken())] - ) - - # Is the geometry FLATPLATE? - if ( - ( - self.normalisation.geometry == Geometry.SameAsBeam - and cfg.geometry == Geometry.FLATPLATE - ) - or self.normalisation.geometry == Geometry.FLATPLATE): - # If is is FLATPLATE, then extract the upstream and downstream - # thickness, the angle of rotation and sample width. - thickness = self.getNextToken() - self.normalisation.upstreamThickness = nthfloat(thickness, 0) - self.normalisation.downstreamThickness = ( - nthfloat(thickness, 1) - ) - geometryInfo = self.getNextToken() - self.normalisation.angleOfRotation = nthfloat(geometryInfo, 0) - self.normalisation.sampleWidth = nthfloat(geometryInfo, 1) - else: - - # Otherwise, it is CYLINDRICAL, - # then extract the inner and outer - # radii and the sample height. - radii = self.getNextToken() - self.normalisation.innerRadius = nthfloat(radii, 0) - self.normalisation.outerRadius = nthfloat(radii, 1) - self.normalisation.sampleHeight = ( - nthfloat(self.getNextToken(), 0) - ) - - # Extract the density. - density = nthfloat(self.getNextToken(), 0) - - # Take the absolute value of the density - since it could be -ve. - self.normalisation.density = abs(density) - - # Decide on the units of density. 
- # -ve density means it is atomic (atoms/A^3) - # +ve means it is chemical (gm/cm^3) - self.normalisation.densityUnits = ( - UnitsOfDensity.ATOMIC if - density < 0 - else UnitsOfDensity.CHEMICAL - ) - - self.normalisation.tempForNormalisationPC = ( - nthfloat(self.getNextToken(), 0) - ) - crossSectionSource = firstword(self.getNextToken()) - if ( - crossSectionSource == "TABLES" - or crossSectionSource == "TRANSMISSION" - ): - self.normalisation.totalCrossSectionSource = ( - CrossSectionSource[crossSectionSource] - ) - else: - self.normalisation.totalCrossSectionSource = ( - CrossSectionSource.FILE - ) - self.normalisation.crossSectionFilename = crossSectionSource - - self.normalisation.normalisationDifferentialCrossSectionFile = ( - firstword(self.getNextToken()) - ) - - self.normalisation.lowerLimitSmoothedNormalisation = ( - nthfloat(self.getNextToken(), 0) - ) - self.normalisation.normalisationDegreeSmoothing = ( - nthfloat(self.getNextToken(), 0) - ) - self.normalisation.minNormalisationSignalBR = ( - nthfloat(self.getNextToken(), 0) - ) - - # Consume whitespace and the closing brace. - self.consumeUpToDelim("}") - - # Resolve to relative. - pattern = re.compile(r"StartupFiles\S*") - - match = re.search( - pattern, - self.normalisation. - normalisationDifferentialCrossSectionFile - ) - - if match: - ( - self.normalisation. - normalisationDifferentialCrossSectionFile - ) = match.group() - - except Exception as e: - raise ParserException( - "Whilst parsing Normalisation, an exception occured." - " The input file is most likely of an incorrect format, " - "and some attributes were missing." - ) from e - - def parseSampleBackground(self): - """ - Intialises a SampleBackground object. - Parses the attributes of the SampleBackground from the input stream. - Raises a ParserException if any mandatory attributes are missing. - Returns the parsed object. 
- - Parameters - ---------- - None - Returns - ------- - sampleBackground : SampleBackground - The SampleBackground that was parsed from the input lines. - """ - - try: - sampleBackground = SampleBackground() - line = self.peekNextToken() - if "SAMPLE BACKGROUND" in line and "{" in line: - self.consumeTokens(1) - self.consumeWhitespace() - dataFileInfo = self.getNextToken() - numberOfFiles = nthint(dataFileInfo, 0) - sampleBackground.periodNumber = nthint(dataFileInfo, 1) - - dataFiles = [] - for _ in range(numberOfFiles): - dataFiles.append(firstword(self.getNextToken())) - sampleBackground.dataFiles = ( - DataFiles(dataFiles, "SAMPLE BACKGROUND") - ) - - # Consume whitespace and the closing brace. - self.consumeUpToDelim("}") - - return sampleBackground - except Exception as e: - raise ParserException( - "Whilst parsing Sample Background, an exception occured." - " The input file is most likely of an incorrect format, " - "and some attributes were missing." - ) from e - - def parseSample(self): - """ - Intialises a Sample object. - Parses the attributes of the Sample from the input stream. - Raises a ParserException if any mandatory attributes are missing. - Returns the parsed object. - - Parameters - ---------- - None - Returns - ------- - sample : Sample - The Sample that was parsed from the input lines. - """ - - try: - # Create a new instance of Sample. - sample = Sample() - - # Extract the sample name, and then discard whitespace lines. - sample.name = ( - str(self.getNextToken()[:-2]).strip() - .replace("SAMPLE", "").strip() - ) - sample.name = utils.replace_unwanted_chars(sample.name) - self.consumeWhitespace() - # The number of files and period number are both stored - # on the same line. - # So we extract the 0th integer for the number of files, - # and the 1st integer for the period number. 
- dataFileInfo = self.getNextToken() - numberOfFiles = nthint(dataFileInfo, 0) - sample.periodNumber = nthint(dataFileInfo, 1) - - # Extract data files - dataFiles = [] - for _ in range(numberOfFiles): - dataFiles.append(firstword(self.getNextToken())) - # Create a DataFiles object from the dataFiles list constructed. - sample.dataFiles = DataFiles(dataFiles, sample.name) - - # For boolean attributes, we convert the first - # integer in the line to its boolean value. - sample.forceCalculationOfCorrections = ( - boolifyNum(nthint(self.getNextToken(), 0)) - ) - - # Construct composition - composition = [] - line = self.getNextToken() - - # Extract the composition. - # Each element in the composition consists of the first 'word', - # integer at the second position, and float t the first position, - # (Atomic Symbol, MassNo, Abundance) in the line. - # If the marker line is encountered, - # then the panel has been parsed. - while "end of composition input" not in line: - - atomicSymbol = firstword(line) - massNo = nthfloat(line, 1) - abundance = nthfloat(line, 2) - - # Create an Element object and append to the composition list. - composition.append(Element(atomicSymbol, massNo, abundance)) - line = self.getNextToken() - - # Create a Composition object from the dataFiles list constructed. - sample.composition = Composition("Sample", elements=composition) - - # For enumerated attributes, - # where the member name of the attribute is - # the first 'word' in the line, and we must get the member, - # we do this: Enum[memberName]. - sample.geometry = Geometry[firstword(self.getNextToken())] - - # Is the geometry FLATPLATE? - if ( - ( - sample.geometry == Geometry.SameAsBeam - and cfg.geometry == Geometry.FLATPLATE - ) - or sample.geometry == Geometry.FLATPLATE): - # If is is FLATPLATE, then extract the upstream and downstream - # thickness, the angle of rotation and sample width. 
- thickness = self.getNextToken() - sample.upstreamThickness = nthfloat(thickness, 0) - sample.downstreamThickness = nthfloat(thickness, 1) - - geometryInfo = self.getNextToken() - sample.angleOfRotation = nthfloat(geometryInfo, 0) - sample.sampleWidth = nthfloat(geometryInfo, 1) - else: - - # Otherwise, it is CYLINDRICAL, - # then extract the inner and outer - # radii and the sample height. - radii = self.getNextToken() - sample.innerRadius = nthfloat(radii, 0) - sample.outerRadius = nthfloat(radii, 1) - sample.sampleHeight = nthfloat(self.getNextToken(), 0) - - # Extract the density. - density = nthfloat(self.getNextToken(), 0) - - # Decide on the units of density. - # -ve density means it is atomic (atoms/A^3) - # +ve means it is chemical (gm/cm^3) - sample.density = abs(density) - sample.densityUnits = ( - UnitsOfDensity.ATOMIC if - density < 0 - else UnitsOfDensity.CHEMICAL - ) - sample.tempForNormalisationPC = nthfloat(self.getNextToken(), 0) - crossSectionSource = firstword(self.getNextToken()) - if ( - crossSectionSource == "TABLES" - or crossSectionSource == "TRANSMISSION" - ): - sample.totalCrossSectionSource = ( - CrossSectionSource[crossSectionSource] - ) - else: - sample.totalCrossSectionSource = CrossSectionSource.FILE - sample.crossSectionFilename = crossSectionSource - sample.sampleTweakFactor = nthfloat(self.getNextToken(), 0) - - topHatW = nthfloat(self.getNextToken(), 0) - if topHatW == 0: - sample.topHatW = 0 - sample.FTMode = FTModes.NO_FT - elif topHatW < 0: - sample.topHatW = abs(topHatW) - sample.FTMode = FTModes.SUB_AVERAGE - else: - sample.topHatW = topHatW - sample.FTMode = FTModes.ABSOLUTE - - sample.minRadFT = nthfloat(self.getNextToken(), 0) - sample.grBroadening = nthfloat(self.getNextToken(), 0) - - # Extract the resonance values. - # Each row consists of the first 2 floats. - # (minWavelength, maxWavelength) in the line. - # If the marker line is encountered, - # then the values has been parsed. 
- line = self.getNextToken() - while ( - "to finish specifying wavelength range of resonance" - not in line - ): - sample.resonanceValues.append( - extract_floats_from_string(line) - ) - line = self.getNextToken() - - # Extract the exponential values. - # Each row consists of the first 3 numbers. - # (Amplitude, Decay, N) in the line. - # If the marker line is encountered, - # then the values has been parsed. - line = self.getNextToken() - if "to specify end of exponential parameter input" not in line: - sample.exponentialValues = [] - while "to specify end of exponential parameter input" not in line: - sample.exponentialValues.append( - extract_nums_from_string(line) - ) - - line = self.getNextToken() - - sample.normalisationCorrectionFactor = ( - nthfloat(self.getNextToken(), 0) - ) - sample.fileSelfScattering = firstword(self.getNextToken()) - sample.normaliseTo = ( - NormalisationType[ - NormalisationType(nthint(self.getNextToken(), 0)).name - ] - ) - sample.maxRadFT = nthfloat(self.getNextToken(), 0) - sample.outputUnits = ( - OutputUnits[OutputUnits(nthint(self.getNextToken(), 0)).name] - ) - sample.powerForBroadening = nthfloat(self.getNextToken(), 0) - sample.stepSize = nthfloat(self.getNextToken(), 0) - sample.runThisSample = boolifyNum(nthint(self.getNextToken(), 0)) - environmentValues = self.getNextToken() - sample.scatteringFraction = nthfloat(environmentValues, 0) - sample.attenuationCoefficient = nthfloat(environmentValues, 1) - - # Consume whitespace and the closing brace. - self.consumeUpToDelim("}") - - return sample - - except Exception as e: - raise ParserException( - "Whilst parsing Sample, an exception occured." - " The input file is most likely of an incorrect format, " - "and some attributes were missing." - ) from e - - def parseContainer(self): - """ - Intialises a Container object. - Parses the attributes of the Container from the input stream. - Raises a ParserException if any mandatory attributes are missing. 
- Returns the parsed object. - - Parameters - ---------- - None - Returns - ------- - container : Container - The Container that was parsed from the input lines. - """ - - try: - # Create a new instance of Container. - container = Container() - - # Extract the name from the lines, - # and then discard the unnecessary lines. - container.name = ( - str(self.getNextToken()[:-2]).strip() - .replace("CONTAINER", "").strip() - ) - self.consumeWhitespace() - - # The number of files and period number are both stored - # on the same line. - # So we extract the 0th integer for the number of files, - # and the 1st integer for the period number. - dataFileInfo = self.getNextToken() - numberOfFiles = nthint(dataFileInfo, 0) - container.periodNumber = nthint(dataFileInfo, 1) - - # Extract data files - dataFiles = [] - for _ in range(numberOfFiles): - dataFiles.append(firstword(self.getNextToken())) - - # Create a DataFiles object from the dataFiles list constructed. - container.dataFiles = DataFiles(dataFiles, container.name) - - # Construct composition - composition = [] - line = self.getNextToken() - # Extract the composition. - # Each element in the composition consists of the first 'word', - # integer at the second position, and float t the first position, - # (Atomic Symbol, MassNo, Abundance) in the line. - # If the marker line is encountered, - # then the panel has been parsed. - while "end of composition input" not in line: - - atomicSymbol = firstword(line) - massNo = nthfloat(line, 1) - abundance = nthfloat(line, 2) - - # Create an Element object and append to the composition list. - composition.append(Element(atomicSymbol, massNo, abundance)) - line = self.getNextToken() - # Create a Composition object from the dataFiles list constructed. 
- container.composition = Composition( - "Container", - elements=composition - ) - - # For enumerated attributes, - # where the member name of the attribute is - # the first 'word' in the line, and we must get the member, - # we do this: Enum[memberName]. - container.geometry = Geometry[firstword(self.getNextToken())] - - # Is the geometry FLATPLATE? - if ( - ( - container.geometry == Geometry.SameAsBeam - and cfg.geometry == Geometry.FLATPLATE - ) - or container.geometry == Geometry.FLATPLATE): - # If is is FLATPLATE, then extract the upstream and downstream - # thickness, the angle of rotation and sample width. - thickness = self.getNextToken() - container.upstreamThickness = nthfloat(thickness, 0) - container.downstreamThickness = nthfloat(thickness, 1) - - geometryValues = self.getNextToken() - container.angleOfRotation = nthfloat(geometryValues, 0) - container.sampleWidth = nthfloat(geometryValues, 1) - else: - - # Otherwise, it is CYLINDRICAL, - # then extract the inner and outer - # radii and the sample height. - radii = self.getNextToken() - container.innerRadius = nthfloat(radii, 0) - container.outerRadius = nthfloat(radii, 1) - container.sampleHeight = nthfloat(self.getNextToken(), 0) - - # Extract the density. - density = nthfloat(self.getNextToken(), 0) - - # Take the absolute value of the density - since it could be -ve. - container.density = abs(density) - - # Decide on the units of density. 
- # -ve density means it is atomic (atoms/A^3) - # +ve means it is chemical (gm/cm^3) - container.densityUnits = ( - UnitsOfDensity.ATOMIC if - density < 0 - else UnitsOfDensity.CHEMICAL - ) - crossSectionSource = firstword(self.getNextToken()) - if ( - crossSectionSource == "TABLES" - or crossSectionSource == "TRANSMISSION" - ): - container.totalCrossSectionSource = ( - CrossSectionSource[crossSectionSource] - ) - else: - container.totalCrossSectionSource = CrossSectionSource.FILE - container.crossSectionFilename = crossSectionSource - container.tweakFactor = nthfloat(self.getNextToken(), 0) - - environmentValues = self.getNextToken() - container.scatteringFraction = nthfloat(environmentValues, 0) - container.attenuationCoefficient = nthfloat(environmentValues, 1) - - # Consume whitespace and the closing brace. - self.consumeUpToDelim("}") - - return container - - except Exception as e: - raise ParserException( - "Whilst parsing Container, an exception occured." - " The input file is most likely of an incorrect format, " - "and some attributes were missing." - ) from e - - def parseComponents(self): - try: - while self.stream: - component = self.parseComponent() - if component: - self.components.addComponent(component) - except Exception as e: - raise ParserException( - "Whilst parsing Components, an exception occured." - " The input file is most likely of an incorrect format." - ) from e - - def parseComponent(self): - name = self.getNextToken().rstrip() - component = Component(name) - line = self.peekNextToken() - if "(" in line: - self.consumeTokens(1) - else: - return - line = self.getNextToken() - while line and ")" not in line: - atomicSymbol, massNo, abundance = line.split() - element = Element(atomicSymbol, float(massNo), float(abundance)) - component.addElement(element) - line = self.getNextToken() - return component - - def makeParse(self, key): - """ - Calls a parsing function from a dictionary of parsing functions - by the input key. 
- Returns the result of the called parsing function. - Only use case is as a helper function during parsing. - - Parameters - ---------- - key : str - Parsing function to call - (INSTRUMENT/BEAM/NORMALISATION/SAMPLE BACKGROUND/SAMPLE/CONTAINER) - Returns - ------- - NoneType - if parsing INSTRUMENT/BEAM/NORMALISATION - SampleBackground - if parsing SAMPLE BACKGROUND - Sample - if parsing Sample - Container - if parsing Container - None - if parsing Components - """ - - parsingFunctions = { - "INSTRUMENT": self.parseInstrument, - "BEAM": self.parseBeam, - "NORMALISATION": self.parseNormalisation, - "SAMPLE BACKGROUND": self.parseSampleBackground, - "SAMPLE": self.parseSample, - "CONTAINER": self.parseContainer, - "COMPONENTS": self.parseComponents - } - # Return the result of the parsing function that was called. - return parsingFunctions[key]() - - def sampleBackgroundHelper(self): - """ - Helper method for parsing Sample Background and its - Samples and their Containers. - Returns the SampleBackground object. - Parameters - ---------- - None - Returns - ------- - SampleBackground - The SampleBackground parsed from the lines. - """ - - # Parse sample background. - sampleBackground = self.makeParse("SAMPLE BACKGROUND") + def setGudrunDir(self, dir): + self.instrument.GudrunInputFileDir = dir - self.consumeWhitespace() - line = self.peekNextToken() + def convertToSample(self, container, persist=False): - # Parse all Samples and Containers belonging to the sample background. 
- while "END" not in line and "SAMPLE BACKGROUND" not in line: - if not line: - raise ParserException("Unexpected EOF during parsing.") - elif "GO" in line: - self.getNextToken() - elif "SAMPLE" in line and firstword(line) == "SAMPLE": - sample = self.makeParse("SAMPLE") - if not sample.name: - sample.name = utils.replace_unwanted_chars(uniquifyName( - "SAMPLE", - [s.name for s in sampleBackground.samples], - sep="", - incFirst=True - )) - sampleBackground.samples.append(sample) - elif "CONTAINER" in line and firstword(line) == "CONTAINER": - container = self.makeParse("CONTAINER") - if not container.name: - container.name = uniquifyName( - "CONTAINER", - [c.name - for c in sampleBackground.samples[-1].containers], - sep="", - incFirst=True) - sampleBackground.samples[-1].containers.append( - container - ) - self.consumeWhitespace() - line = self.peekNextToken() - return sampleBackground + sample = container.convertToSample() - def parse(self, path, config=False): - """ - Parse the GudrunFile from its path. - Assign objects from the file to the attributes of the class. - Raises ParserException if Instrument, - Beam or Normalisation are missing. + if persist: + for i, sampleBackground in enumerate(self.sampleBackgrounds): + for sample in sampleBackground.samples: + if container in sample.containers: + sample.containers.remove(container) + break + self.sampleBackgrounds[i].append(sample) + return sample - Parameters - ---------- - None - Returns - ------- - None - """ - self.config = config - # Ensure only valid files are given. - if not path: - raise ParserException( - "Path not supplied. Cannot parse from an empty path!" 
- ) - if not os.path.exists(path): - raise ParserException( - "The path supplied is invalid.\ - Cannot parse from an invalid path" + path + def determineError(self, sample): + gudPath = sample.dataFiles[0].replace( + self.instrument.dataFileType, + "gud" + ) + gudFile = GudFile( + os.path.join( + self.instrument.GudrunInputFileDir, gudPath ) - if self.format == Format.YAML: - # YAML Files - try: - ( - self.instrument, - self.beam, - self.components, - self.normalisation, - self.sampleBackgrounds, - cfg.GUI - ) = self.yaml.parseYaml(path) - except YAMLException as e: - raise ParserException(e) - else: - # TXT Files - parsing = "" - KEYWORDS = { - "INSTRUMENT": False, - "BEAM": False, - "NORMALISATION": False - } - - # Decide the encoding - import chardet - with open(path, 'rb') as fp: - encoding = chardet.detect(fp.read())['encoding'] - - # Read the input stream into our attribute. - with open(path, encoding=encoding) as fp: - self.stream = fp.readlines() - - # Here we go! Get the first token and begin parsing. - line = self.getNextToken() - - # Iterate through the file, - # parsing the Instrument, Beam and Normalisation. - while ( - self.stream - and not all(value for value in KEYWORDS.values()) - ): - if ( - firstword(line) in KEYWORDS.keys() - and not KEYWORDS[firstword(line)] - ): - parsing = firstword(line) - self.makeParse(parsing) - KEYWORDS[parsing] = True - line = self.getNextToken() - - # If we didn't parse each one of the keywords, then panic. - if not all(KEYWORDS.values()) and not config: - raise ParserException(( - 'INSTRUMENT, BEAM and NORMALISATION' - ' were not parsed. It\'s possible the file' - ' supplied is of an incorrect format!' - )) - elif not KEYWORDS["INSTRUMENT"] and config: - raise ParserException(( - 'INSTRUMENT was not parsed. It\'s possible the file' - ' supplied is of an incorrect format!' - )) + ) + error = round( + ( + 1.0 - (gudFile.averageLevelMergedDCS / gudFile.expectedDCS) + ) * 100, 1 + ) + return error - # Ignore whitespace. 
- self.consumeWhitespace() - line = self.peekNextToken() + def runSamples(self): + return [ + s for sb in self.sampleBackgrounds + for s in sb.samples + if s.runThisSample] - # Parse sample backgrounds, alongside their samples and containers. - while self.stream: - if "SAMPLE BACKGROUND" in line and "{" in line: - self.sampleBackgrounds.append( - self.sampleBackgroundHelper() - ) - elif "COMPONENTS:" in line: - self.makeParse("COMPONENTS") - line = self.getNextToken() + def checkNormDataFiles(self): + return (len(self.normalisation.dataFiles) + and len(self.normalisation.dataFilesBg)) def __str__(self): """ @@ -1467,119 +223,3 @@ def __str__(self): + footer + components ) - - def save(self, path='', format=None): - if not path: - path = self.path() - - if not format: - format = self.format - if format == Format.TXT: - self.write_out( - path=f"{os.path.splitext(path)[0]}.txt", overwrite=True) - elif format == Format.YAML: - self.write_yaml(path=f"{os.path.splitext(path)[0]}.yaml") - - def write_yaml(self, path): - self.yaml = YAML() - self.yaml.writeYAML(self, path) - - def write_out(self, path='', overwrite=False, writeParameters=True): - """ - Writes out the string representation of the GudrunFile. - If 'overwrite' is True, then the initial file is overwritten. - Otherwise, it is written to 'gudpy_{initial filename}.txt'. - - Parameters - ---------- - overwrite : bool, optional - Overwrite the initial file? (default is False). - path : str, optional - Path to write to. 
- Returns - ------- - None - """ - if path: - if not overwrite: - assert (not os.path.exists(path)) - f = open( - path, "w", encoding="utf-8" - ) - elif not overwrite: - assert (not os.path.exists(os.path.join( - self.instrument.GudrunInputFileDir, - self.OUTPATH) - )) - f = open( - os.path.join( - self.instrument.GudrunInputFileDir, - self.OUTPATH - ), "w", encoding="utf-8") - else: - if not self.path(): - path = os.path.join( - self.instrument.GudrunInputFileDir, - self.OUTPATH) - f = open(path, "w", encoding="utf-8") - - if os.path.basename(f.name) == self.OUTPATH: - for sampleBackground in self.sampleBackgrounds: - sampleBackground.writeAllSamples = False - f.write(str(self)) - f.close() - - if writeParameters: - for sb in self.sampleBackgrounds: - for s in sb.samples: - if s.runThisSample: - gf = deepcopy(self) - gf.sampleBackgrounds = [deepcopy(sb)] - gf.sampleBackgrounds[0].samples = [deepcopy(s)] - gf.write_out( - path=os.path.join( - self.instrument.GudrunInputFileDir, - s.pathName(), - ), - overwrite=True, - writeParameters=False - ) - - def setGudrunDir(self, dir): - self.instrument.GudrunInputFileDir = dir - - def convertToSample(self, container, persist=False): - - sample = container.convertToSample() - - if persist: - for i, sampleBackground in enumerate(self.sampleBackgrounds): - for sample in sampleBackground.samples: - if container in sample.containers: - sample.containers.remove(container) - break - self.sampleBackgrounds[i].append(sample) - return sample - - def determineError(self, sample): - gudPath = sample.dataFiles[0].replace( - self.instrument.dataFileType, - "gud" - ) - gudFile = GudFile( - os.path.join( - self.instrument.GudrunInputFileDir, gudPath - ) - ) - error = round( - ( - 1.0 - (gudFile.averageLevelMergedDCS / gudFile.expectedDCS) - ) * 100, 1 - ) - return error - - -Container.getNextToken = GudrunFile.getNextToken -Container.peekNextToken = GudrunFile.peekNextToken -Container.consumeUpToDelim = GudrunFile.consumeUpToDelim 
-Container.consumeWhitespace = GudrunFile.consumeWhitespace diff --git a/gudpy/__init__.py b/gudpy/core/io/__init__.py similarity index 100% rename from gudpy/__init__.py rename to gudpy/core/io/__init__.py diff --git a/gudpy/core/io/gudpy_io.py b/gudpy/core/io/gudpy_io.py new file mode 100644 index 00000000..dfd2cd33 --- /dev/null +++ b/gudpy/core/io/gudpy_io.py @@ -0,0 +1,474 @@ +from abc import abstractmethod +from enum import Enum +from ruamel.yaml import YAML as yaml +from ruamel.yaml import YAMLError +import shutil +import os +import typing as typ + +from core.composition import ( + Component, Components, Composition, WeightedComponent +) +from core.data_files import DataFile, DataFiles +from core.element import Element +from core.exception import YAMLException +from core.gui_config import GUIConfig +from core import utils +from core.instrument import Instrument +from core.beam import Beam +from core.normalisation import Normalisation +from core.sample_background import SampleBackground +from core.sample import Sample +from core.container import Container +from core.gudrun_file import GudrunFile +from core.io.gudrun_file_parser import GudrunFileParser +from core import config + + +class GudPyIO: + projectDir = "" + loadFile = "" + + def __init__(self): + self.yaml = self._getYamlModule() + self.gudrunFileParser = GudrunFileParser() + + @classmethod + def projectName(cls) -> str: + return os.path.dirname(cls.projectDir) + + @classmethod + def autosavePath(cls) -> str: + return f"{os.path.basename(cls.projectDir)}.autosave" + + @classmethod + def path(cls): + """Returns the path to the yaml file + + Returns + ------- + str + Path to yaml file + + Raises + ------ + RuntimeError + Raised if project directory is not set + """ + if not cls.projectDir: + raise RuntimeError("Save location not set.") + return os.path.join(cls.projectDir, f"{cls.projectName()}.yaml") + + @classmethod + def setSaveLocation(cls, projectDir: str): + """Sets the save location/project 
directory + + Parameters + ---------- + projectDir : str + Path to new save location + """ + cls.projectDir = projectDir + + @classmethod + def checkSaveLocation(cls) -> bool: + """Checks if user has set the save location + """ + return bool(cls.projectDir) + + def save(self, gudrunFile): + """Saves yaml file to project directory + """ + self._writeYAML(gudrunFile, self.path()) + + def importGudrunFile(self, path, config=False) -> GudrunFile: + GudPyIO.loadFile = path + return self.gudrunFileParser.parseFromPath(path, config) + + def importProject(self, projectDir) -> GudrunFile: + """Imports from a project directory + + Parameters + ---------- + projectDir : str + Path to GudPy project folder + + Raises + ------ + FileNotFoundError + Raised if there is no YAML input file in the + project directory + + Returns + ------- + GudrunFile + Genererated GudrunFile from input + """ + loadFile = "" + + if os.path.exists(os.path.join( + projectDir, + f"{os.path.basename(projectDir)}.yaml" + )): + # If default file exists + loadFile = os.path.join( + projectDir, + f"{os.path.basename(projectDir)}.yaml" + ) + else: + # Try to find yaml files + for f in os.listdir(projectDir): + if os.path.splitext(f)[1] == ".yaml": + # If file is yaml + loadFile = os.path.join(projectDir, f) + if not loadFile: + raise FileNotFoundError( + "Could not find GudPy input file within the project") + + self.setSaveLocation(projectDir) + return self._parseYaml(loadFile) + + def importFromYamlFile(self, loadFile) -> GudrunFile: + return self._parseYaml(loadFile) + + def exportProject(self, gudrunFile, targetDir): + if os.path.exists(targetDir): + raise IsADirectoryError("Cannot be an existing directory") + + oldFile = os.path.join(targetDir, os.path.basename(self.loadFile)) + os.makedirs(targetDir) + + if os.path.exists(self.projectDir) and os.path.exists(oldFile): + shutil.copytree(self.projectDir, targetDir) + os.rename(oldFile, self.path()) + else: + yamlPath = os.path.join( + targetDir, + 
f"{os.path.basename(targetDir)}.yaml" + ) + self.exportYamlFile(gudrunFile, yamlPath) + + def exportYamlFile(self, gudrunFile, path): + self._writeYAML(gudrunFile, path) + + @classmethod + def exportGudrunFile(cls, gudrunFile, path): + return GudrunFileParser.export(gudrunFile, path) + + @classmethod + def writeGudrunFile(cls, gudrunFile, path): + GudrunFileParser.writeGudrunFile(gudrunFile, path) + + @classmethod + def writeObject(cls, obj, path): + with open(path, "w", encoding="utf-8") as f: + f.write(str(obj)) + f.close() + +# ================================================================= +# PARSING HELPERS +# ================================================================= + + def _getYamlModule(self) -> yaml: + yaml_ = yaml() + yaml_.preserve_quotes = True + yaml_.default_flow_style = None + yaml_.encoding = 'utf-8' + return yaml_ + + def _parseYaml(self, path) -> GudrunFile: + self.loadFile = path + yamldict = self._yamlToDict(path) + gudrunFile = GudrunFile() + for k, v in yamldict.items(): + if k == "GUI": + GUI = GUIConfig() + self._yamlToObject(GUI, v) + config.GUIConfig = GUI + continue + if not hasattr(gudrunFile, k): + # If attribute is not valid + print(f"Ignoring depreciated attribute '{k}'") + continue + + parsingFunc = self._parsingFuncMap(k) + if parsingFunc: + try: + obj = parsingFunc(v) + setattr(gudrunFile, k, obj) + except YAMLException as e: + raise YAMLException(e) + except YAMLError as e: + # Exception caused by yaml parsing library + raise YAMLException(f"Invalid YAML file: {str(e)}") + except Exception as e: + raise YAMLException(f"Failed at: '{k}'. 
Details: {e}") + return gudrunFile + + def _writeYAML(self, gudrunFile: GudrunFile, path: str) -> None: + with open(path, "wb") as fp: + outyaml = { + "instrument": gudrunFile.instrument, + "beam": gudrunFile.beam, + "components": gudrunFile.components.components, + "normalisation": gudrunFile.normalisation, + "sampleBackgrounds": gudrunFile.sampleBackgrounds, + "GUI": config.GUI + } + self.yaml.dump( + {k: self.toYaml(v) for k, v in outyaml.items()}, + fp + ) + + def _parsingFuncMap(self, key) -> typ.Union[typ.Callable, None]: + parsingFuncs = { + "instrument": self._parseYamlInstrument, + "beam": self._parseYamlBeam, + "normalisation": self._parseYamlNormalisation, + "sampleBackgrounds": self._parseYamlSampleBackground, + "sample": self._parseYamlSample, + "composition": self._parseYamlComposition, + "component": self._parseYamlComponent, + "components": self._parseYamlComponents, + "elements": self._parseYamlElements, + "dataFiles": self._parseYamlDataFiles, + "dataFilesBg": self._parseYamlDataFiles + } + if key in parsingFuncs.keys(): + return parsingFuncs[key] + else: + return None + + def _yamlToDict(self, path) -> any: + # Read the input stream into our attribute. 
+ with open(path, encoding=self.yaml.encoding) as fp: + return self.yaml.load(fp) + + def _yamlToObject(self, obj: any, yamldict: any) -> any: + for k, v in yamldict.items(): + self._assignYamlToAttr(obj, k, v) + return obj + + def _assignYamlToAttr(self, obj: any, key, val) -> None: + if not hasattr(obj, key): + # If attribute is not valid + print( + f"Ignoring depreciated attribute '{key}'" + f"given to '{type(obj).__name__}'" + ) + return + + try: + if isinstance(obj.__dict__[key], Enum): + setattr(obj, key, type(obj.__dict__[key])[val]) + return + + setattr(obj, key, type(obj.__dict__[key])(self.toBuiltin(val))) + except Exception as e: + raise YAMLException( + "Parsing failed while trying to assign attribute" + f"'{key}' to {type(obj).__name__}") from e + + def _parseYamlInstrument(self, yamldict: any) -> Instrument: + instrument = Instrument() + instrument = self._yamlToObject(instrument, yamldict) + return instrument + + def _parseYamlBeam(self, yamldict: any) -> Normalisation: + beam = Beam() + beam = self._yamlToObject(beam, yamldict) + return beam + + def _parseYamlNormalisation(self, yamldict: any) -> Normalisation: + normalisation = Normalisation() + for k, v in yamldict.items(): + if k == "composition": + composition = self._parseYamlComposition(v) + setattr(normalisation, k, composition) + elif k == "dataFiles" or k == "dataFilesBg": + dataFiles = self._parseYamlDataFiles(v) + setattr(normalisation, k, dataFiles) + else: + self._assignYamlToAttr(normalisation, k, v) + return normalisation + + def _parseYamlSample(self, yamldict: any) -> Sample: + sample = Sample() + for k, v in yamldict.items(): + if k == "dataFiles": + dataFiles = self._parseYamlDataFiles(v) + setattr(sample, k, dataFiles) + elif k == "composition": + composition = self._parseYamlComposition(v) + setattr(sample, k, composition) + elif k == "containers": + for contyaml in yamldict[k]: + container = self._parseYamlContainer(contyaml) + sample.containers.append(container) + else: + 
self._assignYamlToAttr(sample, k, v) + return sample + + def _parseYamlSampleBackground( + self, yamllist: list + ) -> list[SampleBackground]: + sampleBackgrounds = [] + for sbg in yamllist: + sampleBg = SampleBackground() + for k, v in sbg.items(): + if k == "samples": + for sampleyaml in v: + sample = self._parseYamlSample(sampleyaml) + sample.name = utils.replace_unwanted_chars(sample.name) + sampleBg.samples.append(sample) + elif k == "dataFiles": + dataFiles = self._parseYamlDataFiles(v) + setattr(sampleBg, k, dataFiles) + else: + self._assignYamlToAttr(sampleBg, k, v) + sampleBackgrounds.append(sampleBg) + return sampleBackgrounds + + def _parseYamlComposition(self, yamldict: any) -> Composition: + composition = Composition(yamldict["type_"]) + for k, v in yamldict.items(): + if k == "elements": + elements = self._parseYamlElements(yamldict[k]) + setattr(composition, k, elements) + elif k == "weightedComponents": + weightedComponents = [] + for wc in yamldict[k]: + component = self._parseYamlComponent(wc) + ratio = wc["ratio"] + try: + weightedComponents.append( + WeightedComponent( + component, float(ratio)) + ) + except ValueError: + raise YAMLException( + "Invalid ratio given to Weighted Component") + setattr(composition, k, weightedComponents) + else: + self._assignYamlToAttr(composition, k, v) + return composition + + def _parseYamlContainer(self, yamldict: any) -> Container: + container = Container() + for k, v in yamldict.items(): + if k == "composition": + composition = self._parseYamlComposition(v) + setattr(container, k, composition) + elif k == "dataFiles": + dataFiles = self._parseYamlDataFiles(v) + setattr(container, k, dataFiles) + else: + self._assignYamlToAttr(container, k, v) + return container + + def _parseYamlDataFiles(self, yamldict: any) -> DataFiles: + dataFiles = DataFiles([], yamldict["name"]) + for df in yamldict["_dataFiles"]: + dataFile = DataFile(df["filename"]) + for k, v in df.items(): + if k == "_outputs": + outDict = {} + for 
k_, v_ in v: + outDict[k_] = v_ + dataFile._outputs = outDict + else: + self._assignYamlToAttr(dataFile, k, v) + dataFiles.dataFiles.append(dataFile) + return dataFiles + + def _parseYamlComponent(self, yamldict: any) -> Component: + if ( + "component" not in yamldict + or "ratio" not in yamldict + ): + raise YAMLException( + "Weighted Component expects 'component' and" + + " 'ratio' to be provided") + component = Component() + self._yamlToObject( + component, yamldict["component"] + ) + return component + + def _parseYamlComponents(self, yamllist: list[dict]) -> Components: + components = [] + for c in yamllist: + components.append(self._parseYamlComponent(c)) + return Components(components) + + def _parseYamlElements(self, yamllist: list[str]) -> list[Element]: + elements = [] + for idx, element in enumerate(yamllist): + # Ensuring correct arguements are provided + if ( + "atomicSymbol" not in element + or "massNo" not in element + or "abundance" not in element + ): + raise YAMLException( + "Insufficient arguments given to element" + + f" {idx + 1}. 
Expects 'atomicSymbol', 'massNo'" + + " and 'abundance'" + ) + + # Setting element properties + try: + element_ = Element( + **{ + "atomicSymbol": element["atomicSymbol"], + "massNo": float(element["massNo"]), + "abundance": float(element["abundance"]) + } + ) + elements.append(element_) + except ValueError as e: + raise YAMLException( + f"Invalid number given to element {idx + 1}:" + f"{e}") + return elements + + @abstractmethod + def toYaml(self, var: any): + if var.__class__.__module__ == "ruamel.yaml.scalarfloat": + return float(var) + if var.__class__.__module__ == "builtins": + if isinstance(var, (list, tuple)): + return type(var)([self.toYaml(v) for v in var]) + else: + return var + elif isinstance(var, Enum): + return type(var)(var.value).name + elif isinstance(var, ( + Instrument, Beam, Components, Normalisation, + SampleBackground, Sample, Container, WeightedComponent, + Component, Composition, Element, DataFiles, DataFile, GUIConfig + )): + return { + k: self.toYaml(v) + for k, v in var.__dict__.items() + if k not in var.yamlignore + } + + @abstractmethod + def toBuiltin(self, yamlvar: any): + if yamlvar is None: + return None + if isinstance(yamlvar, (list, tuple)): + return [self.toBuiltin(v) for v in yamlvar] + elif (yamlvar.__class__.__module__ == + "ruamel.yaml.comments.CommentedMap"): + dict = {} + for k, v in yamlvar.items(): + dict[k] = v + return dict + elif yamlvar.__class__.__module__ == "builtins": + return yamlvar + elif yamlvar.__class__.__module__ == "ruamel.yaml.scalarfloat": + return float(yamlvar) + elif yamlvar.__class__.__module__ == "ruamel.yaml.scalarstring": + return str(yamlvar) diff --git a/gudpy/core/io/gudrun_file_parser.py b/gudpy/core/io/gudrun_file_parser.py new file mode 100644 index 00000000..c82c52e6 --- /dev/null +++ b/gudpy/core/io/gudrun_file_parser.py @@ -0,0 +1,1201 @@ +import re +import os +import typing as typ + +from core.composition import ( + Component, Components, Composition +) +from core.data_files import 
DataFiles +from core.element import Element +from core.exception import ParserException +from core import utils +from core.instrument import Instrument +from core.beam import Beam +from core.normalisation import Normalisation +from core.sample_background import SampleBackground +from core.sample import Sample +from core.container import Container +from core.gudrun_file import GudrunFile +from core import config +import core.enums as enums + + +class GudrunFileParser: + """Class to manage the parsing of Gudrun input files + """ + + def __init__(self): + # Text stream + self.stream = None + self.config = False + + @classmethod + def writeGudrunFileTo(cls, gudrunFile: GudrunFile, path: str): + gudrunFile.setGudrunDir(os.path.dirname(path)) + for sampleBackground in gudrunFile.sampleBackgrounds: + sampleBackground.writeAllSamples = False + + with open(path, "w", encoding="utf-8") as f: + f.write(str(gudrunFile)) + f.close() + + @classmethod + def writeGudrunFile(cls, gudrunFile: GudrunFile, runDir: str): + cls.writeGudrunFileTo(gudrunFile, os.path.join( + runDir, GudrunFile.OUTPATH)) + + @classmethod + def export(cls, gudrunFile: GudrunFile, path: str): + for sampleBackground in gudrunFile.sampleBackgrounds: + sampleBackground.writeAllSamples = True + + with open(path, "w", encoding="utf-8") as f: + f.write(str(gudrunFile)) + f.close() + + def parse(self, lines: list[str], config=False) -> GudrunFile: + """ + Parse the GudrunFile from a list of lines. + Assign objects from the file to the attributes of the class. + Raises ParserException if Instrument, + Beam or Normalisation are missing. + """ + instrument = Instrument() + beam = Beam() + components = Components() + normalisation = Normalisation() + sampleBackgrounds = [] + + KEYWORDS = { + "INSTRUMENT": False, + "BEAM": False, + "NORMALISATION": False + } + self.stream = lines + # Here we go! Get the first token and begin parsing. 
+ line = self._getNextToken() + + # Iterate through the file, + # parsing the Instrument, Beam and Normalisation. + while (self.stream and not all(value for value in KEYWORDS.values())): + line = self._getNextToken() + kwd = utils.firstword(line) + if kwd == "INSTRUMENT": + instrument = self._parseInstrument() + KEYWORDS[kwd] = True + elif kwd == "BEAM": + beam = self._parseBeam() + KEYWORDS[kwd] = True + elif kwd == "NORMALISATION": + normalisation = self._parseNormalisation() + KEYWORDS[kwd] = True + # If we didn't parse each one of the keywords, then panic. + if not all(KEYWORDS.values()) and not config: + unparsed = [] + for k, v in KEYWORDS.items(): + if not v: + unparsed.append(k) + raise ParserException(( + 'INSTRUMENT, BEAM, NORMALISATION,' + ' were not parsed. It\'s possible the file' + ' supplied is of an incorrect format!' + )) + elif not KEYWORDS["INSTRUMENT"] and config: + raise ParserException(( + 'INSTRUMENT was not parsed. It\'s possible the file' + ' supplied is of an incorrect format!' + )) + + # Ignore whitespace. + self._consumeWhitespace() + line = self._peekNextToken() + + # Parse sample backgrounds, alongside their samples and containers. + while self.stream: + if "SAMPLE BACKGROUND" in line and "{" in line: + sampleBackgrounds.append( + self._sampleBackgroundHelper() + ) + elif "COMPONENTS:" in line: + components = self._parseComponents(line) + line = self._getNextToken() + + return GudrunFile( + instrument=instrument, + beam=beam, + normalisation=normalisation, + sampleBackgrounds=sampleBackgrounds, + components=components + ) + + def parseFromPath( + self, path: str, config=False + ) -> typ.Tuple[ + Instrument, Beam, Normalisation, + list[SampleBackground], Components]: + """ + Parse the GudrunFile from its path. + Assign objects from the file to the attributes of the class. + Raises ParserException if Instrument, + Beam or Normalisation are missing. + """ + self.config = config + + # Ensure only valid files are given. 
+ if not path: + raise ParserException( + "Path not supplied. Cannot parse from an empty path!" + ) + if not os.path.exists(path): + raise ParserException( + "The path supplied is invalid.\ + Cannot parse from an invalid path" + path + ) + + # Decide the encoding + import chardet + with open(path, 'rb') as fp: + encoding = chardet.detect(fp.read())['encoding'] + # Read the input stream into our attribute. + with open(path, encoding=encoding) as fp: + lines = fp.readlines() + return self.parse(lines, config) + +# ================================================================= +# PARSING HELPERS +# ================================================================= + + def _getNextToken(self) -> typ.Union[str, None]: + """ + Pops the 'next token' from the stream and returns it. + Essentially removes the first line in the stream and returns it. + """ + return self.stream.pop(0) if self.stream else None + + def _peekNextToken(self) -> typ.Union[str, None]: + """ + Returns the next token in the input stream, without removing it. + """ + return self.stream[0] if self.stream else None + + def _consumeTokens(self, n: int) -> None: + """ + Consume n tokens from the input stream. + """ + for _ in range(n): + self._getNextToken() + + def _consumeUpToDelim(self, delim: str) -> None: + """ + Consume tokens iteratively, until a delimiter is reached. + """ + line = self._getNextToken() + while line[0] != delim: + line = self._getNextToken() + + def _consumeWhitespace(self) -> None: + """ + Consume tokens iteratively, while they are whitespace. + """ + line = self._peekNextToken() + if line and line.isspace(): + self._getNextToken() + line = self._peekNextToken() + + def _parseInstrument(self) -> Instrument: + """ + Intialises an Instrument object and assigns it to the + instrument attribute. + Parses the attributes of the Instrument from the input stream. + Raises a ParserException if any mandatory attributes are missing. 
+ """ + try: + instrument = Instrument() + self._consumeWhitespace() + + # For string attributes, + # we simply extract the utils.firstword in the line. + instrument.name = enums.Instruments[ + utils.firstword(self._getNextToken())] + self._consumeTokens(1) + instrument.dataFileDir = os.path.abspath( + utils.firstword(self._getNextToken())) + os.path.sep + instrument.dataFileType = utils.firstword(self._getNextToken()) + instrument.detectorCalibrationFileName = ( + utils.firstword(self._getNextToken()) + ) + + # For single integer attributes, + # we extract the zeroth int from the line. + instrument.columnNoPhiVals = utils.nthint(self._getNextToken(), 0) + instrument.groupFileName = utils.firstword(self._getNextToken()) + instrument.deadtimeConstantsFileName = ( + utils.firstword(self._getNextToken()) + ) + + # For N integer attributes, + # we extract the first N integers from the line. + instrument.spectrumNumbersForIncidentBeamMonitor = ( + utils.extract_ints_from_string(self._getNextToken()) + ) + + # For integer pair attributes, + # we extract the first 2 integers from the line. + instrument.wavelengthRangeForMonitorNormalisation = ( + utils.firstNFloats(self._getNextToken(), 2) + ) + + if all( + instrument.wavelengthRangeForMonitorNormalisation + ) == 0.0: + instrument.wavelengthRangeForMonitorNormalisation = [ + 0, 0 + ] + + instrument.spectrumNumbersForTransmissionMonitor = ( + utils.extract_ints_from_string(self._getNextToken()) + ) + + # For single float attributes, + # we extract the zeroth float from the line. 
+ instrument.incidentMonitorQuietCountConst = ( + utils.nthfloat(self._getNextToken(), 0) + ) + instrument.transmissionMonitorQuietCountConst = ( + utils.nthfloat(self._getNextToken(), 0) + ) + + instrument.channelNosSpikeAnalysis = ( + utils.firstNInts(self._getNextToken(), 2) + ) + instrument.spikeAnalysisAcceptanceFactor = ( + utils.nthfloat(self._getNextToken(), 0) + ) + + # Extract wavelength range + # Which consists of the first 3 floats + # (min, max, step) in the line. + wavelengthRange = utils.firstNFloats(self._getNextToken(), 3) + instrument.wavelengthMin = wavelengthRange[0] + instrument.wavelengthMax = wavelengthRange[1] + instrument.wavelengthStep = wavelengthRange[2] + + instrument.NoSmoothsOnMonitor = utils.nthint( + self._getNextToken(), 0) + + # Extract X range + # Which consists of the first 3 floats + # (min, max, step) in the line. + XRange = utils.firstNFloats(self._getNextToken(), 3) + + instrument.XMin = XRange[0] + instrument.XMax = XRange[1] + instrument.XStep = XRange[2] + + # Extract the grouping parameter panel. + # Each row in the panel consists of the first 4 ints + # (Group, XMin, XMax, Background Factor) in the line. + # If the marker line is encountered, + # then the panel has been parsed. + + line = self._getNextToken() + while "to end input of specified values" not in line: + group = utils.nthint(line, 0) + xMin = utils.nthfloat(line, 1) + xMax = utils.nthfloat(line, 2) + backgroundFactor = utils.nthfloat(line, 3) + instrument.groupingParameterPanel.append( + [group, xMin, xMax, backgroundFactor] + ) + line = self._getNextToken() + + instrument.groupsAcceptanceFactor = ( + utils.nthfloat(self._getNextToken(), 0) + ) + instrument.mergePower = utils.nthint(self._getNextToken(), 0) + + # For boolean attributes, we convert the first + # integer in the line to its boolean value. 
+ instrument.subSingleAtomScattering = ( + utils.boolifyNum(utils.nthint(self._getNextToken(), 0)) + ) + + # For enumerated attributes, where the value of the attribute is + # the first integer in the line, and we must get the member, + # we do this: Enum[Enum(value).name] + instrument.mergeWeights = ( + enums.MergeWeights[enums.MergeWeights( + utils.nthint(self._getNextToken(), 0)).name] + ) + instrument.incidentFlightPath = ( + utils.nthfloat(self._getNextToken(), 0) + ) + instrument.spectrumNumberForOutputDiagnosticFiles = ( + utils.nthint(self._getNextToken(), 0) + ) + + instrument.neutronScatteringParametersFile = ( + utils.firstword(self._getNextToken()) + + ) + instrument.scaleSelection = ( + enums.Scales[enums.Scales( + utils.nthint(self._getNextToken(), 0)).name] + ) + instrument.subWavelengthBinnedData = ( + utils.boolifyNum(utils.nthint(self._getNextToken(), 0)) + ) + self._consumeTokens(2) + instrument.logarithmicStepSize = ( + utils.nthfloat(self._getNextToken(), 0) + ) + instrument.hardGroupEdges = ( + utils.boolifyNum(utils.nthint(self._getNextToken(), 0)) + ) + + # If NeXus files are being used, then we expect a NeXus definition + # file to be present, and extract it. + if ( + instrument.dataFileType == "NXS" + or instrument.dataFileType == "nxs" + ): + instrument.nxsDefinitionFile = ( + utils.firstword(self._getNextToken()) + ) + + if self.config: + instrument.goodDetectorThreshold = utils.nthint( + self._getNextToken(), 0 + ) + + # Consume whitespace and the closing brace. + self._consumeUpToDelim("}") + + # Resolve the paths, to make them relative. + # First construct the regular expression to match against. 
+ pattern = re.compile(r"StartupFiles\S*") + + match = re.search( + pattern, + instrument.detectorCalibrationFileName + ) + + if match: + instrument.detectorCalibrationFileName = match.group() + + match = re.search( + pattern, + instrument.groupFileName + ) + + if match: + instrument.groupFileName = match.group() + + match = re.search( + pattern, + instrument.deadtimeConstantsFileName + ) + + if match: + instrument.deadtimeConstantsFileName = match.group() + + match = re.search( + pattern, + instrument.neutronScatteringParametersFile + ) + + if match: + instrument.neutronScatteringParametersFile = match.group() + + match = re.search( + pattern, + instrument.neutronScatteringParametersFile + ) + + if match: + instrument.neutronScatteringParametersFile = match.group() + + return instrument + + except Exception as e: + raise ParserException( + "Whilst parsing Instrument, an exception occured." + " The input file is most likely of an incorrect format, " + "and some attributes were missing." + f"{str(e)}" + ) from e + + def _parseBeam(self) -> Beam: + """ + Intialises a Beam object and assigns it to the + beam attribute. + Parses the attributes of the Beam from the input stream. + Raises a ParserException if any mandatory attributes are missing. + """ + + try: + # Initialise beam attribute to a new instance of Beam. + beam = Beam() + + self._consumeWhitespace() + + # For enumerated attributes, + # where the member name of the attribute is + # the first 'word' in the line, and we must get the member, + # we do this: Enum[memberName]. + beam.sampleGeometry = enums.Geometry[utils.firstword( + self._getNextToken())] + + # Set the global geometry. + config.geometry = beam.sampleGeometry + + # Ignore the number of beam values. + self._consumeTokens(1) + + # For N float attributes, + # we extract the first N floats from the line. 
+ beam.beamProfileValues = ( + utils.extract_floats_from_string(self._getNextToken()) + ) + + # For single float attributes, + # we extract the zeroth float from the line. + range = self._getNextToken() + beam.stepSizeAbsorption = utils.nthfloat(range, 0) + beam.stepSizeMS = utils.nthfloat(range, 1) + beam.noSlices = utils.nthint(range, 2) + beam.angularStepForCorrections = ( + utils.nthint(self._getNextToken(), 0) + ) + + # Extract the incident beam edges + # relative to the centroid of the sample. + incidentBeamEdges = self._getNextToken() + beam.incidentBeamLeftEdge = utils.nthfloat(incidentBeamEdges, 0) + beam.incidentBeamRightEdge = utils.nthfloat(incidentBeamEdges, 1) + beam.incidentBeamBottomEdge = utils.nthfloat(incidentBeamEdges, 2) + beam.incidentBeamTopEdge = utils.nthfloat(incidentBeamEdges, 3) + + # Extract the scattered beam edges + # relative to the centroid of the sample. + scatteredBeamEdges = self._getNextToken() + beam.scatteredBeamLeftEdge = utils.nthfloat(scatteredBeamEdges, 0) + beam.scatteredBeamRightEdge = utils.nthfloat(scatteredBeamEdges, 1) + beam.scatteredBeamBottomEdge = utils.nthfloat( + scatteredBeamEdges, 2) + beam.scatteredBeamTopEdge = utils.nthfloat(scatteredBeamEdges, 3) + + # For string attributes, + # we simply extract the utils.firstword in the line. + beam.filenameIncidentBeamSpectrumParams = ( + utils.firstword(self._getNextToken()) + ) + + # Now match it against a pattern, + # to resolve the path to be relative. + pattern = re.compile(r"StartupFiles\S*") + + match = re.search( + pattern, + beam.filenameIncidentBeamSpectrumParams + ) + + if match: + beam.filenameIncidentBeamSpectrumParams = match.group() + + beam.overallBackgroundFactor = ( + utils.nthfloat(self._getNextToken(), 0) + ) + beam.sampleDependantBackgroundFactor = ( + utils.nthfloat(self._getNextToken(), 0) + ) + beam.shieldingAttenuationCoefficient = ( + utils.nthfloat(self._getNextToken(), 0) + ) + + # Consume whitespace and the closing brace. 
+ self._consumeUpToDelim("}") + + return beam + + except Exception as e: + raise ParserException( + "Whilst parsing Beam, an exception occured." + " The input file is most likely of an incorrect format, " + "and some attributes were missing." + f"{e}" + ) from e + + def _parseNormalisation(self) -> Normalisation: + """ + Intialises a Normalisation object and assigns it to the + normalisation attribute. + Parses the attributes of the Normalisation from the input stream. + Raises a ParserException if any mandatory attributes are missing. + """ + + try: + # Initialise normalisation attribute + # to a new instance of Normalisation. + normalisation = Normalisation() + + self._consumeWhitespace() + + # The number of files and period number are both stored + # on the same line. + # So we extract the 0th integer for the number of files, + # and the 1st integer for the period number. + dataFileInfo = self._getNextToken() + numberOfFiles = utils.nthint(dataFileInfo, 0) + normalisation.periodNumber = utils.nthint(dataFileInfo, 1) + + # Extract params files + dataFiles = [] + for _ in range(numberOfFiles): + dataFiles.append(utils.firstword(self._getNextToken())) + # Sorts list so that it is in ascending order + dataFiles.sort() + + # Create a DataFiles object from the dataFiles list constructed. + normalisation.dataFiles = ( + DataFiles(dataFiles, "NORMALISATION") + ) + + # The number of background files and + # background period number are both stored + # on the same line. + # So we extract the 0th integer for the number of background files, + # and the 1st integer for the background riod number. 
+ dataFileInfoBg = self._getNextToken() + numberOfFilesBg = utils.nthint(dataFileInfoBg, 0) + normalisation.periodNumberBg = utils.nthint(dataFileInfoBg, 1) + + # Extract background params files + dataFilesBg = [] + for j in range(numberOfFilesBg): + dataFilesBg.append(utils.firstword(self._getNextToken())) + + # Sorts list so that it is in ascending order + dataFilesBg.sort() + + # Create a DataFiles object from the dataFiles list constructed. + normalisation.dataFilesBg = ( + DataFiles(dataFilesBg, "NORMALISATION BACKGROUND") + ) + + # For boolean attributes, we convert the first + # integer in the line to its boolean value. + normalisation.forceCalculationOfCorrections = ( + utils.boolifyNum(utils.nthint(self._getNextToken(), 0)) + ) + + # Construct composition + composition = [] + line = self._getNextToken() + # Extract the composition. + # Each element in the composition consists of the first 'word', + # integer at the second position, and float at the third position, + # (Atomic Symbol, MassNo, Abundance) in the line. + # If the marker line is encountered, + # then the panel has been parsed. + while "end of composition input" not in line: + atomicSymbol = utils.firstword(line) + massNo = utils.nthfloat(line, 1) + abundance = utils.nthfloat(line, 2) + + # Create an Element object and append to the composition list. + composition.append( + Element(atomicSymbol, massNo, abundance) + ) + line = self._getNextToken() + + # Create a Composition object from the dataFiles list constructed. + normalisation.composition = ( + Composition("Normalisation", elements=composition) + ) + + # For enumerated attributes, + # where the member name of the attribute is + # the first 'word' in the line, and we must get the member, + # we do this: Enum[memberName]. + normalisation.geometry = ( + enums.Geometry[utils.firstword(self._getNextToken())] + ) + + # Is the geometry FLATPLATE? 
+            if (
+                (
+                    normalisation.geometry == enums.Geometry.SameAsBeam
+                    and config.geometry == enums.Geometry.FLATPLATE
+                )
+                    or normalisation.geometry == enums.Geometry.FLATPLATE):
+                # If it is FLATPLATE, then extract the upstream and downstream
+                # thickness, the angle of rotation and sample width.
+                thickness = self._getNextToken()
+                normalisation.upstreamThickness = utils.nthfloat(thickness, 0)
+                normalisation.downstreamThickness = (
+                    utils.nthfloat(thickness, 1)
+                )
+                geometryInfo = self._getNextToken()
+                normalisation.angleOfRotation = utils.nthfloat(geometryInfo, 0)
+                normalisation.sampleWidth = utils.nthfloat(geometryInfo, 1)
+            else:
+
+                # Otherwise, it is CYLINDRICAL,
+                # then extract the inner and outer
+                # radii and the sample height.
+                radii = self._getNextToken()
+                normalisation.innerRadius = utils.nthfloat(radii, 0)
+                normalisation.outerRadius = utils.nthfloat(radii, 1)
+                normalisation.sampleHeight = (
+                    utils.nthfloat(self._getNextToken(), 0)
+                )
+
+            # Extract the density.
+            density = utils.nthfloat(self._getNextToken(), 0)
+
+            # Take the absolute value of the density - since it could be -ve.
+            normalisation.density = abs(density)
+
+            # Decide on the units of density.
+ # -ve density means it is atomic (atoms/A^3) + # +ve means it is chemical (gm/cm^3) + normalisation.densityUnits = ( + enums.UnitsOfDensity.ATOMIC if + density < 0 + else enums.UnitsOfDensity.CHEMICAL + ) + + normalisation.tempForNormalisationPC = ( + utils.nthfloat(self._getNextToken(), 0) + ) + crossSectionSource = utils.firstword(self._getNextToken()) + if ( + crossSectionSource == "TABLES" + or crossSectionSource == "TRANSMISSION" + ): + normalisation.totalCrossSectionSource = ( + enums.CrossSectionSource[crossSectionSource] + ) + else: + normalisation.totalCrossSectionSource = ( + enums.CrossSectionSource.FILE + ) + normalisation.crossSectionFilename = crossSectionSource + + normalisation.normalisationDifferentialCrossSectionFile = ( + utils.firstword(self._getNextToken()) + ) + + normalisation.lowerLimitSmoothedNormalisation = ( + utils.nthfloat(self._getNextToken(), 0) + ) + normalisation.normalisationDegreeSmoothing = ( + utils.nthfloat(self._getNextToken(), 0) + ) + normalisation.minNormalisationSignalBR = ( + utils.nthfloat(self._getNextToken(), 0) + ) + + # Consume whitespace and the closing brace. + self._consumeUpToDelim("}") + + # Resolve to relative. + pattern = re.compile(r"StartupFiles\S*") + + match = re.search( + pattern, + normalisation. + normalisationDifferentialCrossSectionFile + ) + + if match: + ( + normalisation. + normalisationDifferentialCrossSectionFile + ) = match.group() + + return normalisation + + except Exception as e: + raise ParserException( + "Whilst parsing Normalisation, an exception occured." + " The input file is most likely of an incorrect format, " + "and some attributes were missing." + ) from e + + def _parseSampleBackground(self) -> SampleBackground: + """ + Intialises a SampleBackground object. + Parses the attributes of the SampleBackground from the input stream. + Raises a ParserException if any mandatory attributes are missing. + Returns the parsed object. 
+ + Parameters + ---------- + None + Returns + ------- + sampleBackground : SampleBackground + The SampleBackground that was parsed from the input lines. + """ + + try: + sampleBackground = SampleBackground() + line = self._peekNextToken() + if "SAMPLE BACKGROUND" in line and "{" in line: + self._consumeTokens(1) + self._consumeWhitespace() + dataFileInfo = self._getNextToken() + numberOfFiles = utils.nthint(dataFileInfo, 0) + sampleBackground.periodNumber = utils.nthint(dataFileInfo, 1) + + dataFiles = [] + for _ in range(numberOfFiles): + dataFiles.append(utils.firstword(self._getNextToken())) + sampleBackground.dataFiles = ( + DataFiles(dataFiles, "SAMPLE BACKGROUND") + ) + + # Consume whitespace and the closing brace. + self._consumeUpToDelim("}") + + return sampleBackground + except Exception as e: + raise ParserException( + "Whilst parsing Sample Background, an exception occured." + " The input file is most likely of an incorrect format, " + "and some attributes were missing." + ) from e + + def _parseSample(self) -> Sample: + """ + Intialises a Sample object. + Parses the attributes of the Sample from the input stream. + Raises a ParserException if any mandatory attributes are missing. + Returns the parsed object. + """ + + try: + # Create a new instance of Sample. + sample = Sample() + + # Extract the sample name, and then discard whitespace lines. + sample.name = ( + str(self._getNextToken()[:-2]).strip() + .replace("SAMPLE", "").strip() + ) + sample.name = utils.replace_unwanted_chars(sample.name) + self._consumeWhitespace() + # The number of files and period number are both stored + # on the same line. + # So we extract the 0th integer for the number of files, + # and the 1st integer for the period number. 
+            dataFileInfo = self._getNextToken()
+            numberOfFiles = utils.nthint(dataFileInfo, 0)
+            sample.periodNumber = utils.nthint(dataFileInfo, 1)
+
+            # Extract params files
+            dataFiles = []
+            for _ in range(numberOfFiles):
+                dataFiles.append(utils.firstword(self._getNextToken()))
+            # Create a DataFiles object from the dataFiles list constructed.
+            sample.dataFiles = DataFiles(dataFiles, sample.name, True)
+
+            # For boolean attributes, we convert the first
+            # integer in the line to its boolean value.
+            sample.forceCalculationOfCorrections = (
+                utils.boolifyNum(utils.nthint(self._getNextToken(), 0))
+            )
+
+            # Construct composition
+            composition = []
+            line = self._getNextToken()
+
+            # Extract the composition.
+            # Each element in the composition consists of the first 'word',
+            # integer at the second position, and float at the third position,
+            # (Atomic Symbol, MassNo, Abundance) in the line.
+            # If the marker line is encountered,
+            # then the panel has been parsed.
+            while "end of composition input" not in line:
+
+                atomicSymbol = utils.firstword(line)
+                massNo = utils.nthfloat(line, 1)
+                abundance = utils.nthfloat(line, 2)
+
+                # Create an Element object and append to the composition list.
+                composition.append(Element(atomicSymbol, massNo, abundance))
+                line = self._getNextToken()
+
+            # Create a Composition object from the dataFiles list constructed.
+            sample.composition = Composition("Sample", elements=composition)
+
+            # For enumerated attributes,
+            # where the member name of the attribute is
+            # the first 'word' in the line, and we must get the member,
+            # we do this: Enum[memberName].
+            sample.geometry = enums.Geometry[utils.firstword(
+                self._getNextToken())]
+
+            # Is the geometry FLATPLATE?
+            if (
+                (
+                    sample.geometry == enums.Geometry.SameAsBeam
+                    and config.geometry == enums.Geometry.FLATPLATE
+                )
+                    or sample.geometry == enums.Geometry.FLATPLATE):
+                # If it is FLATPLATE, then extract the upstream and downstream
+                # thickness, the angle of rotation and sample width.
+                thickness = self._getNextToken()
+                sample.upstreamThickness = utils.nthfloat(thickness, 0)
+                sample.downstreamThickness = utils.nthfloat(thickness, 1)
+
+                geometryInfo = self._getNextToken()
+                sample.angleOfRotation = utils.nthfloat(geometryInfo, 0)
+                sample.sampleWidth = utils.nthfloat(geometryInfo, 1)
+            else:
+
+                # Otherwise, it is CYLINDRICAL,
+                # then extract the inner and outer
+                # radii and the sample height.
+                radii = self._getNextToken()
+                sample.innerRadius = utils.nthfloat(radii, 0)
+                sample.outerRadius = utils.nthfloat(radii, 1)
+                sample.sampleHeight = utils.nthfloat(self._getNextToken(), 0)
+
+            # Extract the density.
+            density = utils.nthfloat(self._getNextToken(), 0)
+
+            # Decide on the units of density.
+ # -ve density means it is atomic (atoms/A^3) + # +ve means it is chemical (gm/cm^3) + sample.density = abs(density) + sample.densityUnits = ( + enums.UnitsOfDensity.ATOMIC if + density < 0 + else enums.UnitsOfDensity.CHEMICAL + ) + sample.tempForNormalisationPC = utils.nthfloat( + self._getNextToken(), 0) + crossSectionSource = utils.firstword(self._getNextToken()) + if ( + crossSectionSource == "TABLES" + or crossSectionSource == "TRANSMISSION" + ): + sample.totalCrossSectionSource = ( + enums.CrossSectionSource[crossSectionSource] + ) + else: + sample.totalCrossSectionSource = enums.CrossSectionSource.FILE + sample.crossSectionFilename = crossSectionSource + sample.sampleTweakFactor = utils.nthfloat(self._getNextToken(), 0) + + topHatW = utils.nthfloat(self._getNextToken(), 0) + if topHatW == 0: + sample.topHatW = 0 + sample.FTMode = enums.FTModes.NO_FT + elif topHatW < 0: + sample.topHatW = abs(topHatW) + sample.FTMode = enums.FTModes.SUB_AVERAGE + else: + sample.topHatW = topHatW + sample.FTMode = enums.FTModes.ABSOLUTE + + sample.minRadFT = utils.nthfloat(self._getNextToken(), 0) + sample.grBroadening = utils.nthfloat(self._getNextToken(), 0) + + # Extract the resonance values. + # Each row consists of the first 2 floats. + # (minWavelength, maxWavelength) in the line. + # If the marker line is encountered, + # then the values has been parsed. + line = self._getNextToken() + while ( + "to finish specifying wavelength range of resonance" + not in line + ): + sample.resonanceValues.append( + utils.extract_floats_from_string(line) + ) + line = self._getNextToken() + + # Extract the exponential values. + # Each row consists of the first 3 numbers. + # (Amplitude, Decay, N) in the line. + # If the marker line is encountered, + # then the values has been parsed. 
+ line = self._getNextToken() + if "to specify end of exponential parameter input" not in line: + sample.exponentialValues = [] + while "to specify end of exponential parameter input" not in line: + sample.exponentialValues.append( + utils.extract_nums_from_string(line) + ) + + line = self._getNextToken() + + sample.normalisationCorrectionFactor = ( + utils.nthfloat(self._getNextToken(), 0) + ) + sample.fileSelfScattering = utils.firstword(self._getNextToken()) + sample.normaliseTo = ( + enums.NormalisationType[ + enums.NormalisationType(utils.nthint( + self._getNextToken(), 0)).name + ] + ) + sample.maxRadFT = utils.nthfloat(self._getNextToken(), 0) + sample.outputUnits = ( + enums.OutputUnits[enums.OutputUnits( + utils.nthint(self._getNextToken(), 0)).name] + ) + sample.powerForBroadening = utils.nthfloat(self._getNextToken(), 0) + sample.stepSize = utils.nthfloat(self._getNextToken(), 0) + sample.runThisSample = utils.boolifyNum( + utils.nthint(self._getNextToken(), 0)) + environmentValues = self._getNextToken() + sample.scatteringFraction = utils.nthfloat(environmentValues, 0) + sample.attenuationCoefficient = utils.nthfloat( + environmentValues, 1) + + # Consume whitespace and the closing brace. + self._consumeUpToDelim("}") + + return sample + + except Exception as e: + raise ParserException( + "Whilst parsing Sample, an exception occured." + " The input file is most likely of an incorrect format, " + "and some attributes were missing." + ) from e + + def _parseContainer(self) -> Container: + """ + Intialises a Container object. + Parses the attributes of the Container from the input stream. + Raises a ParserException if any mandatory attributes are missing. + Returns the parsed object. + """ + + try: + # Create a new instance of Container. + container = Container() + + # Extract the name from the lines, + # and then discard the unnecessary lines. 
+            container.name = (
+                str(self._getNextToken()[:-2]).strip()
+                .replace("CONTAINER", "").strip()
+            )
+            self._consumeWhitespace()
+
+            # The number of files and period number are both stored
+            # on the same line.
+            # So we extract the 0th integer for the number of files,
+            # and the 1st integer for the period number.
+            dataFileInfo = self._getNextToken()
+            numberOfFiles = utils.nthint(dataFileInfo, 0)
+            container.periodNumber = utils.nthint(dataFileInfo, 1)
+
+            # Extract params files
+            dataFiles = []
+            for _ in range(numberOfFiles):
+                dataFiles.append(utils.firstword(self._getNextToken()))
+
+            # Create a DataFiles object from the dataFiles list constructed.
+            container.dataFiles = DataFiles(dataFiles, container.name, True)
+
+            # Construct composition
+            composition = []
+            line = self._getNextToken()
+            # Extract the composition.
+            # Each element in the composition consists of the first 'word',
+            # integer at the second position, and float at the third position,
+            # (Atomic Symbol, MassNo, Abundance) in the line.
+            # If the marker line is encountered,
+            # then the panel has been parsed.
+            while "end of composition input" not in line:
+
+                atomicSymbol = utils.firstword(line)
+                massNo = utils.nthfloat(line, 1)
+                abundance = utils.nthfloat(line, 2)
+
+                # Create an Element object and append to the composition list.
+                composition.append(Element(atomicSymbol, massNo, abundance))
+                line = self._getNextToken()
+            # Create a Composition object from the dataFiles list constructed.
+            container.composition = Composition(
+                "Container",
+                elements=composition
+            )
+
+            # For enumerated attributes,
+            # where the member name of the attribute is
+            # the first 'word' in the line, and we must get the member,
+            # we do this: Enum[memberName].
+            container.geometry = enums.Geometry[utils.firstword(
+                self._getNextToken())]
+
+            # Is the geometry FLATPLATE?
+            if (
+                (
+                    container.geometry == enums.Geometry.SameAsBeam
+                    and config.geometry == enums.Geometry.FLATPLATE
+                )
+                    or container.geometry == enums.Geometry.FLATPLATE):
+                # If it is FLATPLATE, then extract the upstream and downstream
+                # thickness, the angle of rotation and sample width.
+                thickness = self._getNextToken()
+                container.upstreamThickness = utils.nthfloat(thickness, 0)
+                container.downstreamThickness = utils.nthfloat(thickness, 1)
+
+                geometryValues = self._getNextToken()
+                container.angleOfRotation = utils.nthfloat(geometryValues, 0)
+                container.sampleWidth = utils.nthfloat(geometryValues, 1)
+            else:
+
+                # Otherwise, it is CYLINDRICAL,
+                # then extract the inner and outer
+                # radii and the sample height.
+                radii = self._getNextToken()
+                container.innerRadius = utils.nthfloat(radii, 0)
+                container.outerRadius = utils.nthfloat(radii, 1)
+                container.sampleHeight = utils.nthfloat(
+                    self._getNextToken(), 0)
+
+            # Extract the density.
+            density = utils.nthfloat(self._getNextToken(), 0)
+
+            # Take the absolute value of the density - since it could be -ve.
+            container.density = abs(density)
+
+            # Decide on the units of density.
+ # -ve density means it is atomic (atoms/A^3) + # +ve means it is chemical (gm/cm^3) + container.densityUnits = ( + enums.UnitsOfDensity.ATOMIC if + density < 0 + else enums.UnitsOfDensity.CHEMICAL + ) + crossSectionSource = utils.firstword(self._getNextToken()) + if ( + crossSectionSource == "TABLES" + or crossSectionSource == "TRANSMISSION" + ): + container.totalCrossSectionSource = ( + enums.CrossSectionSource[crossSectionSource] + ) + else: + container.totalCrossSectionSource = ( + enums.CrossSectionSource.FILE + ) + container.crossSectionFilename = crossSectionSource + container.tweakFactor = utils.nthfloat(self._getNextToken(), 0) + + environmentValues = self._getNextToken() + container.scatteringFraction = utils.nthfloat(environmentValues, 0) + container.attenuationCoefficient = utils.nthfloat( + environmentValues, 1) + + # Consume whitespace and the closing brace. + self._consumeUpToDelim("}") + + return container + + except Exception as e: + raise ParserException( + "Whilst parsing Container, an exception occured." + " The input file is most likely of an incorrect format, " + "and some attributes were missing." + ) from e + + def _parseComponents(self) -> Components: + components = Components() + try: + while self.stream: + component = self._parseComponent() + if component: + components.addComponent(component) + return components + except Exception as e: + raise ParserException( + "Whilst parsing Components, an exception occured." + " The input file is most likely of an incorrect format." 
+ ) from e + + def _parseComponent(self) -> Component: + name = self._getNextToken().rstrip() + component = Component(name) + line = self._peekNextToken() + if "(" in line: + self._consumeTokens(1) + else: + return + line = self._getNextToken() + while line and ")" not in line: + atomicSymbol, massNo, abundance = line.split() + element = Element(atomicSymbol, float(massNo), float(abundance)) + component.addElement(element) + line = self._getNextToken() + return component + + def _sampleBackgroundHelper(self) -> SampleBackground: + """ + Helper method for parsing Sample Background and its + Samples and their Containers. + Returns the SampleBackground object. + Parameters + ---------- + None + Returns + ------- + SampleBackground + The SampleBackground parsed from the lines. + """ + + # Parse sample background. + sampleBackground = self._parseSampleBackground() + + self._consumeWhitespace() + line = self._peekNextToken() + + # Parse all Samples and Containers belonging to the sample background. 
+ while "END" not in line and "SAMPLE BACKGROUND" not in line: + if not line: + raise ParserException("Unexpected EOF during parsing.") + elif "GO" in line: + self._getNextToken() + elif "SAMPLE" in line and utils.firstword(line) == "SAMPLE": + sample = self._parseSample() + if not sample.name: + sample.name = utils.replace_unwanted_chars( + utils.uniquifyName( + "SAMPLE", + [s.name for s in sampleBackground.samples], + sep="", + incFirst=True + )) + sampleBackground.samples.append(sample) + elif "CONTAINER" in line and utils.firstword(line) == "CONTAINER": + container = self._parseContainer() + if not container.name: + container.name = utils.uniquifyName( + "CONTAINER", + [c.name + for c in sampleBackground.samples[-1].containers], + sep="", + incFirst=True) + sampleBackground.samples[-1].containers.append( + container + ) + self._consumeWhitespace() + line = self._peekNextToken() + return sampleBackground + + +Container._getNextToken = GudrunFileParser._getNextToken +Container._peekNextToken = GudrunFileParser._peekNextToken +Container._consumeUpToDelim = GudrunFileParser._consumeUpToDelim +Container._consumeWhitespace = GudrunFileParser._consumeWhitespace diff --git a/gudpy/core/iterators.py b/gudpy/core/iterators.py index 23360693..7b77529f 100644 --- a/gudpy/core/iterators.py +++ b/gudpy/core/iterators.py @@ -1,10 +1,12 @@ from copy import deepcopy +import os import math from enum import Enum from core.gud_file import GudFile from core.enums import Scales, IterationModes from core.gudrun_file import GudrunFile +from core import utils import core.output_file_handler as handlers @@ -483,8 +485,6 @@ def setSelfScatteringFiles(self, scale, gudrunFile, prevOutput=None): extensions to msubw01. If the scale selected is the wavelength-scale, then set self scattering file extensions to mint01. 
""" - # Dict to pick suffix based on scale - suffix = {Scales.Q: ".msubw01", Scales.WAVELENGTH: ".mint01"}[scale] # Iterate through all of the samples, and set the suffixes of # all of their data files to the suffix @@ -493,12 +493,10 @@ def setSelfScatteringFiles(self, scale, gudrunFile, prevOutput=None): for sample in sampleBackground.samples: if sample.runThisSample and len(sample.dataFiles): target = sample - filename = target.dataFiles[0] - targetFile = ( - prevOutput.output( - sample.name, filename, suffix) - if prevOutput else "" - ) + if scale == Scales.Q: + targetFile = target.dataFiles[0].msubwFile + else: + targetFile = target.dataFiles[0].mintFile target.fileSelfScattering = ( targetFile ) @@ -580,13 +578,30 @@ def organiseOutput(self, gudrunFile, exclude=[]): """ overwrite = (self.iterationCount == 1 and self.iterationType == "WavelengthIteration") + head = os.path.join( + utils.replace_unwanted_chars(self.name), + f"{self.iterationType}_{self.iterationCount}" + ) + + # Organise output history (don't overwrite) + outputHandler = handlers.GudrunOutputHandler( gudrunFile=gudrunFile, - head=f"{self.iterationType}_{self.iterationCount}", + head=head, overwrite=overwrite ) output = outputHandler.organiseOutput(exclude=exclude) self.gudrunOutputs.append(output) + + # Organise current output (overrides each time) + + outputHandler = handlers.GudrunOutputHandler( + gudrunFile=gudrunFile, + head="Current", + overwrite=True + ) + output = outputHandler.organiseOutput(exclude=exclude) + return output diff --git a/gudpy/core/output_file_handler.py b/gudpy/core/output_file_handler.py index 98631a58..429a10ec 100644 --- a/gudpy/core/output_file_handler.py +++ b/gudpy/core/output_file_handler.py @@ -7,6 +7,7 @@ import core.utils as utils from core.gud_file import GudFile from core.gudrun_file import GudrunFile +from core.io.gudpy_io import GudPyIO @dataclass @@ -127,7 +128,7 @@ def __init__( super().__init__( gudrunFile.instrument.GudrunInputFileDir, - 
gudrunFile.projectDir, + GudPyIO.projectDir, "Gudrun", ) @@ -140,6 +141,7 @@ def __init__( # Directory where Gudrun files are outputted (temp) self.gudrunDir = self.procDir self.gudrunFile = gudrunFile + self.gudrunFile.outputFolder = self.outputDir # Make sure it is a temporary directory assert (self.gudrunDir.startswith(tempfile.gettempdir())) @@ -176,11 +178,9 @@ def organiseOutput(self, exclude: list[str] = []): inputFilePath = self._createAddOutDir(self.tempOutDir, exclude) # If overwrite, move previous directory - if self.overwrite and os.path.exists( - os.path.join(self.gudrunFile.projectDir, "Gudrun")): + if self.overwrite and os.path.exists(self.outputDir): with tempfile.TemporaryDirectory() as tmp: - shutil.move(os.path.join(self.gudrunFile.projectDir, "Gudrun"), - os.path.join(tmp, "prev")) + shutil.move(self.outputDir, os.path.join(tmp, "prev")) # Move over folders to output directory shutil.move(self.tempOutDir, utils.uniquify(self.outputDir)) @@ -203,12 +203,15 @@ def _createNormDir(self, dest: str): # Create normalisation folders and move datafiles for normFile in self.gudrunFile.normalisation.dataFiles: self._copyOutputs( - normFile, os.path.join( - dest, "Normalisation")) + normFile, dest, "Normalisation") + self.gudrunFile.normalisation.outputDir = os.path.join( + self.outputDir, "Normalisation" + ) for normBgFile in self.gudrunFile.normalisation.dataFilesBg: - self._copyOutputs(normBgFile, - os.path.join(dest, - "NormalisationBackground")) + self._copyOutputs(normBgFile, dest, "NormalisationBackground") + self.gudrunFile.normalisation.backgroundOutputDir = os.path.join( + self.outputDir, "NormalisationBackground" + ) def _createSampleBgDir(self, dest: str): """ @@ -227,10 +230,15 @@ def _createSampleBgDir(self, dest: str): for dataFile in sampleBackground.dataFiles: self._copyOutputs( dataFile, + dest, os.path.join( - dest, "SampleBackgrounds", - f"SampleBackground{count + 1}") - ) + "SampleBackgrounds", + f"SampleBackground{count + 1}" + )) + 
sampleBackground.outputDir = os.path.join( + self.outputDir, "SampleBackgrounds", + f"SampleBackground{count + 1}" + ) def _createSampleDir(self, dest: str): """ @@ -297,17 +305,21 @@ def _createSampleDir(self, dest: str): sampleOutputs[sample.name] = SampleOutput( sampleFile, gudFile, sampleOutput, sampleDiag) + sample.outputFolder = os.path.join( + self.outputDir, + utils.replace_unwanted_chars(sample.name) + + ) + # Create container folders within sample folder for container in sample.containers: - containerPath = os.path.join( - samplePath, - (utils.replace_unwanted_chars(container.name) - if container.name != "CONTAINER" - else "Container")) for dataFile in container.dataFiles: self._copyOutputs( dataFile, - containerPath + samplePath, + (utils.replace_unwanted_chars(container.name) + if container.name != "CONTAINER" + else "Container") ) return sampleOutputs @@ -349,7 +361,7 @@ def _createAddOutDir(self, dest: str, exclude: list[str] = []): continue return inputFile - def _copyOutputs(self, fpath, dest): + def _copyOutputs(self, dataFile, dest, folderName): """ Copy all files with the same basename as the provided filepath, except the original file. 
@@ -363,13 +375,15 @@ def _copyOutputs(self, fpath, dest): dest : str Directory for the files to be copied to """ - fname = os.path.splitext(fpath)[0] + dest = os.path.join(dest, folderName) + fname = os.path.splitext(dataFile.filename)[0] runDir = os.path.join(dest, fname) dirCreated = False for f in os.listdir(self.gudrunDir): # Get files with the same filename but not the same # extension - if os.path.splitext(f)[0] == fname: + name, ext = os.path.splitext(f) + if name == fname: if not dirCreated: utils.makeDir(runDir) dirCreated = True @@ -377,9 +391,15 @@ def _copyOutputs(self, fpath, dest): os.path.join(self.gudrunDir, f), os.path.join(runDir, f) ) + dataFile.outputFolder = os.path.join( + self.outputDir, folderName, fname + ) + dataFile.addOutput(os.path.join( + self.outputDir, folderName, fname, f + )) self.copiedFiles.append(f) - def _copyOutputsByExt(self, fpath, dest, folderName): + def _copyOutputsByExt(self, dataFile, dest, folderName): """ Copy all files with the same basename as the provided filepath and splits them into outputs @@ -404,7 +424,7 @@ def _copyOutputsByExt(self, fpath, dest, folderName): Dictionary mapping output extension to filepath """ # Data filename - fname = os.path.splitext(fpath)[0] + fname = os.path.splitext(dataFile.filename)[0] # Path to folder which will hold all outputs from the run runDir = os.path.join(dest, fname) # Has the run dir been created? 
@@ -431,9 +451,14 @@ def _copyOutputsByExt(self, fpath, dest, folderName): gudFile = GudFile(os.path.join(self.gudrunDir, f)) outputs[ext] = os.path.join( self.outputDir, folderName, fname, "Outputs", f) + dataFile.addOutput(os.path.join( + self.outputDir, folderName, fname, "Outputs", f)) + else: diagnostics[ext] = os.path.join( self.outputDir, folderName, fname, "Diagnostics", f) + dataFile.addOutput(os.path.join( + self.outputDir, folderName, fname, "Diagnostics", f)) shutil.copyfile( os.path.join(self.gudrunDir, f), os.path.join(dir, f) diff --git a/gudpy/core/purge_file.py b/gudpy/core/purge_file.py index ef5e44c8..3da03398 100644 --- a/gudpy/core/purge_file.py +++ b/gudpy/core/purge_file.py @@ -10,7 +10,6 @@ class PurgeFile: """ Class to represent a PurgeFile. - ... Attributes @@ -25,12 +24,6 @@ class PurgeFile: standard deviation. ignoreBad : bool Ignore any existing bad spectrum files (spec.bad, spec.dat)? - Methods - ------- - write_out() - Writes out the string representation of the PurgeFile to purge_det.dat - purge() - Writes out the file, and then calls purge_det on that file. """ def __init__( @@ -53,28 +46,6 @@ def __init__( self.standardDeviation = standardDeviation self.ignoreBad = ignoreBad - def write_out(self, path=""): - """ - Writes out the string representation of the PurgeFile to - purge_det.dat. - - Parameters - ---------- - None - Returns - ------- - None - """ - # Write out the string representation of the PurgeFile - # To purge_det.dat. - if not path: - f = open("purge_det.dat", "w", encoding="utf-8") - f.write(str(self)) - else: - f = open(path, "w", encoding="utf-8") - f.write(str(self)) - f.close() - def __str__(self): """ Returns the string representation of the PurgeFile object. 
diff --git a/gudpy/core/sample.py b/gudpy/core/sample.py index 2f86ba95..9d0b99d0 100644 --- a/gudpy/core/sample.py +++ b/gudpy/core/sample.py @@ -1,3 +1,5 @@ +import os + from core.utils import bjoin, numifyBool from core.data_files import DataFiles from core.composition import Composition @@ -108,7 +110,7 @@ def __init__(self): """ self.name = "" self.periodNumber = 1 - self.dataFiles = DataFiles([], "SAMPLE") + self.dataFiles = DataFiles([], "SAMPLE", True) self.forceCalculationOfCorrections = True self.composition = Composition("SAMPLE") self.geometry = Geometry.SameAsBeam @@ -148,6 +150,17 @@ def __init__(self): "yamlignore" } + self.outputFolder = "" + self.sampleFile = "" + + def importOutputs(self, path): + self.outputFolder = path + for f in os.listdir(path): + ext = os.path.splitext(f) + if ext == ".sample": + self.sampleFile = f + return + def pathName(self): return utils.replace_unwanted_chars(self.name).translate( {ord(x): '' for x in r'/\!*~,&|[]'} diff --git a/gudpy/core/sample_background.py b/gudpy/core/sample_background.py index 4e454899..2c4596dd 100644 --- a/gudpy/core/sample_background.py +++ b/gudpy/core/sample_background.py @@ -18,6 +18,7 @@ class SampleBackground: Methods ------- """ + def __init__(self): """ Constructs all the necessary attributes for the @@ -37,6 +38,8 @@ def __init__(self): "yamlignore" } + self.outputFolder = "" + def __str__(self): """ Returns the string representation of the SampleBackground object. 
diff --git a/gudpy/core/utils.py b/gudpy/core/utils.py index 32dfe053..1520a950 100644 --- a/gudpy/core/utils.py +++ b/gudpy/core/utils.py @@ -23,6 +23,10 @@ def firstword(string): return string.split(" ")[0] +def lastword(string): + return string.split(" ")[-1] + + def replace_unwanted_chars(string): unwanted = [" "] for char in unwanted: diff --git a/gudpy/gudpy_cli.py b/gudpy/gudpy_cli.py index 9bd3a637..d5d90c2b 100644 --- a/gudpy/gudpy_cli.py +++ b/gudpy/gudpy_cli.py @@ -18,7 +18,6 @@ def loadProject(ctx, project): def loadFile(ctx, value): - print(value) file, format = value if not file: file = click.prompt("Path to load file", type=click.Path()) @@ -35,7 +34,7 @@ def loadFile(ctx, value): if not file or format: return ctx.obj = gp.GudPy() - ctx.obj.loadFromFile(file, format) + ctx.obj.loadFromGudrunFile(file) click.echo(click.style(u"\u2714", fg="green", bold=True) + f" GudPy input file {file} sucessfuly loaded") @@ -56,7 +55,7 @@ def loadConfig(ctx, cfg): return ctx.obj = gp.GudPy() - ctx.obj.loadFromFile(cfg, enums.Format.TXT, config=True) + ctx.obj.loadFromGudrunFile(cfg, config=True) def echoIndent(text): diff --git a/gudpy/gudpy_gui.py b/gudpy/gudpy_gui.py index ec4cc015..ba0a8c74 100644 --- a/gudpy/gudpy_gui.py +++ b/gudpy/gudpy_gui.py @@ -10,7 +10,7 @@ def __init__(self, args): super(GudPy, self).__init__(args) self.gudpy = GudPyController() self.aboutToQuit.connect(self.gudpy.cleanup) - sys.exit(self.exec_()) + sys.exit(self.exec()) def onException(self, cls, exception, traceback): self.gudpy.onException(cls, exception, traceback) diff --git a/gudpy/gui/widgets/core/control.py b/gudpy/gui/widgets/core/control.py index 2bebd34b..1dc28934 100644 --- a/gudpy/gui/widgets/core/control.py +++ b/gudpy/gui/widgets/core/control.py @@ -1,5 +1,4 @@ import os -import re import sys import traceback import typing as typ @@ -31,7 +30,6 @@ def __init__(self): super().__init__() self.gudpy: gp.GudPy = gp.GudPy() self.mainWidget: QtWidgets.QMainWindow = 
GudPyMainWindow() - self.purged: bool = False # Current process thread running self.workerThread: QtCore.QThread = None @@ -83,7 +81,7 @@ def connectUiSlots(self): self.mainWidget.ui.exportInputFile.triggered.connect( self.exportInputFile) self.mainWidget.ui.viewLiveInputFile.triggered.connect( - self.mainWidget.viewInput) + self.viewInput) self.mainWidget.ui.insertSampleBackground.triggered.connect( self.mainWidget.ui.objectTree.insertSampleBackground ) @@ -111,6 +109,16 @@ def connectUiSlots(self): self.mainWidget.ui.exportArchive.triggered.connect(self.exportArchive) self.mainWidget.ui.exit.triggered.connect(self.exit_) + @property + def gudrunFile(self) -> None: + return self.gudpy.gudrunFile + + @gudrunFile.setter + def gudrunFile(self, gudrunFile: gudrunFile) -> None: + self.gudpy = gp.GudPy() + self.gudpy.gudrunFile = gudrunFile + self.mainWidget.updateWidgets(self.gudpy.gudrunFile) + """ INPUT / OUTPUT @@ -119,12 +127,12 @@ def connectUiSlots(self): def tryLoadAutosaved(self, projectDir): for f in os.listdir(projectDir): - if f == self.gudpy.autosaveLocation: + if f == self.gudpy.io.autosavePath: path = os.path.join(projectDir, f) autoFileInfo = QtCore.QFileInfo(path) autoDate = autoFileInfo.lastModified() - fileInfo = QtCore.QFileInfo(self.gudpy.gudrunFile.path()) + fileInfo = QtCore.QFileInfo(self.gudpy.io.loadFile) currentDate = fileInfo.lastModified() if autoDate > currentDate: @@ -160,7 +168,13 @@ def loadFromFile(self): fmt = filters[filter] try: gudpy = gp.GudPy() - gudpy.loadFromFile(loadFile=filename, format=fmt) + if fmt == enums.Format.TXT: + gudpy.loadFromGudrunFile(filename) + elif fmt == enums.Format.YAML: + gudpy.loadFromYamlFile(filename) + else: + raise RuntimeError(f"Unsupported format: {fmt}") + self.gudpy = gudpy except (FileNotFoundError, exc.ParserException) as e: self.mainWidget.sendError(e) @@ -168,9 +182,9 @@ def loadFromFile(self): except IOError: self.mainWidget.sendError("Could not open file.") return - 
self.mainWidget.updateWidgets(self.gudpy.gudrunFile) + self.mainWidget.updateWidgets(self.gudrunFile) self.mainWidget.setWindowTitle( - f"GudPy - {self.gudpy.gudrunFile.filename}[*]") + f"GudPy - {os.path.basename(filename)}[*]") def loadFromProject(self): """Load from previous GudPy project @@ -187,7 +201,7 @@ def loadFromProject(self): if autosave: filename = autosave gudpy = gp.GudPy() - gudpy.loadFromFile(loadFile=filename) + gudpy.loadFromGudrunFile(loadFile=filename) self.gudpy = gudpy except (FileNotFoundError, exc.ParserException) as e: self.mainWidget.sendError(e) @@ -195,12 +209,15 @@ def loadFromProject(self): except IOError: self.mainWidget.sendError("Could not open file.") return - self.mainWidget.updateWidgets(self.gudpy.gudrunFile) + except exc.YAMLException as e: + self.mainWidget.sendError(f"Could not parse input file: {e}") + return + self.mainWidget.updateWidgets(self.gudrunFile) self.mainWidget.setWindowTitle( - f"GudPy - {self.gudpy.gudrunFile.filename}[*]") + f"GudPy - {self.gudpy.io.projectName}[*]") def newProject(self): - if self.gudpy.gudrunFile: + if self.gudrunFile: save = QtWidgets.QMessageBox.question( self.mainWidget, "GudPy", @@ -217,12 +234,11 @@ def newProject(self): if not configurationDialog.cancelled and result: self.gudpy = gp.GudPy() - self.gudpy.loadFromFile( + self.gudpy.loadFromGudrunFile( loadFile=configurationDialog.configuration, - format=enums.Format.TXT, config=True ) - self.mainWidget.updateWidgets(self.gudpy.gudrunFile) + self.mainWidget.updateWidgets(self.gudrunFile) def setSaveLocation(self, saveAs=False): """Function to let the user choose where the project is saved to @@ -275,35 +291,37 @@ def exportInputFile(self): """ Saves the current state of the input file as... 
""" - filename, filter = QFileDialog.getSaveFileName( - self, + filename, _ = QFileDialog.getSaveFileName( + self.mainWidget, "Export input file as..", ".", - "YAML (*.yaml);;Gudrun Compatible (*.txt)", ) - fmt = enums.Format.YAML if filename: - ext = re.search(r"\((.+?)\)", filter).group(1).replace("*", "") - fmt = enums.Format.TXT if ext == ".txt" else enums.Format.YAML - if filter and sys.platform.startswith("linux"): - filename += ext + filename.replace(".txt", "") + filename += ".txt" if os.path.dirname(filename) == self.gudpy.projectDir: self.mainWidget.sendWarning("Do not modify project folder.") return - self.gudpy.save(path=filename, format=fmt) - self.setUnModified() + self.gudpy.io.exportGudrunFile(self.gudrunFile, filename) + self.setUnModified() def exportArchive(self): if not self.gudpy.checkSaveLocation(): if not self.setSaveLocation(): return exportDialog = dialogs.io.ExportDialog( - self.gudpy.gudrunFile, self.mainWidget) + self.gudrunFile, self.mainWidget) exportDialog.widget.exec() def autosave(self): if self.gudpy.checkSaveLocation() and not self.workerThread: - self.gudpy.save(path=self.gudpy.autosaveLocation) + self.gudpy.save() + + def viewInput(self): + text = self.mainWidget.viewInput() + self.gudrunFile = self.gudpy.io.gudrunFileParser.parse( + text.splitlines()) + self.mainWidget.updateWidgets(self.gudrunFile) """ @@ -313,7 +331,7 @@ def autosave(self): def checkFilesExist(self, showSuccessDialog: bool = False): result = file_library.GudPyFileLibrary( - self.gudpy.gudrunFile).checkFilesExist() + self.gudrunFile).checkFilesExist() if not all(r[0] for r in result[0]) or not all(r[0] for r in result[1]): undefined = [ @@ -323,7 +341,7 @@ def checkFilesExist(self, showSuccessDialog: bool = False): missingFilesDialog = dialogs.io.MissingFilesDialog( undefined, unresolved, self.mainWidget ) - missingFilesDialog.widget.exec_() + missingFilesDialog.widget.exec() return False if showSuccessDialog: @@ -353,16 +371,16 @@ def createPurgeProcess(self) 
-> bool: return False self.mainWidget.setControlsEnabled(False) purgeDialog = dialogs.purge.PurgeDialog( - self.gudpy.gudrunFile, self.mainWidget) - result = purgeDialog.widget.exec_() + self.gudrunFile, self.mainWidget) + result = purgeDialog.widget.exec() if (purgeDialog.cancelled or result == QDialogButtonBox.No): self.mainWidget.setControlsEnabled(True) return False - self.gudpy.purgeFile = PurgeFile(self.gudpy.gudrunFile) + self.gudpy.purgeFile = PurgeFile(self.gudrunFile) self.gudpy.purge = worker.PurgeWorker( purgeFile=self.gudpy.purgeFile, - gudrunFile=self.gudpy.gudrunFile, + gudrunFile=self.gudrunFile, ) self.connectProcessSignals( process=self.gudpy.purge, onFinish=self.purgeFinished @@ -374,7 +392,7 @@ def prepareRun(self) -> bool: if not self.checkFilesExist(): return False - if not self.gudpy.gudrunFile.checkNormDataFiles(): + if not self.gudrunFile.checkNormDataFiles(): self.mainWidget.sendWarning( "Please specify normalisation data files.") return False @@ -396,7 +414,7 @@ def startProcess(self) -> None: os.path.join( self.gudpy.projectDir, "Purge", "purge_det.dat" ) - ): + ) or not self.gudrunFile.purged: purgeResult = self.mainWidget.purgeOptionsMessageBox( "purge_det.dat found, but wasn't run in this session. 
" "Run Purge?", @@ -424,7 +442,7 @@ def runPurge(self) -> bool: self.startProcess() def purgeFinished(self, exitcode): - self.purged = True + self.gudrunFile.purged = True if exitcode != 0: self.mainWidget.sendError( @@ -434,20 +452,20 @@ def purgeFinished(self, exitcode): self.workerThread = None return - thresh = self.gudpy.gudrunFile.instrument.goodDetectorThreshold + thresh = self.gudrunFile.instrument.goodDetectorThreshold if thresh and self.gudpy.purge.detectors < thresh: self.mainWidget.sendWarning( f"{self.gudpy.purge.detectors} " "detectors made it through the purge.\n" " The acceptable minimum for " - f"{self.gudpy.gudrunFile.instrument.name} is {thresh}" + f"{self.gudrunFile.instrument.name} is {thresh}" ) self.mainWidget.ui.goodDetectorsLabel.setText( f"Number of Good Detectors: {self.gudpy.purge.detectors}" ) self.mainWidget.outputSlots.setOutput( self.gudpy.purge.output, "purge_det", - gudrunFile=self.gudpy.gudrunFile + self.gudrunFile ) if isinstance(self.workerThread, gp.Purge): @@ -457,7 +475,7 @@ def purgeFinished(self, exitcode): def runGudrun(self, gudrunFile=None): if not gudrunFile: - gudrunFile = self.gudpy.gudrunFile + gudrunFile = self.gudrunFile if not self.prepareRun(): return @@ -472,7 +490,7 @@ def runGudrun(self, gudrunFile=None): def iterateGudrun(self, dialog): iterationDialog = dialog( - self.mainWidget, self.gudpy.gudrunFile) + self.mainWidget, self.gudrunFile) iterationDialog.widget.exec() if not iterationDialog.params: return @@ -480,7 +498,7 @@ def iterateGudrun(self, dialog): return # If it is a Composition iteration, the gudrunFile must be specified if iterationDialog.iteratorType == iterators.Composition: - iterationDialog.params["gudrunFile"] = self.gudpy.gudrunFile + iterationDialog.params["gudrunFile"] = self.gudrunFile self.gudpy.iterator = iterationDialog.iteratorType( **iterationDialog.params) @@ -488,7 +506,7 @@ def iterateGudrun(self, dialog): # If Composition iterator, initialise Composition Worker if 
iterationDialog.iteratorType == iterators.Composition: self.gudpy.gudrunIterator = worker.CompositionWorker( - self.gudpy.iterator, self.gudpy.gudrunFile, self.gudpy.purge) + self.gudpy.iterator, self.gudrunFile, self.gudpy.purge) self.connectProcessSignals( process=self.gudpy.gudrunIterator, onFinish=self.compositionIterationFinished @@ -496,7 +514,7 @@ def iterateGudrun(self, dialog): # Else use standard GudrunIteratorWorker else: self.gudpy.gudrunIterator = worker.GudrunIteratorWorker( - self.gudpy.iterator, self.gudpy.gudrunFile, self.gudpy.purge) + self.gudpy.iterator, self.gudrunFile, self.gudpy.purge) self.connectProcessSignals( process=self.gudpy.gudrunIterator, onFinish=self.gudrunFinished @@ -516,7 +534,8 @@ def gudrunFinished(self, exitcode): self.mainWidget.outputSlots.setOutput( self.gudpy.gudrunIterator.output, - f"Gudrun {self.gudpy.gudrunIterator.iterator.name}") + f"Gudrun {self.gudpy.gudrunIterator.iterator.name}", + self.gudrunFile) self.mainWidget.sampleSlots.setSample( self.mainWidget.sampleSlots.sample) self.mainWidget.iterationResultsDialog( @@ -534,9 +553,9 @@ def gudrunFinished(self, exitcode): ) return self.mainWidget.outputSlots.setOutput( - self.gudpy.gudrun.output, "Gudrun") + self.gudpy.gudrun.output, "Gudrun", self.gudrunFile) self.mainWidget.updateWidgets( - gudrunFile=self.gudpy.gudrunFile, + gudrunFile=self.gudrunFile, gudrunOutput=self.gudpy.gudrun.gudrunOutput ) self.workerThread = None @@ -559,14 +578,14 @@ def runContainersAsSamples(self): if not self.prepareRun(): return gudrunFile = self.gudpy.runModes.convertContainersToSample( - self.gudpy.gudrunFile + self.gudrunFile ) self.runGudrun(gudrunFile=gudrunFile) def runFilesIndividually(self): if not self.prepareRun(): return - gudrunFile = self.gudpy.runModes.partition(self.gudpy.gudrunFile) + gudrunFile = self.gudpy.runModes.partition(self.gudrunFile) self.runGudrun(gudrunFile=gudrunFile) def runBatchProcessing(self): @@ -576,7 +595,7 @@ def runBatchProcessing(self): 
self.mainWidget ) self.gudpy.gudrunIterator = worker.BatchWorker( - gudrunFile=self.gudpy.gudrunFile, + gudrunFile=self.gudrunFile, purge=self.gudpy.purge, iterator=dialog.iterator, batchSize=dialog.batchSize, @@ -628,5 +647,5 @@ def exit_(self): ) if result == QtWidgets.QMessageBox.Yes: - self.gudpy.gudrunFile.save() + self.gudrunFile.save() sys.exit(0) diff --git a/gudpy/gui/widgets/core/main_window.py b/gudpy/gui/widgets/core/main_window.py index 62a728ab..2bf0506b 100644 --- a/gudpy/gui/widgets/core/main_window.py +++ b/gudpy/gui/widgets/core/main_window.py @@ -6,6 +6,7 @@ from core.container import Container from core.sample import Sample +from core.io.gudpy_io import GudPyIO from gui.widgets.dialogs.iterators import ( CompositionIterationDialog, @@ -705,7 +706,7 @@ def purgeOptionsMessageBox(self, text): def setModified(self): if not self.modified: - if self.gudrunFile.path(): + if GudPyIO.projectDir: self.modified = True self.ui.setWindowModified(True) self.ui.save.setEnabled(True) @@ -737,7 +738,7 @@ def setControlsEnabled(self, state): self.ui.viewLiveInputFile.setEnabled(state) self.ui.save.setEnabled( state & self.modified - if self.gudrunFile.path() + if GudPyIO.projectDir else False ) self.ui.exportInputFile.setEnabled(state) @@ -778,7 +779,8 @@ def setTreeActionsEnabled(self, state): def viewInput(self): self.currentState = str(self.gudrunFile) viewInputDialog = ViewInputDialog(self.gudrunFile, self) - viewInputDialog.widget.exec_() + text = viewInputDialog.widget.exec() + return text def handleAllPlotModeChanged(self, index): plotMode = self.ui.allPlotComboBox.itemData(index) diff --git a/gudpy/gui/widgets/core/worker.py b/gudpy/gui/widgets/core/worker.py index f27ef063..1456e812 100644 --- a/gudpy/gui/widgets/core/worker.py +++ b/gudpy/gui/widgets/core/worker.py @@ -39,13 +39,16 @@ def __init__(self, purgeFile: PurgeFile, gudrunFile: GudrunFile): self.appendDataFiless(gudrunFile.normalisation.dataFiles[0]) 
self.appendDataFiless(gudrunFile.normalisation.dataFilesBg[0]) - self.appendDataFiless([df for sb in gudrunFile.sampleBackgrounds + self.appendDataFiless([df.filename for sb + in gudrunFile.sampleBackgrounds for df in sb.dataFiles]) if not purgeFile.excludeSampleAndCan: - self.appendDataFiless([df for sb in gudrunFile.sampleBackgrounds + self.appendDataFiless([df.filename for sb + in gudrunFile.sampleBackgrounds for s in sb.samples for df in s.dataFiles if s.runThisSample]) - self.appendDataFiless([df for sb in gudrunFile.sampleBackgrounds + self.appendDataFiless([df.filename for sb + in gudrunFile.sampleBackgrounds for s in sb.samples for c in s.containers for df in c.dataFiles if s.runThisSample]) diff --git a/gudpy/gui/widgets/dialogs/io.py b/gudpy/gui/widgets/dialogs/io.py index a5510ac5..9285ebdc 100644 --- a/gudpy/gui/widgets/dialogs/io.py +++ b/gudpy/gui/widgets/dialogs/io.py @@ -95,8 +95,6 @@ class ExportDialog(QDialog): Toggles renaming files to the sample name. performExport(filename) Performs an export to a filename. - export() - Performs a standard export. exportAs() Allows exporting to a specific file. 
""" @@ -174,14 +172,10 @@ def loadFilesList(self, rename=False): def toggleRename(self, state): self.loadFilesList(rename=bool(state)) - def performExport(self, filename=None): + def performExport(self, filename): fl = GudPyFileLibrary(self.gudrunFile) archive = fl.exportMintData( - [ - s - for sb in self.gudrunFile.sampleBackgrounds - for s in sb.samples - ], + self.gudrunFile.samples(), renameDataFiles=self.widget.renameCheckBox.checkState(), exportTo=filename, includeParams=self.widget.includeParamsCheckBox.checkState() @@ -193,11 +187,7 @@ def performExport(self, filename=None): ) self.widget.close() - def export(self): - self.performExport() - def exportAs(self): - dialog = QFileDialog() dialog.setDefaultSuffix("zip") dialog.setWindowTitle("Export to..") diff --git a/gudpy/gui/widgets/dialogs/view_input_dialog.py b/gudpy/gui/widgets/dialogs/view_input_dialog.py index bcce525e..c45faaea 100644 --- a/gudpy/gui/widgets/dialogs/view_input_dialog.py +++ b/gudpy/gui/widgets/dialogs/view_input_dialog.py @@ -3,6 +3,7 @@ from PySide6.QtCore import QFile from PySide6.QtWidgets import QDialog from PySide6.QtUiTools import QUiLoader +from core.io.gudpy_io import GudPyIO class ViewInputDialog(QDialog): @@ -52,7 +53,7 @@ def initComponents(self): ) loader = QUiLoader() self.widget = loader.load(uifile) - self.widget.setWindowTitle(self.gudrunFile.path()) + self.widget.setWindowTitle(GudPyIO.projectName) self.widget.saveAndCloseButton.clicked.connect(self.save) self.widget.closeButton.clicked.connect(self.widget.close) self.widget.textEdit.setText(str(self.gudrunFile)) @@ -67,7 +68,6 @@ def save(self): """ Saves the input file and updates the UI appropiately. 
""" - with open(self.gudrunFile.path(), "w", encoding="utf-8") as fp: - fp.write(self.widget.textEdit.toPlainText()) + text = self.widget.textEdit.toPlainText() self.widget.close() - self.parent.updateFromFile() + return text diff --git a/gudpy/gui/widgets/slots/container_slots.py b/gudpy/gui/widgets/slots/container_slots.py index 87391a14..7aab92bf 100644 --- a/gudpy/gui/widgets/slots/container_slots.py +++ b/gudpy/gui/widgets/slots/container_slots.py @@ -26,7 +26,7 @@ def setContainer(self, container): # Populate the data files list. self.widget.containerDataFilesList.makeModel( - self.container.dataFiles.dataFiles + self.container.dataFiles.dataFilenames ) self.widget.containerDataFilesList.model().dataChanged.connect( @@ -640,7 +640,7 @@ def handleDataFilesAltered(self): if not self.widgetsRefreshing: self.parent.setModified() self.parent.gudrunFile.purged = False - self.container.dataFiles.dataFiles = ( + self.container.dataFiles.setFiles( self.widget.containerDataFilesList.model().stringList() ) diff --git a/gudpy/gui/widgets/slots/normalisation_slots.py b/gudpy/gui/widgets/slots/normalisation_slots.py index 235ff8eb..494726a2 100644 --- a/gudpy/gui/widgets/slots/normalisation_slots.py +++ b/gudpy/gui/widgets/slots/normalisation_slots.py @@ -20,10 +20,10 @@ def setNormalisation(self, normalisation): self.widgetsRefreshing = True self.widget.dataFilesList.makeModel( - self.normalisation.dataFiles.dataFiles + self.normalisation.dataFiles.dataFilenames ) self.widget.backgroundDataFilesList.makeModel( - self.normalisation.dataFilesBg.dataFiles + self.normalisation.dataFilesBg.dataFilenames ) self.widget.dataFilesList.setSelectionMode( @@ -645,7 +645,7 @@ def handleDataFilesAltered(self): if not self.widgetsRefreshing: self.parent.setModified() self.parent.gudrunFile.purged = False - self.normalisation.dataFiles.dataFiles = ( + self.normalisation.dataFiles.setFiles( self.widget.dataFilesList.model().stringList() ) @@ -659,7 +659,7 @@ def 
handleDataFilesBgAltered(self): if not self.widgetsRefreshing: self.parent.setModified() self.parent.gudrunFile.purged = False - self.normalisation.dataFilesBg.dataFiles = ( + self.normalisation.dataFilesBg.setFiles( self.widget.backgroundDataFilesList.model().stringList() ) diff --git a/gudpy/gui/widgets/slots/sample_background_slots.py b/gudpy/gui/widgets/slots/sample_background_slots.py index 3049e29e..ad7ece9b 100644 --- a/gudpy/gui/widgets/slots/sample_background_slots.py +++ b/gudpy/gui/widgets/slots/sample_background_slots.py @@ -21,7 +21,7 @@ def setSampleBackground(self, sampleBackground): # Populate data files list. self.widget.sampleBackgroundDataFilesList.makeModel( - self.sampleBackground.dataFiles.dataFiles + self.sampleBackground.dataFiles.dataFilenames ) self.widget.sampleBackgroundDataFilesList.model().dataChanged.connect( @@ -63,7 +63,7 @@ def handleDataFilesAltered(self): if not self.widgetsRefreshing: self.parent.setModified() self.parent.gudrunFile.purged = False - self.sampleBackground.dataFiles.dataFiles = ( + self.sampleBackground.dataFiles.setFiles( self.widget.sampleBackgroundDataFilesList.model().stringList() ) diff --git a/gudpy/gui/widgets/slots/sample_slots.py b/gudpy/gui/widgets/slots/sample_slots.py index e807cbd7..02277b18 100644 --- a/gudpy/gui/widgets/slots/sample_slots.py +++ b/gudpy/gui/widgets/slots/sample_slots.py @@ -27,7 +27,7 @@ def setSample(self, sample): # Populate the data files list. 
self.widget.sampleDataFilesList.makeModel( - self.sample.dataFiles.dataFiles + self.sample.dataFiles.dataFilenames ) self.widget.sampleDataFilesList.model().dataChanged.connect( @@ -889,7 +889,7 @@ def handleDataFilesAltered(self): if not self.widgetsRefreshing: self.parent.setModified() self.parent.gudrunFile.purged = False - self.sample.dataFiles.dataFiles = ( + self.sample.dataFiles.setFiles( self.widget.sampleDataFilesList.model().stringList() ) @@ -1149,7 +1149,7 @@ def updateExpectedDCSLevel(self, _=None, __=None): actualDcsLevel = nthfloat(self.widget.dcsLabel.text(), 2) try: error = round( - ((actualDcsLevel - dcsLevel) / actualDcsLevel)*100, 1 + ((actualDcsLevel - dcsLevel) / actualDcsLevel)*100, 1 ) except ZeroDivisionError: error = 100. diff --git a/gudpy/test/TestData/NIMROD-water/good_water/good_water.yaml b/gudpy/test/TestData/NIMROD-water/good_water/good_water.yaml index 17c96259..721fa994 100644 --- a/gudpy/test/TestData/NIMROD-water/good_water/good_water.yaml +++ b/gudpy/test/TestData/NIMROD-water/good_water/good_water.yaml @@ -1,6 +1,6 @@ -Instrument: +instrument: name: NIMROD - dataFileDir: TestData/NIMROD-water/raw/ + dataFileDir: test/TestData/NIMROD-water/raw/ dataFileType: raw detectorCalibrationFileName: StartupFiles/NIMROD/NIMROD84modules+9monitors+LAB5Oct2012Detector.dat columnNoPhiVals: 4 @@ -34,7 +34,7 @@ Instrument: logarithmicStepSize: 0.04 hardGroupEdges: true nxsDefinitionFile: '' -Beam: +beam: sampleGeometry: FLATPLATE beamProfileValues: [1.0, 1.0] stepSizeAbsorption: 0.05 @@ -53,16 +53,38 @@ Beam: overallBackgroundFactor: 1.0 sampleDependantBackgroundFactor: 0.0 shieldingAttenuationCoefficient: 0.0 -Components: [] -Normalisation: +components: [] +normalisation: periodNumber: 1 dataFiles: - dataFiles: [NIMROD00016702_V.raw] + _dataFiles: + - filename: NIMROD00016702_V.raw + name: NIMROD00016702_V + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: false name: NORMALISATION + outputFolders: {} + _outputs: {} 
periodNumberBg: 1 dataFilesBg: - dataFiles: [NIMROD00016698_EmptyInst.raw, NIMROD00016703_EmptyInst.raw] + _dataFiles: + - filename: NIMROD00016698_EmptyInst.raw + name: NIMROD00016698_EmptyInst + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: false + - filename: NIMROD00016703_EmptyInst.raw + name: NIMROD00016703_EmptyInst + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: false name: NORMALISATION BACKGROUND + outputFolders: {} + _outputs: {} forceCalculationOfCorrections: true composition: type_: Normalisation @@ -86,17 +108,45 @@ Normalisation: lowerLimitSmoothedNormalisation: 0.01 normalisationDegreeSmoothing: 1.0 minNormalisationSignalBR: 0.0 -SampleBackgrounds: +sampleBackgrounds: - periodNumber: 1 dataFiles: - dataFiles: [NIMROD00016698_EmptyInst.raw, NIMROD00016703_EmptyInst.raw] + _dataFiles: + - filename: NIMROD00016698_EmptyInst.raw + name: NIMROD00016698_EmptyInst + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: false + - filename: NIMROD00016703_EmptyInst.raw + name: NIMROD00016703_EmptyInst + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: false name: SAMPLE BACKGROUND + outputFolders: {} + _outputs: {} samples: - name: H2O,_Can_N9 periodNumber: 1 dataFiles: - dataFiles: [NIMROD00016608_H2O_in_N9.raw, NIMROD00016610_H2O_in_N9.raw] + _dataFiles: + - filename: NIMROD00016608_H2O_in_N9.raw + name: NIMROD00016608_H2O_in_N9 + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: true + - filename: NIMROD00016610_H2O_in_N9.raw + name: NIMROD00016610_H2O_in_N9 + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: true name: H2O,_Can_N9 + outputFolders: {} + _outputs: {} forceCalculationOfCorrections: true composition: type_: Sample @@ -139,8 +189,28 @@ SampleBackgrounds: - name: N9 periodNumber: 1 dataFiles: - dataFiles: [NIMROD00016694_Empty_N9.raw, NIMROD00016699_Empty_N9.raw, NIMROD00016704_Empty_N9.raw] + _dataFiles: + - filename: NIMROD00016694_Empty_N9.raw + name: 
NIMROD00016694_Empty_N9 + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: true + - filename: NIMROD00016699_Empty_N9.raw + name: NIMROD00016699_Empty_N9 + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: true + - filename: NIMROD00016704_Empty_N9.raw + name: NIMROD00016704_Empty_N9 + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: true name: N9 + outputFolders: {} + _outputs: {} composition: type_: Container elements: @@ -162,11 +232,28 @@ SampleBackgrounds: tweakFactor: 1.0 scatteringFraction: 1.0 attenuationCoefficient: 0.0 + outputFolder: '' + outputFolder: '' + sampleFile: '' - name: D2O,_Can_N10 periodNumber: 1 dataFiles: - dataFiles: [NIMROD00016609_D2O_in_N10.raw, NIMROD00016611_D2O_in_N10.raw] + _dataFiles: + - filename: NIMROD00016609_D2O_in_N10.raw + name: NIMROD00016609_D2O_in_N10 + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: true + - filename: NIMROD00016611_D2O_in_N10.raw + name: NIMROD00016611_D2O_in_N10 + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: true name: D2O,_Can_N10 + outputFolders: {} + _outputs: {} forceCalculationOfCorrections: true composition: type_: Sample @@ -209,8 +296,28 @@ SampleBackgrounds: - name: N10 periodNumber: 1 dataFiles: - dataFiles: [NIMROD00016695_Empty_N10.raw, NIMROD00016700_Empty_N10.raw, NIMROD00016705_Empty_N10.raw] + _dataFiles: + - filename: NIMROD00016695_Empty_N10.raw + name: NIMROD00016695_Empty_N10 + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: true + - filename: NIMROD00016700_Empty_N10.raw + name: NIMROD00016700_Empty_N10 + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: true + - filename: NIMROD00016705_Empty_N10.raw + name: NIMROD00016705_Empty_N10 + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: true name: N10 + outputFolders: {} + _outputs: {} composition: type_: Container elements: @@ -232,11 +339,28 @@ SampleBackgrounds: tweakFactor: 1.0 scatteringFraction: 1.0 
attenuationCoefficient: 0.0 + outputFolder: '' + outputFolder: '' + sampleFile: '' - name: HDO,_Can_N6 periodNumber: 1 dataFiles: - dataFiles: [NIMROD00016741_HDO_in_N6.raw, NIMROD00016743_HDO_in_N6.raw] + _dataFiles: + - filename: NIMROD00016741_HDO_in_N6.raw + name: NIMROD00016741_HDO_in_N6 + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: true + - filename: NIMROD00016743_HDO_in_N6.raw + name: NIMROD00016743_HDO_in_N6 + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: true name: HDO,_Can_N6 + outputFolders: {} + _outputs: {} forceCalculationOfCorrections: true composition: type_: Sample @@ -280,8 +404,16 @@ SampleBackgrounds: - name: N6 periodNumber: 1 dataFiles: - dataFiles: [NIMROD00014908_Empty_N6.raw] + _dataFiles: + - filename: NIMROD00014908_Empty_N6.raw + name: NIMROD00014908_Empty_N6 + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: true name: N6 + outputFolders: {} + _outputs: {} composition: type_: Container elements: @@ -303,11 +435,28 @@ SampleBackgrounds: tweakFactor: 1.0 scatteringFraction: 1.0 attenuationCoefficient: 0.0 + outputFolder: '' + outputFolder: '' + sampleFile: '' - name: Null_Water,_Can_N8 periodNumber: 1 dataFiles: - dataFiles: [NIMROD00016742_NullWater_in_N8.raw, NIMROD00016744_NullWater_in_N8.raw] + _dataFiles: + - filename: NIMROD00016742_NullWater_in_N8.raw + name: NIMROD00016742_NullWater_in_N8 + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: true + - filename: NIMROD00016744_NullWater_in_N8.raw + name: NIMROD00016744_NullWater_in_N8 + ext: .raw + outputFolder: '' + _outputs: {} + isSampleDataFile: true name: Null_Water,_Can_N8 + outputFolders: {} + _outputs: {} forceCalculationOfCorrections: true composition: type_: Sample @@ -351,8 +500,16 @@ SampleBackgrounds: - name: N8 periodNumber: 1 dataFiles: - dataFiles: [NIMROD00016994_Empty_N8.raw] + _dataFiles: + - filename: NIMROD00016994_Empty_N8.raw + name: NIMROD00016994_Empty_N8 + ext: .raw + outputFolder: '' + 
_outputs: {} + isSampleDataFile: true name: N8 + outputFolders: {} + _outputs: {} composition: type_: Container elements: @@ -374,4 +531,8 @@ SampleBackgrounds: tweakFactor: 1.0 scatteringFraction: 1.0 attenuationCoefficient: 0.0 + outputFolder: '' + outputFolder: '' + sampleFile: '' + outputFolder: '' GUI: {useComponents: false} diff --git a/gudpy/test/TestData/NIMROD-water/water.txt b/gudpy/test/TestData/NIMROD-water/water.txt index a6ecea35..ec85923c 100644 --- a/gudpy/test/TestData/NIMROD-water/water.txt +++ b/gudpy/test/TestData/NIMROD-water/water.txt @@ -287,7 +287,7 @@ NIMROD00016742_NullWater_in_N8.msubw01 Name of file containing self sca CONTAINER N8 { -1 1 Number of files and period number +1 1 Number of files and period number NIMROD00016994_Empty_N8.raw CONTAINER N8 data files Ti 0 7.16 Composition Zr 0 3.438 Composition diff --git a/gudpy/test/test_gud_file.py b/gudpy/test/test_gud_file.py index a4480d10..e268d57f 100644 --- a/gudpy/test/test_gud_file.py +++ b/gudpy/test/test_gud_file.py @@ -4,6 +4,7 @@ from core.exception import ParserException from core.gud_file import GudFile from core import gudpy +from core.io.gudpy_io import GudPyIO from test.test_gudpy_workflows import GudPyContext @@ -250,13 +251,6 @@ def setUp(self) -> None: self.gudpy.gudrunFile.instrument.dataFileDir = str(dataFileDir) + "/" return super().setUp() - def tearDown(self) -> None: - [os.remove(f) for f in os.listdir() if f not in self.keepsakes] - [os.remove(os.path.join(self.gudpy.projectDir, f)) - for f in os.listdir(self.gudpy.projectDir) - if f not in self.keepsakes] - return super().tearDown() - def testEmptyPath(self): emptyPath = "" self.assertRaises(ParserException, GudFile, emptyPath) @@ -337,7 +331,7 @@ def testWriteGudFileA(self): gudpy.projectDir, gf.fname ) - gf.write_out(path) + GudPyIO.writeObject(gf, path) gf1 = GudFile(path) dicA = gf.__dict__ @@ -363,7 +357,7 @@ def testWriteGudFileB(self): gudpy.projectDir, gf.fname ) - gf.write_out(path) + 
GudPyIO.writeObject(gf, path) gf1 = GudFile(path) dicA = gf.__dict__ diff --git a/gudpy/test/test_gudpy_io.py b/gudpy/test/test_gudpy_io.py index 591a6274..eb9460ee 100644 --- a/gudpy/test/test_gudpy_io.py +++ b/gudpy/test/test_gudpy_io.py @@ -22,9 +22,10 @@ from core.enums import ( CrossSectionSource, FTModes, Instruments, Scales, UnitsOfDensity, MergeWeights, NormalisationType, OutputUnits, - Geometry, Format + Geometry ) from core import gudpy as gp +from core.io.gudrun_file_parser import GudrunFileParser class GudPyContext: @@ -36,24 +37,27 @@ def __init__(self): ) self.gudpy = gp.GudPy() + self.gudpy.testFilePath = os.path.join(testDir, "test_data.txt") - self.gudpy.loadFromFile( + self.gudpy.loadFromGudrunFile( loadFile=path, - format=Format.TXT ) - gPath = os.path.join(self.tempdir.name, "good_water.txt") - self.gudpy.gudrunFile.write_out(gPath, overwrite=True) + gPath = os.path.join(self.tempdir.name, GudrunFile.OUTPATH) + GudrunFileParser.writeGudrunFileTo(self.gudpy.gudrunFile, gPath) - self.gudpy.loadFromFile( + self.gudpy.loadFromGudrunFile( loadFile=gPath, - format=Format.TXT ) self.gudpy.setSaveLocation(os.path.join( self.tempdir.name, "good_water" )) + os.makedirs(os.path.join( + self.tempdir.name, "good_water" + )) + def __enter__(self): return self.gudpy @@ -237,6 +241,7 @@ def setUp(self) -> None: "grBroadening": 0.0, "powerForBroadening": 0.0, "stepSize": 0.0, + "outputFolder": '', "yamlignore": { "runAsSample", "topHatW", @@ -292,6 +297,7 @@ def setUp(self) -> None: "grBroadening": 0.0, "powerForBroadening": 0.0, "stepSize": 0.0, + "outputFolder": "", "yamlignore": { "runAsSample", "topHatW", @@ -342,6 +348,7 @@ def setUp(self) -> None: "grBroadening": 0.0, "powerForBroadening": 0.0, "stepSize": 0.0, + "outputFolder": '', "yamlignore": { "runAsSample", "topHatW", @@ -392,6 +399,7 @@ def setUp(self) -> None: "grBroadening": 0.0, "powerForBroadening": 0.0, "stepSize": 0.0, + "outputFolder": '', "yamlignore": { "runAsSample", "topHatW", @@ -455,6 
+463,8 @@ def setUp(self) -> None: "scatteringFraction": 1.0, "attenuationCoefficient": 0.0, "containers": [self.expectedContainerA], + "outputFolder": '', + "sampleFile": '', "yamlignore": { "yamlignore", } @@ -510,6 +520,8 @@ def setUp(self) -> None: "scatteringFraction": 1.0, "attenuationCoefficient": 0.0, "containers": [self.expectedContainerB], + "outputFolder": '', + "sampleFile": '', "yamlignore": { "yamlignore" } @@ -565,6 +577,8 @@ def setUp(self) -> None: "scatteringFraction": 1.0, "attenuationCoefficient": 0.0, "containers": [self.expectedContainerC], + "outputFolder": '', + "sampleFile": '', "yamlignore": { "yamlignore" } @@ -621,6 +635,8 @@ def setUp(self) -> None: "scatteringFraction": 1.0, "attenuationCoefficient": 0.0, "containers": [self.expectedContainerD], + "outputFolder": '', + "sampleFile": '', "yamlignore": { "yamlignore" } @@ -646,6 +662,7 @@ def setUp(self) -> None: self.expectedSampleB, self.expectedSampleC, ], + "outputFolder": '', "writeAllSamples": True, "yamlignore": { "writeAllSamples", @@ -751,7 +768,6 @@ def testLoadGudrunFile(self): ) ) for key_ in sampleAttrsDict.keys(): - if key_ == "containers": for j, container in enumerate(sample[key_]): containerAttrsDict = ( @@ -762,7 +778,6 @@ def testLoadGudrunFile(self): ) for _key in containerAttrsDict.keys(): - if isinstance( container[_key], (DataFiles, Composition), @@ -804,12 +819,18 @@ def testLoadGudrunFile(self): sampleBackgroundsAttrsDict[key], ) + def testSaveAsProject(self): + with GudPyContext() as gudpy: + gudpy.saveAs(os.path.join( + gudpy.projectDir, "test" + )) + def testWriteGudrunFile(self): with GudPyContext() as gudpy: - gudpy.gudrunFile.write_out( - gudpy.gudrunFile.loadFile, overwrite=True) + GudrunFileParser.writeGudrunFileTo( + gudpy.gudrunFile, gudpy.io.loadFile) with open( - gudpy.gudrunFile.loadFile, + gudpy.io.loadFile, encoding="utf-8" ) as f: outlines = "\n".join(f.readlines()[:-5]) @@ -872,7 +893,7 @@ def valueInLines(value, lines): else: valueInLines(value, 
outlines) inlines = "" - with open(gudpy.gudrunFile.loadFile, encoding="utf-8") as f: + with open(gudpy.io.loadFile, encoding="utf-8") as f: inlines = f.read() for dic in self.dicts: for value in dic.values(): @@ -896,19 +917,16 @@ def valueInLines(value, lines): def testRewriteGudrunFile(self): with GudPyContext() as gudpy: - gudpy.gudrunFile.write_out( - gudpy.gudrunFile.loadFile, overwrite=True) + GudrunFileParser.writeGudrunFileTo( + gudpy.gudrunFile, gudpy.io.loadFile) copyPath = os.path.join( gudpy.gudrunFile.instrument.GudrunInputFileDir, "copyGF.txt" ) - g1 = GudrunFile( - loadFile=gudpy.gudrunFile.loadFile, - format=Format.TXT - ) + g1 = gudpy.io.importGudrunFile(gudpy.io.loadFile) g1.instrument.GudrunInputFileDir = ( gudpy.gudrunFile.instrument.GudrunInputFileDir) - g1.write_out(copyPath, overwrite=True) + GudrunFileParser.writeGudrunFileTo(g1, copyPath) def compareString(string1, string2): return string1 == string2 @@ -937,7 +955,7 @@ def compareString(string1, string2): with open( os.path.join( - gudpy.gudrunFile.loadFile + gudpy.io.loadFile ), encoding="utf-8" ) as fg: @@ -950,12 +968,9 @@ def compareString(string1, string2): def testReloadGudrunFile(self): with GudPyContext() as gudpy: - gudpy.gudrunFile.write_out( - gudpy.gudrunFile.loadFile, overwrite=True) - g1 = GudrunFile( - loadFile=gudpy.gudrunFile.loadFile, - format=Format.TXT - ) + GudrunFileParser.writeGudrunFileTo( + gudpy.gudrunFile, gudpy.io.loadFile) + g1 = gudpy.io.importGudrunFile(gudpy.io.loadFile) g1.instrument.GudrunInputFileDir = ( gudpy.gudrunFile.instrument.GudrunInputFileDir) self.assertEqual( @@ -964,128 +979,90 @@ def testReloadGudrunFile(self): ) def testLoadEmptyGudrunFile(self): - f = open("test_data.txt", "w", encoding="utf-8") - f.close() - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual(( - 'INSTRUMENT, BEAM and NORMALISATION' - ' were not parsed. 
It\'s possible the file' - ' supplied is of an incorrect format!' - ), - str(cm.exception), - ) + with GudPyContext() as gudpy: + f = open(gudpy.testFilePath, "w", encoding="utf-8") + f.close() + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) def testLoadMissingInstrument(self): - with open("test_data.txt", "w", encoding="utf-8") as f: - f.write("' ' ' ' '/'\n\n") - f.write("BEAM {\n\n" + str(self.goodBeam) + "\n\n}\n\n") - f.write( - "NORMALISATION {\n\n" - + str(self.goodNormalisation) - + "\n\n}\n\n" - ) - - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual(( - 'INSTRUMENT, BEAM and NORMALISATION' - ' were not parsed. It\'s possible the file' - ' supplied is of an incorrect format!' - ), - str(cm.exception), - ) + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: + f.write("' ' ' ' '/'\n\n") + f.write("BEAM {\n\n" + str(self.goodBeam) + "\n\n}\n\n") + f.write( + "NORMALISATION {\n\n" + + str(self.goodNormalisation) + + "\n\n}\n\n" + ) + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) def testLoadMissingBeam(self): - with open("test_data.txt", "w", encoding="utf-8") as f: - f.write("' ' ' ' '/'\n\n") - f.write( - "INSTRUMENT {\n\n" + str(self.goodInstrument) - + "\n\n}\n\n" - ) - f.write( - "NORMALISATION {\n\n" - + str(self.goodNormalisation) - + "\n\n}" - ) - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual(( - 'INSTRUMENT, BEAM and NORMALISATION' - ' were not parsed. It\'s possible the file' - ' supplied is of an incorrect format!' 
- ), - str(cm.exception), - ) + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: + f.write("' ' ' ' '/'\n\n") + f.write( + "INSTRUMENT {\n\n" + str(self.goodInstrument) + + "\n\n}\n\n" + ) + f.write( + "NORMALISATION {\n\n" + + str(self.goodNormalisation) + + "\n\n}" + ) + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) def testLoadMissingNormalisation(self): - with open("test_data.txt", "w", encoding="utf-8") as f: - f.write("' ' ' ' '/'\n\n") - f.write( - "INSTRUMENT {\n\n" + str(self.goodInstrument) - + "\n\n}\n\n" - ) - f.write("BEAM {\n\n" + str(self.goodBeam) + "\n\n}\n\n") - - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual(( - 'INSTRUMENT, BEAM and NORMALISATION' - ' were not parsed. It\'s possible the file' - ' supplied is of an incorrect format!' - ), - str(cm.exception), - ) + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: + f.write("' ' ' ' '/'\n\n") + f.write( + "INSTRUMENT {\n\n" + str(self.goodInstrument) + + "\n\n}\n\n" + ) + f.write("BEAM {\n\n" + str(self.goodBeam) + "\n\n}\n\n") + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) def testLoadMissingInstrumentAndBeam(self): - with open("test_data.txt", "w", encoding="utf-8") as f: - f.write("' ' ' ' '/'\n\n") - f.write( - "NORMALISATION {\n\n" - + str(self.goodNormalisation) - + "\n\n}" - ) + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: + f.write("' ' ' ' '/'\n\n") + f.write( + "NORMALISATION {\n\n" + + str(self.goodNormalisation) + + "\n\n}" + ) - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual(( - 'INSTRUMENT, BEAM and NORMALISATION' - ' were not parsed. 
It\'s possible the file supplied' - ' is of an incorrect format!'), - str(cm.exception), - ) + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, gudpy.testFilePath) def testLoadMissingInstrumentAndNormalisation(self): - with open("test_data.txt", "w", encoding="utf-8") as f: - f.write("' ' ' ' '/'\n\n") - f.write("BEAM {\n\n" + str(self.goodBeam) + "\n\n}") - - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual(( - 'INSTRUMENT, BEAM and NORMALISATION' - ' were not parsed. It\'s possible the file' - ' supplied is of an incorrect format!' - ), - str(cm.exception), - ) + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: + f.write("' ' ' ' '/'\n\n") + f.write("BEAM {\n\n" + str(self.goodBeam) + "\n\n}") + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) def testLoadMissingNormalisationAndBeam(self): - with open("test_data.txt", "w", encoding="utf-8") as f: - f.write("' ' ' ' '/'\n\n") - f.write( - "INSTRUMENT {\n\n" + str(self.goodInstrument) + "\n\n}" - ) - - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual(( - 'INSTRUMENT, BEAM and NORMALISATION' - ' were not parsed. It\'s possible the file' - ' supplied is of an incorrect format!' 
- ), - str(cm.exception), - ) + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: + f.write("' ' ' ' '/'\n\n") + f.write( + "INSTRUMENT {\n\n" + + str(self.goodInstrument) + "\n\n}" + ) + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) def testLoadMissingInstrumentAttributesSeq(self): expectedInstrument = deepcopy(self.expectedInstrument) @@ -1099,25 +1076,20 @@ def testLoadMissingInstrumentAttributesSeq(self): expectedInstrument.pop("goodDetectorThreshold", None) expectedInstrument.pop("yamlignore", None) for i in range(len(expectedInstrument.keys())): - badInstrument = str(self.goodInstrument).split("\n") del badInstrument[i] badInstrument = "\n".join(badInstrument) - with open("test_data.txt", "w", encoding="utf-8") as f: - f.write("' ' ' ' '/'\n\n") - f.write( - "INSTRUMENT {\n\n" + str(badInstrument) + "\n\n}" - ) - - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual( - "Whilst parsing Instrument, an exception occured." 
- " The input file is most likely of an incorrect format, " - "and some attributes were missing.", - str(cm.exception) - ) + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: + f.write("' ' ' ' '/'\n\n") + f.write( + "INSTRUMENT {\n\n" + + str(badInstrument) + "\n\n}" + ) + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) def testLoadMissingInstrumentAttributesRand(self): expectedInstrument = deepcopy(self.expectedInstrument) @@ -1131,7 +1103,6 @@ def testLoadMissingInstrumentAttributesRand(self): expectedInstrument.pop("goodDetectorThreshold", None) expectedInstrument.pop("yamlignore", None) for i in range(50): - key = random.choice(list(expectedInstrument)) j = list(expectedInstrument).index(key) @@ -1139,19 +1110,16 @@ def testLoadMissingInstrumentAttributesRand(self): del badInstrument[j] badInstrument = "\n".join(badInstrument) - with open("test_data.txt", "w", encoding="utf-8") as f: - f.write("' ' ' ' '/'\n\n") - f.write( - "INSTRUMENT {\n\n" + str(badInstrument) + "\n\n}" - ) - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual( - "Whilst parsing Instrument, an exception occured." 
- " The input file is most likely of an incorrect format, " - "and some attributes were missing.", - str(cm.exception) - ) + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: + f.write("' ' ' ' '/'\n\n") + f.write( + "INSTRUMENT {\n\n" + + str(badInstrument) + "\n\n}" + ) + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) def testLoadMissingBeamAttributesSeq(self): expectedBeam = deepcopy(self.expectedBeam) @@ -1169,24 +1137,18 @@ def testLoadMissingBeamAttributesSeq(self): badBeam = str(self.goodBeam).split("\n") del badBeam[i] badBeam = "\n".join(badBeam) - - with open("test_data.txt", "w", encoding="utf-8") as f: - f.write("' ' ' ' '/'\n\n") - f.write( - "INSTRUMENT {\n\n" - + str(self.goodInstrument) - + "\n\n}" - ) - f.write("\n\nBEAM {\n\n" + str(badBeam) + "\n\n}") - - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual( - "Whilst parsing Beam, an exception occured." 
- " The input file is most likely of an incorrect format, " - "and some attributes were missing.", - str(cm.exception) - ) + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: + f.write("' ' ' ' '/'\n\n") + f.write( + "INSTRUMENT {\n\n" + + str(self.goodInstrument) + + "\n\n}" + ) + f.write("\n\nBEAM {\n\n" + str(badBeam) + "\n\n}") + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) def testLoadMissingBeamAttributesRand(self): expectedBeam = deepcopy(self.expectedBeam) @@ -1209,23 +1171,18 @@ def testLoadMissingBeamAttributesRand(self): del badBeam[j] badBeam = "\n".join(badBeam) - with open("test_data.txt", "w", encoding="utf-8") as f: - f.write("' ' ' ' '/'\n\n") - f.write( - "INSTRUMENT {\n\n" - + str(self.goodInstrument) - + "\n\n}" - ) - f.write("\n\nBEAM {\n\n" + str(badBeam) + "\n\n}") - - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual( - "Whilst parsing Beam, an exception occured." 
- " The input file is most likely of an incorrect format, " - "and some attributes were missing.", - str(cm.exception) - ) + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: + f.write("' ' ' ' '/'\n\n") + f.write( + "INSTRUMENT {\n\n" + + str(self.goodInstrument) + + "\n\n}" + ) + f.write("\n\nBEAM {\n\n" + str(badBeam) + "\n\n}") + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) def testLoadMissingNormalisationAttributesSeq(self): expectedNormalisation = deepcopy(self.expectedNormalisation) @@ -1237,6 +1194,7 @@ def testLoadMissingNormalisationAttributesSeq(self): expectedNormalisation.pop("outerRadius", None) expectedNormalisation.pop("sampleHeight", None) expectedNormalisation.pop("crossSectionFilename") + expectedNormalisation.pop("outputFolder", None) expectedNormalisation.pop("yamlignore", None) self.goodNormalisation.dataFiles = DataFiles([], "") @@ -1244,33 +1202,27 @@ def testLoadMissingNormalisationAttributesSeq(self): Composition("") ) for i in range(len(expectedNormalisation.keys())): - badNormalisation = str(self.goodNormalisation).split("\n") del badNormalisation[i] badNormalisation = "\n".join(badNormalisation) - - with open("test_data.txt", "w", encoding="utf-8") as f: - f.write("' ' ' ' '/'\n\n") - f.write( - "INSTRUMENT {\n\n" - + str(self.goodInstrument) - + "\n\n}" - ) - f.write("\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}") - f.write( - "\n\nNORMALISATION {\n\n" - + str(badNormalisation) - + "\n\n}" - ) - - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual( - "Whilst parsing Beam, an exception occured." 
- " The input file is most likely of an incorrect format, " - "and some attributes were missing.", - str(cm.exception) - ) + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: + f.write("' ' ' ' '/'\n\n") + f.write( + "INSTRUMENT {\n\n" + + str(self.goodInstrument) + + "\n\n}" + ) + f.write( + "\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}") + f.write( + "\n\nNORMALISATION {\n\n" + + str(badNormalisation) + + "\n\n}" + ) + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) def testLoadMissingNormalisationAttributesRand(self): expectedNormalisation = deepcopy(self.expectedNormalisation) @@ -1282,6 +1234,7 @@ def testLoadMissingNormalisationAttributesRand(self): expectedNormalisation.pop("outerRadius", None) expectedNormalisation.pop("sampleHeight", None) expectedNormalisation.pop("crossSectionFilename") + expectedNormalisation.pop("outputFolder", None) expectedNormalisation.pop("yamlignore", None) self.goodNormalisation.dataFiles = DataFiles([], "") @@ -1289,7 +1242,6 @@ def testLoadMissingNormalisationAttributesRand(self): Composition("") ) for i in range(50): - key = random.choice(list(expectedNormalisation)) j = list(expectedNormalisation).index(key) @@ -1298,53 +1250,46 @@ def testLoadMissingNormalisationAttributesRand(self): del badNormalisation[j] badNormalisation = "\n".join(badNormalisation) - with open("test_data.txt", "w", encoding="utf-8") as f: + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: + f.write("' ' ' ' '/'\n\n") + f.write( + "INSTRUMENT {\n\n" + + str(self.goodInstrument) + + "\n\n}" + ) + f.write( + "\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}") + f.write( + "\n\nNORMALISATION {\n\n" + + str(badNormalisation) + + "\n\n}" + ) + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) + + def testLoadMissingSampleBackgroundAttributes(self): + badSampleBackground = 
str(self.goodSampleBackground).split("\n") + del badSampleBackground[2] + badSampleBackground = "\n".join(badSampleBackground) + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: f.write("' ' ' ' '/'\n\n") f.write( - "INSTRUMENT {\n\n" - + str(self.goodInstrument) - + "\n\n}" + "INSTRUMENT {\n\n" + + str(self.goodInstrument) + "\n\n}" ) f.write("\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}") f.write( "\n\nNORMALISATION {\n\n" - + str(badNormalisation) + + str(self.goodNormalisation) + "\n\n}" ) - - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual( - "Whilst parsing Normalisation, an exception occured." - " The input file is most likely of an incorrect format, " - "and some attributes were missing.", - str(cm.exception) - ) - - def testLoadMissingSampleBackgroundAttributes(self): - badSampleBackground = str(self.goodSampleBackground).split("\n") - del badSampleBackground[2] - badSampleBackground = "\n".join(badSampleBackground) - with open("test_data.txt", "w", encoding="utf-8") as f: - f.write("' ' ' ' '/'\n\n") - f.write( - "INSTRUMENT {\n\n" + str(self.goodInstrument) + "\n\n}" - ) - f.write("\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}") - f.write( - "\n\nNORMALISATION {\n\n" - + str(self.goodNormalisation) - + "\n\n}" - ) - f.write("\n\n{}\n\nEND".format(str(badSampleBackground))) - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual( - "Whilst parsing Sample Background, an exception occured." 
- " The input file is most likely of an incorrect format, " - "and some attributes were missing.", - str(cm.exception) - ) + f.write("\n\n{}\n\nEND".format(str(badSampleBackground))) + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) def testLoadMissingSampleAttributesSeq(self): expectedSampleA = deepcopy(self.expectedSampleA) @@ -1360,6 +1305,8 @@ def testLoadMissingSampleAttributesSeq(self): expectedSampleA.pop("exponentialValues", None) expectedSampleA.pop("crossSectionFilename", None) expectedSampleA.pop("FTMode", None) + expectedSampleA.pop("outputFolder", None) + expectedSampleA.pop("sampleFile", None) expectedSampleA.pop("yamlignore", None) self.goodSampleBackground.samples[0].dataFiles = DataFiles([], "") @@ -1377,29 +1324,25 @@ def testLoadMissingSampleAttributesSeq(self): badSampleBackground = sbgStr.split("\n") del badSampleBackground[i + 10] badSampleBackground = "\n".join(badSampleBackground) - with open("test_data.txt", "w", encoding="utf-8") as f: - f.write("' ' ' ' '/'\n\n") - f.write( - "INSTRUMENT {\n\n" - + str(self.goodInstrument) - + "\n\n}" - ) - f.write("\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}") - f.write( - "\n\nNORMALISATION {\n\n" - + str(self.goodNormalisation) - + "\n\n}" - ) - f.write("\n\n{}\n\nEND".format(str(badSampleBackground))) - - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual( - "Whilst parsing Sample, an exception occured." 
- " The input file is most likely of an incorrect format, " - "and some attributes were missing.", - str(cm.exception) - ) + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: + f.write("' ' ' ' '/'\n\n") + f.write( + "INSTRUMENT {\n\n" + + str(self.goodInstrument) + + "\n\n}" + ) + f.write( + "\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}") + f.write( + "\n\nNORMALISATION {\n\n" + + str(self.goodNormalisation) + + "\n\n}" + ) + f.write("\n\n{}\n\nEND".format(str(badSampleBackground))) + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) def testLoadMissingSampleAttributesRand(self): expectedSampleA = deepcopy(self.expectedSampleA) @@ -1415,6 +1358,8 @@ def testLoadMissingSampleAttributesRand(self): expectedSampleA.pop("exponentialValues", None) expectedSampleA.pop("crossSectionFilename", None) expectedSampleA.pop("FTMode", None) + expectedSampleA.pop("outputFolder", None) + expectedSampleA.pop("sampleFile", None) expectedSampleA.pop("yamlignore", None) self.goodSampleBackground.samples[0].dataFiles = DataFiles([], "") @@ -1435,29 +1380,25 @@ def testLoadMissingSampleAttributesRand(self): del badSampleBackground[j + 10] badSampleBackground = "\n".join(badSampleBackground) - with open("test_data.txt", "w", encoding="utf-8") as f: - f.write("' ' ' ' '/'\n\n") - f.write( - "INSTRUMENT {\n\n" - + str(self.goodInstrument) - + "\n\n}" - ) - f.write("\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}") - f.write( - "\n\nNORMALISATION {\n\n" - + str(self.goodNormalisation) - + "\n\n}" - ) - f.write("\n\n{}\n\nEND".format(str(badSampleBackground))) - - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual( - "Whilst parsing Sample, an exception occured." 
- " The input file is most likely of an incorrect format, " - "and some attributes were missing.", - str(cm.exception) - ) + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: + f.write("' ' ' ' '/'\n\n") + f.write( + "INSTRUMENT {\n\n" + + str(self.goodInstrument) + + "\n\n}" + ) + f.write( + "\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}") + f.write( + "\n\nNORMALISATION {\n\n" + + str(self.goodNormalisation) + + "\n\n}" + ) + f.write("\n\n{}\n\nEND".format(str(badSampleBackground))) + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) def testLoadMissingContainerAttributesSeq(self): expectedContainerA = deepcopy(self.expectedContainerA) @@ -1480,6 +1421,7 @@ def testLoadMissingContainerAttributesSeq(self): expectedContainerA.pop("powerForBroadening", None) expectedContainerA.pop("stepSize", None) expectedContainerA.pop("yamlignore", None) + expectedContainerA.pop("outputFolder", None) self.goodSampleBackground.samples[0].containers[0].dataFiles = ( DataFiles([], "") @@ -1496,40 +1438,39 @@ def testLoadMissingContainerAttributesSeq(self): del badSampleBackground[i + 44] badSampleBackground = "\n".join(badSampleBackground) - with open("test_data.txt", "w", encoding="utf-8") as f: - f.write("' ' ' ' '/'\n\n") - f.write( - "INSTRUMENT {\n\n" - + str(self.goodInstrument) - + "\n\n}" - ) - f.write("\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}") - f.write( - "\n\nNORMALISATION {\n\n" - + str(self.goodNormalisation) - + "\n\n}" - ) - f.write("\n\n{}\n\nEND".format(str(badSampleBackground))) - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual( - "Whilst parsing Container, an exception occured." 
- " The input file is most likely of an incorrect format, " - "and some attributes were missing.", - str(cm.exception) - ) + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: + f.write("' ' ' ' '/'\n\n") + f.write( + "INSTRUMENT {\n\n" + + str(self.goodInstrument) + + "\n\n}" + ) + f.write( + "\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}") + f.write( + "\n\nNORMALISATION {\n\n" + + str(self.goodNormalisation) + + "\n\n}" + ) + f.write("\n\n{}\n\nEND".format(str(badSampleBackground))) + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) def testAppendExponentialValues(self): with GudPyContext() as gudpy: # Remove last element of exponential values list gudpy.gudrunFile.sampleBackgrounds[ 0].samples[0].exponentialValues[0].pop() - gudpy.gudrunFile.write_out() - gudrunFile = GudrunFile(loadFile=os.path.join( - gudpy.gudrunFile.instrument.GudrunInputFileDir, + gfFilePath = os.path.join( + gudpy.io.projectDir, gudpy.gudrunFile.OUTPATH - ), format=Format.TXT) + ) + + GudrunFileParser.writeGudrunFileTo(gudpy.gudrunFile, gfFilePath) + gudrunFile = gudpy.io.importGudrunFile(gfFilePath) # Test that a default value is appended self.assertEqual( self.expectedSampleA["exponentialValues"], @@ -1557,6 +1498,7 @@ def testLoadMissingContainerAttributesRand(self): expectedContainerA.pop("powerForBroadening", None) expectedContainerA.pop("stepSize", None) expectedContainerA.pop("yamlignore", None) + expectedContainerA.pop("outputFolder", None) self.goodSampleBackground.samples[0].containers[0].dataFiles = ( DataFiles([], "") @@ -1573,37 +1515,35 @@ def testLoadMissingContainerAttributesRand(self): ] sbgStr = str(self.goodSampleBackground) badSampleBackground = sbgStr.split("\n") + if not badSampleBackground[j + 44]: + continue del badSampleBackground[j + 44] badSampleBackground = "\n".join(badSampleBackground) - with open("test_data.txt", "w", encoding="utf-8") as f: - f.write("' ' ' ' '/'\n\n") - f.write( - 
"INSTRUMENT {\n\n" - + str(self.goodInstrument) - + "\n\n}" - ) - f.write("\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}") - f.write( - "\n\nNORMALISATION {\n\n" - + str(self.goodNormalisation) - + "\n\n}" - ) - f.write("\n\n{}\n\nEND".format(str(badSampleBackground))) - with self.assertRaises(ParserException) as cm: - GudrunFile(loadFile="test_data.txt", format=Format.TXT) - self.assertEqual( - "Whilst parsing Container, an exception occured." - " The input file is most likely of an incorrect format, " - "and some attributes were missing.", - str(cm.exception) - ) + with GudPyContext() as gudpy: + with open(gudpy.testFilePath, "w", encoding="utf-8") as f: + f.write("' ' ' ' '/'\n\n") + f.write( + "INSTRUMENT {\n\n" + + str(self.goodInstrument) + + "\n\n}" + ) + f.write( + "\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}") + f.write( + "\n\nNORMALISATION {\n\n" + + str(self.goodNormalisation) + + "\n\n}" + ) + f.write("\n\n{}\n\nEND".format(str(badSampleBackground))) + self.assertRaises( + ParserException, gudpy.loadFromGudrunFile, + gudpy.testFilePath) def testZeroExitGudrun(self): with GudPyContext() as gudpy: - gudpy.loadFromFile( - loadFile=gudpy.gudrunFile.loadFile, format=Format.TXT) + gudpy.loadFromGudrunFile(loadFile=gudpy.io.loadFile) gudpy.setSaveLocation(os.path.splitext( - gudpy.gudrunFile.loadFile)[0]) + gudpy.io.loadFile)[0]) gudpy.runGudrun() self.assertEqual(gudpy.gudrun.exitcode, 0) diff --git a/gudpy/test/test_gudpy_workflows.py b/gudpy/test/test_gudpy_workflows.py index 5e8e2f10..75982660 100644 --- a/gudpy/test/test_gudpy_workflows.py +++ b/gudpy/test/test_gudpy_workflows.py @@ -42,9 +42,8 @@ def __exit__(self, exc_type, exc_value, tb): class TestGudPyWorkflows(TestCase): def getGudFile(self, gudpy, sampleIndex) -> GudFile: - return gudpy.gudrunOutput.sampleOutputs[ - gudpy.gudrunFile.sampleBackgrounds[0].samples[ - sampleIndex].name].gudFile + return gudpy.gudrunFile.sampleBackgrounds[0].samples[ + sampleIndex].dataFiles[0].gudFile def 
testGudPyDCS(self): with GudPyContext() as gudpy: @@ -70,16 +69,13 @@ def testGudPyDCS(self): self.assertAlmostEqual(dcsLevelPercentage, 13.0, 0) for sample in gudpy.gudrunFile.sampleBackgrounds[0].samples: - mintFilename = ( - os.path.splitext(sample.dataFiles[0])[0] - ) + mintFilename = sample.dataFiles[0].name actualMintFile = ("test/TestData/water-ref/plain/" f"{mintFilename}.mint01") - actualData = open(gudpy.gudrunOutput.sampleOutputs[ - sample.name].outputs[sample.dataFiles[0]][".mint01"], - "r", encoding="utf-8" - ).readlines()[10:] + actualData = open(sample.dataFiles[0].mintFile, + "r", encoding="utf-8" + ).readlines()[10:] expectedData = open( actualMintFile, "r", encoding="utf-8" ).readlines()[10:] @@ -234,18 +230,14 @@ def testGudPyIterateBySubtractingWavelength(self): for x in gudpy.gudrunFile.sampleBackgrounds[0].samples if x.runThisSample ]: - dataFilename = ( - os.path.splitext(sample.dataFiles[0])[0] - ) - + dataFilename = sample.dataFiles[0].name actualMintFile = ( f'test/TestData/water-ref/wavelength{i}/' f'{dataFilename}.mint01' ) actualData = open( - gudpy.gudrunIterator.iterator.gudrunOutputs[-1].output( - sample.name, sample.dataFiles[0], ".mint01"), + sample.dataFiles[0].mintFile, "r", encoding="utf-8" ).readlines()[10:] expectedData = open( @@ -273,12 +265,14 @@ def testGudPyIterateBySubtractingWavelength(self): ) actualData = open( - gudpy.gudrunIterator.iterator.gudrunOutputs[ - len( - gudpy.gudrunIterator.iterator.gudrunOutputs - ) - 2 - ].output( - sample.name, sample.dataFiles[0], ".msubw01"), + os.path.join( + gudpy.projectDir, "Gudrun", + "Inelasticity_Subtraction_(WavelengthIteration)", + f"WavelengthIteration_{i}", + sample.name, + sample.dataFiles[0].name, + "Diagnostics", + f"{sample.dataFiles[0].name}.msubw01"), "r", encoding="utf-8" ).readlines()[10:] expectedData = open( diff --git a/gudpy/test/test_gudpy_yaml.py b/gudpy/test/test_gudpy_yaml.py index c68a6380..6be08f13 100644 --- a/gudpy/test/test_gudpy_yaml.py +++ 
b/gudpy/test/test_gudpy_yaml.py @@ -1,150 +1,166 @@ from unittest import TestCase +import tempfile import os from core import gudpy -from core.enums import Format class TestYAML(TestCase): def testYAML(self): - gudpy1 = gudpy.GudPy() - gudpy1.loadFromFile( - loadFile="test/TestData/NIMROD-water/water.txt", - format=Format.TXT - ) - gudpy1.save(path="test/TestData/NIMROD-water/water.yaml") - gf1 = gudpy1.gudrunFile - - gudpy2 = gudpy.GudPy() - gudpy2.loadFromFile( - loadFile="test/TestData/NIMROD-water/water.yaml", - format=Format.YAML - ) - gf2 = gudpy2.gudrunFile - - gf1.instrument.GudrunInputFileDir = os.path.abspath( - gf1.instrument.GudrunInputFileDir) - gf2.instrument.GudrunInputFileDir = os.path.abspath( - gf2.instrument.GudrunInputFileDir) - - self.assertDictEqual( - gf1.instrument.__dict__, gf2.instrument.__dict__) - self.assertDictEqual(gf2.beam.__dict__, gf2.beam.__dict__) - - normalisationDataFilesA = gf1.normalisation.__dict__.pop("dataFiles") - normalisationDataFilesBgA = gf1.normalisation.__dict__.pop( - "dataFilesBg" - ) - normalisationCompositionA = gf1.normalisation.__dict__.pop( - "composition" - ) - normalisationElementsA = normalisationCompositionA.__dict__.pop( - "elements" - ) - - normalisationDataFilesB = gf2.normalisation.__dict__.pop("dataFiles") - normalisationDataFilesBgB = gf2.normalisation.__dict__.pop( - "dataFilesBg" - ) - normalisationCompositionB = gf2.normalisation.__dict__.pop( - "composition" - ) - normalisationElementsB = normalisationCompositionB.__dict__.pop( - "elements" - ) - - self.assertDictEqual( - normalisationDataFilesA.__dict__, normalisationDataFilesB.__dict__ - ) - self.assertDictEqual( - normalisationDataFilesBgA.__dict__, - normalisationDataFilesBgB.__dict__, - ) - self.assertDictEqual( - normalisationCompositionA.__dict__, - normalisationCompositionB.__dict__, - ) - self.assertDictEqual( - gf1.normalisation.__dict__, gf2.normalisation.__dict__ - ) - - for elementA, elementB in zip( - normalisationElementsA, 
normalisationElementsB - ): - self.assertDictEqual(elementA.__dict__, elementB.__dict__) - - sampleBackgroundDataFilesA = gf1.sampleBackgrounds[0].__dict__.pop( - "dataFiles" - ) - sampleBackgroundSamplesA = gf1.sampleBackgrounds[0].__dict__.pop( - "samples" - ) - - sampleBackgroundDataFilesB = gf2.sampleBackgrounds[0].__dict__.pop( - "dataFiles" - ) - sampleBackgroundSamplesB = gf2.sampleBackgrounds[0].__dict__.pop( - "samples" - ) - - self.assertDictEqual( - sampleBackgroundDataFilesA.__dict__, - sampleBackgroundDataFilesB.__dict__, - ) - self.assertDictEqual( - gf1.sampleBackgrounds[0].__dict__, - gf2.sampleBackgrounds[0].__dict__, - ) - - for sampleA, sampleB in zip( - sampleBackgroundSamplesA, sampleBackgroundSamplesB - ): - sampleDataFilesA = sampleA.__dict__.pop("dataFiles") - sampleCompositionA = sampleA.__dict__.pop("composition") - sampleElementsA = sampleCompositionA.__dict__.pop("elements") - sampleContainersA = sampleA.__dict__.pop("containers") - - sampleDataFilesB = sampleB.__dict__.pop("dataFiles") - sampleCompositionB = sampleB.__dict__.pop("composition") - sampleElementsB = sampleCompositionB.__dict__.pop("elements") - sampleContainersB = sampleB.__dict__.pop("containers") + with tempfile.TemporaryDirectory() as tmp: + gudpy1 = gudpy.GudPy() + gudpy1.loadFromGudrunFile( + loadFile="test/TestData/NIMROD-water/water.txt", + ) + gudpy1.saveAs(os.path.join(tmp, "water")) + gf1 = gudpy1.gudrunFile + + gudpy2 = gudpy.GudPy() + gudpy2.loadFromProject(os.path.join(tmp, "water")) + gf2 = gudpy2.gudrunFile + + gf1.instrument.GudrunInputFileDir = os.path.abspath( + gf1.instrument.GudrunInputFileDir) + gf2.instrument.GudrunInputFileDir = os.path.abspath( + gf2.instrument.GudrunInputFileDir) self.assertDictEqual( - sampleDataFilesA.__dict__, sampleDataFilesB.__dict__ + gf1.instrument.__dict__, gf2.instrument.__dict__) + self.assertDictEqual(gf2.beam.__dict__, gf2.beam.__dict__) + + normalisationDataFilesA = gf1.normalisation.__dict__.pop( + "dataFiles") + 
normalisationDataFilesA.__dict__.pop("_dataFiles") + normalisationDataFilesBgA = gf1.normalisation.__dict__.pop( + "dataFilesBg" + ) + normalisationDataFilesBgA.__dict__.pop("_dataFiles") + normalisationCompositionA = gf1.normalisation.__dict__.pop( + "composition" + ) + normalisationElementsA = normalisationCompositionA.__dict__.pop( + "elements" + ) + + normalisationDataFilesB = gf2.normalisation.__dict__.pop( + "dataFiles") + normalisationDataFilesB.__dict__.pop("_dataFiles") + normalisationDataFilesBgB = gf2.normalisation.__dict__.pop( + "dataFilesBg" + ) + normalisationDataFilesBgB.__dict__.pop("_dataFiles") + normalisationCompositionB = gf2.normalisation.__dict__.pop( + "composition" + ) + normalisationElementsB = normalisationCompositionB.__dict__.pop( + "elements" + ) + + self.assertDictEqual( + normalisationDataFilesA.__dict__, + normalisationDataFilesB.__dict__ + ) + self.assertDictEqual( + normalisationDataFilesBgA.__dict__, + normalisationDataFilesBgB.__dict__, + ) + self.assertDictEqual( + normalisationCompositionA.__dict__, + normalisationCompositionB.__dict__, ) self.assertDictEqual( - sampleCompositionA.__dict__, sampleCompositionB.__dict__ + gf1.normalisation.__dict__, gf2.normalisation.__dict__ ) - for elementA, elementB in zip(sampleElementsA, sampleElementsB): + + for elementA, elementB in zip( + normalisationElementsA, normalisationElementsB + ): self.assertDictEqual(elementA.__dict__, elementB.__dict__) - self.assertDictEqual(sampleA.__dict__, sampleB.__dict__) + sampleBackgroundDataFilesA = gf1.sampleBackgrounds[0].__dict__.pop( + "dataFiles" + ) + sampleBackgroundDataFilesA.__dict__.pop("_dataFiles") + sampleBackgroundSamplesA = gf1.sampleBackgrounds[0].__dict__.pop( + "samples" + ) - for containerA, containerB in zip( - sampleContainersA, sampleContainersB - ): - containerDataFilesA = containerA.__dict__.pop("dataFiles") - containerCompositionA = containerA.__dict__.pop("composition") - containerElementsA = 
containerCompositionA.__dict__.pop( - "elements" - ) + sampleBackgroundDataFilesB = gf2.sampleBackgrounds[0].__dict__.pop( + "dataFiles" + ) + sampleBackgroundDataFilesB.__dict__.pop("_dataFiles") + sampleBackgroundSamplesB = gf2.sampleBackgrounds[0].__dict__.pop( + "samples" + ) - containerDataFilesB = containerB.__dict__.pop("dataFiles") - containerCompositionB = containerB.__dict__.pop("composition") - containerElementsB = containerCompositionB.__dict__.pop( - "elements" - ) + self.assertDictEqual( + sampleBackgroundDataFilesA.__dict__, + sampleBackgroundDataFilesB.__dict__, + ) + self.assertDictEqual( + gf1.sampleBackgrounds[0].__dict__, + gf2.sampleBackgrounds[0].__dict__, + ) + + for sampleA, sampleB in zip( + sampleBackgroundSamplesA, sampleBackgroundSamplesB + ): + sampleDataFilesA = sampleA.__dict__.pop("dataFiles") + sampleDataFilesA.__dict__.pop("_dataFiles") + sampleCompositionA = sampleA.__dict__.pop("composition") + sampleElementsA = sampleCompositionA.__dict__.pop("elements") + sampleContainersA = sampleA.__dict__.pop("containers") + + sampleDataFilesB = sampleB.__dict__.pop("dataFiles") + sampleDataFilesB.__dict__.pop("_dataFiles") + sampleCompositionB = sampleB.__dict__.pop("composition") + sampleElementsB = sampleCompositionB.__dict__.pop("elements") + sampleContainersB = sampleB.__dict__.pop("containers") self.assertDictEqual( - containerDataFilesA.__dict__, containerDataFilesB.__dict__ + sampleDataFilesA.__dict__, sampleDataFilesB.__dict__ ) self.assertDictEqual( - containerCompositionA.__dict__, - containerCompositionB.__dict__, + sampleCompositionA.__dict__, sampleCompositionB.__dict__ ) for elementA, elementB in zip( - containerElementsA, containerElementsB - ): + sampleElementsA, sampleElementsB): self.assertDictEqual(elementA.__dict__, elementB.__dict__) - self.assertDictEqual(containerA.__dict__, containerB.__dict__) + self.assertDictEqual(sampleA.__dict__, sampleB.__dict__) + + for containerA, containerB in zip( + sampleContainersA, 
sampleContainersB + ): + containerDataFilesA = containerA.__dict__.pop("dataFiles") + containerDataFilesA.__dict__.pop("_dataFiles") + containerCompositionA = containerA.__dict__.pop( + "composition") + containerElementsA = containerCompositionA.__dict__.pop( + "elements" + ) + + containerDataFilesB = containerB.__dict__.pop("dataFiles") + containerDataFilesB.__dict__.pop("_dataFiles") + containerCompositionB = containerB.__dict__.pop( + "composition") + containerElementsB = containerCompositionB.__dict__.pop( + "elements" + ) + + self.assertDictEqual( + containerDataFilesA.__dict__, + containerDataFilesB.__dict__ + ) + self.assertDictEqual( + containerCompositionA.__dict__, + containerCompositionB.__dict__, + ) + for elementA, elementB in zip( + containerElementsA, containerElementsB + ): + self.assertDictEqual( + elementA.__dict__, elementB.__dict__) + + self.assertDictEqual( + containerA.__dict__, containerB.__dict__) diff --git a/gudpy/test/test_gudrun_classes.py b/gudpy/test/test_gudrun_classes.py index a783058d..7e69a585 100644 --- a/gudpy/test/test_gudrun_classes.py +++ b/gudpy/test/test_gudrun_classes.py @@ -1,7 +1,6 @@ from unittest import TestCase from core.exception import ParserException -from core.gudrun_file import GudrunFile from core.beam import Beam from core.composition import Composition from core.container import Container @@ -15,17 +14,21 @@ FTModes, Instruments, Scales, UnitsOfDensity, MergeWeights, NormalisationType, OutputUnits, Geometry, CrossSectionSource ) +from core.io.gudpy_io import GudPyIO class TestGudrunClasses(TestCase): def testEmptyPath(self): - emptyPath = "" - self.assertRaises(RuntimeError, GudrunFile, loadFile=emptyPath) + gudpyIO = GudPyIO() + self.assertRaises( + ParserException, gudpyIO.importGudrunFile, emptyPath) def testInvalidPath(self): invalidPath = "invalid_path" - self.assertRaises(ParserException, GudrunFile, loadFile=invalidPath) + gudpyIO = GudPyIO() + self.assertRaises( + ParserException, 
gudpyIO.importGudrunFile, invalidPath) def testInstrumentInitDataTypes(self): diff --git a/gudpy/test/test_purge_file.py b/gudpy/test/test_purge_file.py index cf138845..bd69ae7a 100644 --- a/gudpy/test/test_purge_file.py +++ b/gudpy/test/test_purge_file.py @@ -1,37 +1,21 @@ import os from unittest import TestCase -from shutil import copyfile +import tempfile from core.purge_file import PurgeFile -from core.enums import Format from core import gudpy +from core.io.gudpy_io import GudPyIO class TestPurgeFile(TestCase): def setUp(self) -> None: - path = "TestData/NIMROD-water/water.txt" - - if os.name == "nt": - from pathlib import Path - dirpath = Path().resolve() / "test/" / Path(path) - else: - dirpath = ( - "/".join(os.path.realpath(__file__).split("/")[:-1]) - + "/" - + path - ) + path = f"TestData{os.path.sep}NIMROD-water{os.path.sep}water.txt" + dirpath = (os.path.dirname(__file__)) self.gudpy = gudpy.GudPy() self.keepsakes = os.listdir() - copyfile(dirpath, "test/TestData/NIMROD-water/good_water.txt") - self.gudpy.loadFromFile( - loadFile="test/TestData/NIMROD-water/good_water.txt", - format=Format.TXT) - - self.gudpy.gudrunFile.write_out( - path="test/TestData/NIMROD-water/good_water.txt", - overwrite=True - ) + refFile = os.path.join(dirpath, path) + self.gudpy.loadFromGudrunFile(loadFile=refFile) self.g = self.gudpy.gudrunFile self.expectedPurgeFile = { "standardDeviation": (10, 10), @@ -42,7 +26,6 @@ def setUp(self) -> None: return super().setUp() def tearDown(self) -> None: - [os.remove(f) for f in os.listdir() if f not in self.keepsakes] return super().tearDown() @@ -58,9 +41,11 @@ def testCreatePurgeClass(self): ) def testWritePurgeFile(self): - - purge = PurgeFile(self.g) - purge.write_out() - with open("purge_det.dat", encoding="utf-8") as f: - outlines = f.read() - self.assertEqual(outlines, str(purge)) + with tempfile.TemporaryDirectory() as tmp: + path = os.path.join(tmp, "purge_det.dat") + purge = PurgeFile(self.g) + GudPyIO.writeObject(purge, 
path) + with open(path, encoding="utf-8") as f: + outlines = f.read() + self.assertEqual(outlines, str(purge)) + f.close()