From d79828b9fd294337c13b9d92be21438365b5c460 Mon Sep 17 00:00:00 2001 From: Daniel Pozsar Date: Sat, 9 Nov 2024 18:30:37 +0100 Subject: [PATCH] added better input --- README.md | 1 + input.fdf | 12 +- src/grogupy/core.py | 181 +++++++++++++------------- src/grogupy/grogu.py | 176 ++++++++++++++++++++------ src/grogupy/io.py | 295 +++++++++++++++++++++++++++++++++---------- test.ipynb | 105 +-------------- 6 files changed, 467 insertions(+), 303 deletions(-) diff --git a/README.md b/README.md index c355a29..6871ccf 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,7 @@ More on the theoretical background can be seen on [arXiv](https://arxiv.org/abs/ - Run tests on different magnetic materials and compare it to Grogu Matlab --> ran on Jij_for_Marci_6p45ang, but I could not compare data ## Developing +- Magnetic entities cannot handle orbitals, only shells - Use ReadThe Docs [addons](https://docs.readthedocs.io/en/stable/addons.html) - Check the symmetrization of the Hamiltonian and overlap matrix to make them hermitian - Check if exchange field has scalar part diff --git a/input.fdf b/input.fdf index 050a17e..df6a529 100644 --- a/input.fdf +++ b/input.fdf @@ -11,13 +11,13 @@ ScfXcfOrientation [ 0 0 1 ] # comment test %endblock XCFRotation # comment test %block MagneticEntities # comment test -Cluster 4 5 # this is a list of atoms -AtomShell 3 2 # this is one atom and shell index -AtomShell 4 2 # this is one atom and shell index -AtomShell 5 2 # this is one atom and shell index +Cluster 4 5 # this is a list of atoms +AtomShell 3 2 # this is one atom and shell index +AtomShell 4 2 3 # this is one atom and shell index +AtomShell 5 2 # this is one atom and shell index -AtomOrbital 3 # this is one atom and orbital index -Orbitals # this is a slice of orbital index +AtomOrbital 3 7 2 4 # this is one atom and orbital index +Orbitals 2 1 4 9 # this is a slice of orbital index %endblock MagneticEntities diff --git a/src/grogupy/core.py b/src/grogupy/core.py index 125bfae..509a200 100644 --- a/src/grogupy/core.py +++ b/src/grogupy/core.py @@ -18,6 +18,11 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +"""This contains the core functions for the grogupy script. + + +""" + import numpy as np from numpy.linalg import inv @@ -25,57 +30,23 @@ from grogupy.magnetism import blow_up_orbindx, parse_magnetic_entity from grogupy.utilities import commutator -def parallel_Gk(HK, SK, eran, eset): - """Calculates the Greens function by inversion. - - It calculates the Greens function on all the energy levels at the same time. - - Args: - HK : (NO, NO), np.array_like - Hamiltonian at a given k point - SK : (NO, NO), np.array_like - Overlap Matrix at a given k point - eran : (eset) np.array_like - Energy sample along the contour - eset : int - Number of energy samples along the contour - - Returns: - Gk : (eset, NO, NO), np.array_like - Green's function at a given k point - """ - - # Calculates the Greens function on all the energy levels - return inv(SK * eran.reshape(eset, 1, 1) - HK) - - -def sequential_GK(HK, SK, eran, eset): - """Calculates the Greens function by inversion. +def onsite_projection(matrix, idx1, idx2): + """It produces the slices of a matrix for the on site projection. - It calculates sequentially over the energy levels. + The slicing is along the last two axes as these contains the orbital indexing. 
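+
+    A minimal usage sketch (the index values are only illustrative, not taken
+    from the repository):
+
+        orb = np.array([10, 11, 12])           # hypothetical orbital indices
+        Gii = onsite_projection(Gk, orb, orb)  # on-site block, shape (..., 3, 3)
+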
Args: - HK : (NO, NO), np.array_like - Hamiltonian at a given k point - SK : (NO, NO), np.array_like - Overlap Matrix at a given k point - eran : (eset) np.array_like - Energy sample along the contour - eset : int - Number of energy samples along the contour + matrix : (..., :, :) np.array_like + Some matrix + idx : np.array_like + The indexes of the orbitals Returns: - Gk : (eset, NO, NO), np.array_like - Green's function at a given k point + np.array_like + Reduced matrix based on the projection """ - # creates an empty holder - Gk = np.zeros(shape=(eset, HK.shape[0], HK.shape[1]), dtype="complex128") - # fills the holder sequentially by the Greens function on a given energy - for j in range(eset): - Gk[j] = inv(SK * eran[j] - HK) - - return Gk + return matrix[..., idx1, :][..., idx2] def calc_Vu(H, Tu): @@ -100,40 +71,6 @@ def calc_Vu(H, Tu): return Vu1, Vu2 -def remove_clutter_for_save(pairs, magnetic_entities): - """Removes unimportant data from the dictionaries. - - It is used before saving to throw away data that - is not needed for post processing. - - Args: - pairs : dict - Contains all the pair information - magnetic_entities : dict - Contains all the magnetic entity information - - Returns: - pairs : dict - Contains all the reduced pair information - magnetic_entities : dict - Contains all the reduced magnetic entity information - """ - - # remove clutter from magnetic entities and pair information - for pair in pairs: - del pair["Gij"] - del pair["Gij_tmp"] - del pair["Gji"] - del pair["Gji_tmp"] - for mag_ent in magnetic_entities: - del mag_ent["Gii"] - del mag_ent["Gii_tmp"] - del mag_ent["Vu1"] - del mag_ent["Vu2"] - - return pairs, magnetic_entities - - def build_hh_ss(dh): """It builds the Hamiltonian and Overlap matrix from the sisl.dh class. @@ -362,20 +299,88 @@ def setup_pairs_and_magnetic_entities( return pairs, magnetic_entities -def onsite_projection(matrix, idx1, idx2): - """It produces the slices of a matrix for the on site projection. +def parallel_Gk(HK, SK, eran, eset): + """Calculates the Greens function by inversion. - The slicing is along the last two axes as these contains the orbital indexing. + It calculates the Greens function on all the energy levels at the same time. Args: - matrix : (..., :, :) np.array_like - Some matrix - idx : np.array_like - The indexes of the orbitals + HK : (NO, NO), np.array_like + Hamiltonian at a given k point + SK : (NO, NO), np.array_like + Overlap Matrix at a given k point + eran : (eset) np.array_like + Energy sample along the contour + eset : int + Number of energy samples along the contour Returns: - np.array_like - Reduced matrix based on the projection + Gk : (eset, NO, NO), np.array_like + Green's function at a given k point """ - return matrix[..., idx1, :][..., idx2] + # Calculates the Greens function on all the energy levels + return inv(SK * eran.reshape(eset, 1, 1) - HK) + + +def sequential_GK(HK, SK, eran, eset): + """Calculates the Greens function by inversion. + + It calculates sequentially over the energy levels. 
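+
+    For a single energy sample E_j the Green's function is obtained by direct
+    inversion, G(E_j, k) = (E_j * S(k) - H(k))^(-1), which is what the loop in
+    the body evaluates with numpy.linalg.inv.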
+ + Args: + HK : (NO, NO), np.array_like + Hamiltonian at a given k point + SK : (NO, NO), np.array_like + Overlap Matrix at a given k point + eran : (eset) np.array_like + Energy sample along the contour + eset : int + Number of energy samples along the contour + + Returns: + Gk : (eset, NO, NO), np.array_like + Green's function at a given k point + """ + + # creates an empty holder + Gk = np.zeros(shape=(eset, HK.shape[0], HK.shape[1]), dtype="complex128") + # fills the holder sequentially by the Greens function on a given energy + for j in range(eset): + Gk[j] = inv(SK * eran[j] - HK) + + return Gk + + +def remove_clutter_for_save(pairs, magnetic_entities): + """Removes unimportant data from the dictionaries. + + It is used before saving to throw away data that + is not needed for post processing. + + Args: + pairs : dict + Contains all the pair information + magnetic_entities : dict + Contains all the magnetic entity information + + Returns: + pairs : dict + Contains all the reduced pair information + magnetic_entities : dict + Contains all the reduced magnetic entity information + """ + + # remove clutter from magnetic entities and pair information + for pair in pairs: + del pair["Gij"] + del pair["Gij_tmp"] + del pair["Gji"] + del pair["Gji_tmp"] + for mag_ent in magnetic_entities: + del mag_ent["Gii"] + del mag_ent["Gii_tmp"] + del mag_ent["Vu1"] + del mag_ent["Vu2"] + + return pairs, magnetic_entities diff --git a/src/grogupy/grogu.py b/src/grogupy/grogu.py index 0836f7c..a79c39f 100644 --- a/src/grogupy/grogu.py +++ b/src/grogupy/grogu.py @@ -19,6 +19,7 @@ # SOFTWARE. import warnings +from argparse import ArgumentParser from sys import getsizeof from timeit import default_timer as timer @@ -46,46 +47,92 @@ except: from grogupy import * -def main(): - # runtime information - times = dict() - times["start_time"] = timer() +def parse_command_line(): + """This function can read input from the command line.""" - # input output stuff - ###################################################################### - ###################################################################### - ###################################################################### + parser = ArgumentParser() - infile = "/Users/danielpozsar/Downloads/nojij/Fe3GeTe2/monolayer/soc/lat3_791/Fe3GeTe2.fdf" - outfile = "./Fe3GeTe2_notebook" + parser.add_argument( + "-i", + "--input", + dest="infile", + default=None, + type=str, + help="Input file name", + required=True, + ) + parser.add_argument( + "-o", + "--output", + dest="outfile", + default=None, + type=str, + help="Output file name", + ) - magnetic_entities = [ - dict(atom=3, l=2), - dict(atom=4, l=2), - dict(atom=5, l=2), - ] - pairs = [ - dict(ai=0, aj=1, Ruc=np.array([0, 0, 0])), - dict(ai=0, aj=2, Ruc=np.array([0, 0, 0])), - dict(ai=1, aj=2, Ruc=np.array([0, 0, 0])), - dict(ai=0, aj=2, Ruc=np.array([-1, -1, 0])), - dict(ai=1, aj=2, Ruc=np.array([-1, -1, 0])), - dict(ai=0, aj=2, Ruc=np.array([-1, 0, 0])), - dict(ai=1, aj=2, Ruc=np.array([-1, 0, 0])), - dict(ai=1, aj=2, Ruc=np.array([-2, 0, 0])), - dict(ai=1, aj=2, Ruc=np.array([-3, 0, 0])), - ] - simulation_parameters = default_args - simulation_parameters["infile"] = infile - simulation_parameters["outfile"] = outfile - simulation_parameters["kset"] = 20 - simulation_parameters["kdirs"] = "xy" - simulation_parameters["eset"] = 600 - simulation_parameters["esetp"] = 10000 + # parser.add_argument('--scf-orientation', dest = 'scf_xcf_orientation', default = None, help = 'Output file name') + # 
parser.add_argument('--ref-orientation', dest = 'ref_xcf_orientations', default = None, help = 'Output file name') - ###################################################################### - ###################################################################### - ###################################################################### + parser.add_argument( + "--kset", + dest="kset", + default=None, + type=int, + help="k-space resolution of calculation", + ) + parser.add_argument( + "--kdirs", + dest="kdirs", + default=None, + type=str, + help="Definition of k-space dimensionality", + ) + parser.add_argument( + "--ebot", + dest="ebot", + default=None, + type=float, + help="Bottom energy of the contour", + ) + parser.add_argument( + "--eset", + dest="eset", + default=None, + type=int, + help="Number of energy points on the contour", + ) + parser.add_argument( + "--eset-p", + dest="esetp", + default=None, + type=int, + help="Parameter tuning the distribution on the contour", + ) + + parser.add_argument( + "--parallel-green", + dest="parallel_solver_for_Gk", + default=None, + type=bool, + help="Whether to use the parallel or sequential solver for Greens function", + ) + parser.add_argument( + "--padawan-mode", + dest="padawan_mode", + default=None, + type=bool, + help="If it is on it turns on extra helpful information for new users", + ) + + cmd_line_args = parser.parse_args() + + return cmd_line_args + + +def main(simulation_parameters, magnetic_entities, pairs): + # runtime information + times = dict() + times["start_time"] = timer() # MPI parameters comm = MPI.COMM_WORLD @@ -458,4 +505,59 @@ def main(): if __name__ == "__main__": - main() + # loading parameters + # it is not clear how to give grogu.fdf path... + command_line_arguments = parse_command_line() + fdf_arguments, magnetic_entities, pairs = read_fdf(command_line_arguments["infile"]) + + # right now we do not use any of these input, but it shows + # the order of priority when processing arguments + default_arguments = False + fdf_arguments = False + command_line_arguments = False + simulation_parameters = process_input_args( + default_arguments, fdf_arguments, command_line_arguments, ACCEPTED_INPUTS + ) + + #################################################################################################### + # This is the input file for now # + #################################################################################################### + magnetic_entities = [ + dict(atom=3, l=2), + dict(atom=4, l=2), + dict(atom=5, l=2), + ] + pairs = [ + dict(ai=0, aj=1, Ruc=np.array([0, 0, 0])), + dict(ai=0, aj=2, Ruc=np.array([0, 0, 0])), + dict(ai=1, aj=2, Ruc=np.array([0, 0, 0])), + dict(ai=0, aj=2, Ruc=np.array([-1, -1, 0])), + dict(ai=1, aj=2, Ruc=np.array([-1, -1, 0])), + dict(ai=0, aj=2, Ruc=np.array([-1, 0, 0])), + dict(ai=1, aj=2, Ruc=np.array([-1, 0, 0])), + dict(ai=1, aj=2, Ruc=np.array([-2, 0, 0])), + dict(ai=1, aj=2, Ruc=np.array([-3, 0, 0])), + ] + simulation_parameters = dict() + simulation_parameters["infile"] = ( + "/Users/danielpozsar/Downloads/nojij/Fe3GeTe2/monolayer/soc/lat3_791/Fe3GeTe2.fdf" + ) + simulation_parameters["outfile"] = "./Fe3GeTe2_notebook" + simulation_parameters["scf_xcf_orientation"] = np.array([0, 0, 1]) + simulation_parameters["ref_xcf_orientations"] = [ + dict(o=np.array([1, 0, 0]), vw=[np.array([0, 1, 0]), np.array([0, 0, 1])]), + dict(o=np.array([0, 1, 0]), vw=[np.array([1, 0, 0]), np.array([0, 0, 1])]), + dict(o=np.array([0, 0, 1]), vw=[np.array([1, 0, 0]), np.array([0, 1, 0])]), + ] + 
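+    # each reference orientation above pairs a quantization axis 'o' with the two
+    # perpendicular rotation directions 'vw' (cf. print_job_description in io.py)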
simulation_parameters["kset"] = 20 + simulation_parameters["kdirs"] = "xy" + simulation_parameters["ebot"] = None + simulation_parameters["eset"] = 600 + simulation_parameters["esetp"] = 10000 + simulation_parameters["parallel_solver_for_Gk"] = False + simulation_parameters["padawan_mode"] = True + #################################################################################################### + # This is the input file for now # + #################################################################################################### + + main(simulation_parameters, magnetic_entities, pairs) diff --git a/src/grogupy/io.py b/src/grogupy/io.py index 3a1821f..330df0b 100644 --- a/src/grogupy/io.py +++ b/src/grogupy/io.py @@ -18,12 +18,27 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -from argparse import ArgumentParser from pickle import dump, load import numpy as np - -default_args = dict( +from sisl.io import fdfSileSiesta + +# list of accepted input parameters +ACCEPTED_INPUTS = [ + "infile", + "outfile", + "scf_xcf_orientation", + "ref_xcf_orientations", + "kset", + "kdirs", + "ebot", + "eset", + "esetp", + "parallel_solver_for_Gk", + "padawan_mode", +] + +default_arguments = dict( infile=None, outfile=None, scf_xcf_orientation=np.array([0, 0, 1]), @@ -41,18 +56,158 @@ default_args = dict( padawan_mode=True, ) -# parser = ArgumentParser() -# parser.add_argument('--input' , dest = 'infile' , default=None , help = 'Input file name') -# parser.add_argument('--output' , dest = 'outfile', default=None , help = 'Output file name') +def read_fdf(path): + """It reads the simulation parameters, magnetic entities and pairs from the fdf. -# parser.add_argument('--kset' , dest = 'kset' , default = 2 , type=int , help = 'k-space resolution of Jij calculation') -# parser.add_argument('--kdirs' , dest = 'kdirs' , default = 'xyz' , help = 'Definition of k-space dimensionality') -# parser.add_argument('--ebot' , dest = 'ebot' , default = None , type=float, help = 'Bottom energy of the contour') -# parser.add_argument('--eset' , dest = 'eset' , default = 42 , type=int , help = 'Number of energy points on the contour') -# parser.add_argument('--eset-p' , dest = 'esetp' , default = 1000 , type=int , help = 'Parameter tuning the distribution on the contour') + Args: + path : string + The path to the .fdf file -# cmd_line_args = parser.parse_args() + Returns: + fdf_arguments : dict + The read input arguments from the fdf file + magnetic_entities : list + It contains the dictionaries associated with the magnetic entities + pairs : dict + It contains the dictionaries associated with the pair information + """ + + # read fdf file + fdf = fdfSileSiesta(path) + fdf_arguments = dict() + + InputFile = fdf.get("InputFile") + if InputFile is not None: + fdf_arguments["infile"] = InputFile + + OutputFile = fdf.get("OutputFile") + if OutputFile is not None: + fdf_arguments["outfile"] = OutputFile + + ScfXcfOrientation = fdf.get("ScfXcfOrientation") + if ScfXcfOrientation is not None: + fdf_arguments["scf_xcf_orientation"] = np.array(ScfXcfOrientation) + + XCF_Rotation = fdf.get("XCF_Rotation") + if XCF_Rotation is not None: + rotations = [] + # iterate over rows + for rot in XCF_Rotation: + # convert row to dictionary + dat = np.array(rot.split()[:9], dtype=float) + o = dat[:3] + vw = dat[3:].reshape(2, 3) + rotations.append(dict(o=o, vw=vw)) + fdf_arguments["ref_xcf_orientations"] = rotations + + Kset = fdf.get("INTEGRAL.Kset") + if Kset is not None: + 
fdf_arguments["kset"] = Kset + + Kdirs = fdf.get("INTEGRAL.Kdirs") + if Kdirs is not None: + fdf_arguments["kdirs"] = Kdirs + + # This is permitted because it means automatic Ebot definition + fdf_arguments["ebot"] = fdf.get("INTEGRAL.Ebot") + + Eset = fdf.get("INTEGRAL.Eset") + if Eset is not None: + fdf_arguments["eset"] = Eset + + Esetp = fdf.get("INTEGRAL.Esetp") + if Esetp is not None: + fdf_arguments["esetp"] = Esetp + + ParallelSolver = fdf.get("GREEN.ParallelSolver") + if ParallelSolver is not None: + fdf_arguments["parallel_solver_for_Gk"] = ParallelSolver + + PadawanMode = fdf.get("PadawanMode") + if PadawanMode is not None: + fdf_arguments["padawan_mode"] = PadawanMode + + Pairs = fdf.get("Pairs") + if Pairs is not None: + pairs = [] + # iterate over rows + for fdf_pair in Pairs: + # convert data + dat = np.array(fdf_pair.split()[:5], dtype=int) + # create pair dictionary + my_pair = dict(ai=dat[0], aj=dat[1], Ruc=np.array(dat[2:])) + pairs.append(my_pair) + + MagneticEntities = fdf.get("MagneticEntities") + if MagneticEntities is not None: + magnetic_entities = [] + for mag_ent in MagneticEntities: + row = mag_ent.split() + dat = [] + for string in row: + if string.find("#") != -1: + break + dat.append(string) + if dat[0] in {"Cluster", "cluster"}: + magnetic_entities.append(dict(atom=[int(_) for _ in dat[1:]])) + continue + elif dat[0] in {"AtomShell", "Atomshell", "atomShell", "atomshell"}: + magnetic_entities.append( + dict(atom=int(dat[1]), l=[int(_) for _ in dat[2:]]) + ) + continue + elif dat[0] in {"AtomOrbital", "Atomorbital", "tomOrbital", "atomorbital"}: + magnetic_entities.append( + dict(atom=int(dat[1]), orb=[int(_) for _ in dat[2:]]) + ) + continue + elif dat[0] in {"Orbitals", "orbitals"}: + magnetic_entities.append(dict(orb=[int(_) for _ in dat[1:]])) + continue + else: + raise Exception("Unrecognizable magnetic entity in .fdf!") + + return fdf_arguments, magnetic_entities, pairs + + +def process_input_args( + default_arguments, + fdf_arguments, + command_line_arguments, + accepted_inputs=ACCEPTED_INPUTS, +): + """It returns the final simulation parameters based on the inputs. + + The merging is done in the order of priority: + 1. command line arguments + 2. fdf arguments + 3. default arguments + + Args: + default_arguments : dict + Default arguments from grogupy + fdf_arguments : dict + Arguments read from the fdf input file + command_line_arguments : dict + Arguments from the command line + + Returns: + dict + The final simulation parameters + """ + + # iterate over fdf_arguments and update default arguments + for key, value in fdf_arguments.values(): + if value is not None and key in accepted_inputs: + default_arguments[key] = value + + # iterate over command_line_arguments and update default arguments + for key, value in command_line_arguments.values(): + if value is not None and key in accepted_inputs: + default_arguments[key] = value + + return default_arguments def save_pickle(outfile, data): @@ -89,6 +244,64 @@ def load_pickle(infile): return data +def print_job_description(simulation_parameters): + """It prints the parameters and the description of the job. 
+ + + Args: + simulation_parameters : dict + It contains the simulations parameters + """ + + print( + "================================================================================================================================================================" + ) + print("Input file: ") + print(simulation_parameters["infile"]) + print("Output file: ") + print(simulation_parameters["outfile"]) + print( + "Number of nodes in the parallel cluster: ", + simulation_parameters["parallel_size"], + ) + if simulation_parameters["parallel_solver_for_Gk"]: + print("solver used for Greens function calculation: parallel") + else: + print("solver used for Greens function calculation: sequential") + print( + "================================================================================================================================================================" + ) + print("Cell [Ang]: ") + print(simulation_parameters["cell"]) + print( + "================================================================================================================================================================" + ) + print("DFT axis: ") + print(simulation_parameters["scf_xcf_orientation"]) + print("Quantization axis and perpendicular rotation directions:") + for ref in simulation_parameters["ref_xcf_orientations"]: + print(ref["o"], " --» ", ref["vw"]) + print( + "================================================================================================================================================================" + ) + print("Parameters for the contour integral:") + print("Number of k points: ", simulation_parameters["kset"]) + print("k point directions: ", simulation_parameters["kdirs"]) + if simulation_parameters["automatic_ebot"]: + print( + "Ebot: ", + simulation_parameters["ebot"], + " WARNING: This was automatically determined!", + ) + else: + print("Ebot: ", simulation_parameters["ebot"]) + print("Eset: ", simulation_parameters["eset"]) + print("Esetp: ", simulation_parameters["esetp"]) + print( + "================================================================================================================================================================" + ) + + def print_parameters(simulation_parameters): """It prints the simulation parameters for the grogu out. @@ -255,61 +468,3 @@ def print_runtime_information(times): print( f"Calculate energies and magnetic components: {times['end_time'] - times['green_function_inversion_time']:.3f} s" ) - - -def print_job_description(simulation_parameters): - """It prints the parameters and the description of the job. 
- - - Args: - simulation_parameters : dict - It contains the simulations parameters - """ - - print( - "================================================================================================================================================================" - ) - print("Input file: ") - print(simulation_parameters["infile"]) - print("Output file: ") - print(simulation_parameters["outfile"]) - print( - "Number of nodes in the parallel cluster: ", - simulation_parameters["parallel_size"], - ) - if simulation_parameters["parallel_solver_for_Gk"]: - print("solver used for Greens function calculation: parallel") - else: - print("solver used for Greens function calculation: sequential") - print( - "================================================================================================================================================================" - ) - print("Cell [Ang]: ") - print(simulation_parameters["cell"]) - print( - "================================================================================================================================================================" - ) - print("DFT axis: ") - print(simulation_parameters["scf_xcf_orientation"]) - print("Quantization axis and perpendicular rotation directions:") - for ref in simulation_parameters["ref_xcf_orientations"]: - print(ref["o"], " --» ", ref["vw"]) - print( - "================================================================================================================================================================" - ) - print("Parameters for the contour integral:") - print("Number of k points: ", simulation_parameters["kset"]) - print("k point directions: ", simulation_parameters["kdirs"]) - if simulation_parameters["automatic_ebot"]: - print( - "Ebot: ", - simulation_parameters["ebot"], - " WARNING: This was automatically determined!", - ) - else: - print("Ebot: ", simulation_parameters["ebot"]) - print("Eset: ", simulation_parameters["eset"]) - print("Esetp: ", simulation_parameters["esetp"]) - print( - "================================================================================================================================================================" - ) diff --git a/test.ipynb b/test.ipynb index 1ddcb62..b7f8626 100644 --- a/test.ipynb +++ b/test.ipynb @@ -16,16 +16,9 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "info:0: SislInfo: Please install tqdm (pip install tqdm) for better looking progress bars\n" - ] - }, { "name": "stdout", "output_type": "stream", @@ -38,7 +31,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "[Daniels-Air:00184] shmem: mmap: an error occurred while determining whether or not /var/folders/yh/dx7xl94n3g52ts3td8qcxjcc0000gn/T//ompi.Daniels-Air.501/jf.0/273678336/sm_segment.Daniels-Air.501.10500000.0 could be created.\n" + "[Mac:30692] shmem: mmap: an error occurred while determining whether or not /var/folders/yh/dx7xl94n3g52ts3td8qcxjcc0000gn/T//ompi.Mac.501/jf.0/6815744/sm_segment.Mac.501.680000.0 could be created.\n" ] } ], @@ -70,103 +63,11 @@ " print(\"numpy version unknown.\")" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "ename": "TypeError", - "evalue": "'NoneType' object is not iterable", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - 
"\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[58], line 28\u001b[0m\n\u001b[1;32m 26\u001b[0m magnetic_entities \u001b[38;5;241m=\u001b[39m fdf\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mMagneticEntities\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 27\u001b[0m fdf_magnetic_entities \u001b[38;5;241m=\u001b[39m []\n\u001b[0;32m---> 28\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m mag_ent \u001b[38;5;129;01min\u001b[39;00m magnetic_entities:\n\u001b[1;32m 29\u001b[0m row \u001b[38;5;241m=\u001b[39m mag_ent\u001b[38;5;241m.\u001b[39msplit()\n\u001b[1;32m 30\u001b[0m dat \u001b[38;5;241m=\u001b[39m []\n", - "\u001b[0;31mTypeError\u001b[0m: 'NoneType' object is not iterable" - ] - } - ], - "source": [ - "# open fdf input\n", - "fdf = sisl.io.fdfSileSiesta(\"input.fdf\")\n", - "\n", - "fdf_parameters = dict()\n", - "fdf_parameters[\"infile\"] = fdf.get(\"InputFile\")\n", - "fdf_parameters[\"outfile\"] = fdf.get(\"OutputFile\")\n", - "fdf_parameters[\"scf_xcf_orientation\"] = np.array(fdf.get(\"ScfXcfOrientation\"))\n", - "\n", - "rotations = fdf.get(\"XCF_Rotation\")\n", - "if rotations is None:\n", - " pass\n", - "else:\n", - " fdf_rot = []\n", - " for rot in rotations:\n", - " dat = np.array(rot.split()[:9], dtype=float)\n", - " o = dat[:3]\n", - " vw = dat[3:]\n", - " vw = vw.reshape(2, 3)\n", - " fdf_rot.append(dict(o=o, vw=vw))\n", - " fdf_parameters[\"ref_xcf_orientations\"] = fdf_rot\n", - "\n", - "pairs = fdf.get(\"Pairs\")\n", - "if pairs in None:\n", - " pass\n", - "else:\n", - " fdf_pairs = []\n", - " for fdf_pair in pairs:\n", - " dat = np.array(fdf_pair.split()[:5], dtype=int)\n", - " my_pair = dict(ai=dat[0], aj=dat[1], Ruc=np.array(dat[2:]))\n", - " fdf_pairs.append(my_pair)\n", - "\n", - "magnetic_entities = fdf.get(\"MagneticEntities\")\n", - "if magnetic_entities is None:\n", - " pass\n", - "else:\n", - " fdf_magnetic_entities = []\n", - " for mag_ent in magnetic_entities:\n", - " row = mag_ent.split()\n", - " dat = []\n", - " for string in row:\n", - " if string.find(\"#\") != -1:\n", - " break\n", - " dat.append(string)\n", - " if dat[0] == \"Cluster\" or \"cluster\":\n", - " fdf_magnetic_entities.append(dict(atom=dat[1:]))\n", - " continue\n", - " elif dat[0] == \"AtomShell\" or \"Atomshell\" or \"atomShell\" or \"atomshell\":\n", - " fdf_magnetic_entities.append(dict(atom=dat[1], l=dat[2:]))\n", - " continue\n", - " elif dat[0] == \"AtomOrbital\" or \"Atomorbital\" or \"tomOrbital\" or \"atomorbital\":\n", - " continue\n", - " elif dat[0] == \"Orbitals\" or \"orbitals\":\n", - " continue\n", - " else:\n", - " print(\"Unrecognizable magnetic entity in fdf\")\n", - " break\n", - "\n", - "\n", - "fdf_parameters[\"kset\"] = fdf.get(\"INTEGRAL.Kset\")\n", - "fdf_parameters[\"kdirs\"] = fdf.get(\"INTEGRAL.Kdirs\")\n", - "fdf_parameters[\"ebot\"] = fdf.get(\"INTEGRAL.Ebot\")\n", - "fdf_parameters[\"eset\"] = fdf.get(\"INTEGRAL.Eset\")\n", - "fdf_parameters[\"esetp\"] = fdf.get(\"INTEGRAL.Esetp\")\n", - "fdf_parameters[\"parallel_solver_for_Gk\"] = fdf.get(\"GREEN.ParallelSolver\")\n", - "fdf_parameters[\"padawan_mode\"] = fdf.get(\"PadawanMode\")" - ] - }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], "source": [ "################################################################################\n", "#################################### INPUT 
#####################################\n", @@ -217,7 +118,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [ {