From 8af7757c87d73c50fcb00ef2eaac8950fda9a249 Mon Sep 17 00:00:00 2001 From: Daniel Pozsar Date: Tue, 12 Nov 2024 12:15:41 +0100 Subject: [PATCH] WARNING: this may break things, a bunch of stuff changed --- README.md | 66 +++++++----- src/grogupy/__init__.py | 2 +- src/grogupy/{globals.py => constants.py} | 14 +-- src/grogupy/core.py | 17 +-- src/grogupy/grogu.py | 132 ++++++++++++----------- src/grogupy/io.py | 48 ++++++--- src/grogupy/magnetism.py | 36 +++++-- src/grogupy/utilities.py | 54 ++++++---- test.ipynb | 109 ++++++++++--------- tests/test_io.py | 2 +- tests/test_utilities.py | 2 +- 11 files changed, 279 insertions(+), 203 deletions(-) rename src/grogupy/{globals.py => constants.py} (85%) diff --git a/README.md b/README.md index 64da0aa..268ebd6 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,20 @@ # Relativistic magnetic interactions from non-orthogonal basis sets + More on the theoretical background can be seen on [arXiv](https://arxiv.org/abs/2309.02558). -# TODO -## Testing +## TODO + +### Testing + - Test on lat3_791 -- Test on varcell -- Test on varcell_myrun -- Test on varcell_myrun_2 +- Test on `varcell` +- Test on `varcell_myrun` +- Test on `varcell_myrun_2` - Test scaling - Test rotations to x-y 30 and 60 -## Developing +### Developing + - Magnetic entities cannot handle orbitals, only shells - Use ReadThe Docs [addons](https://docs.readthedocs.io/en/stable/addons.html) - Check the symmetrization of the Hamiltonian and overlap matrix to make them hermitian @@ -21,58 +25,70 @@ More on the theoretical background can be seen on [arXiv](https://arxiv.org/abs/ - fdf atom input - orbital indexing must be correct -# Building wheel +## Building wheel + See detailed documentation on [PYPI](https://packaging.python.org/en/latest/tutorials/packaging-projects/). - First you need some API Tokens for Test PYPI ,to be able to upload. You can read about it [here](https://test.pypi.org/help/#apitoken). I own the current project, so you have to contact me. - Use the following commands for a quick setup: + - Build wheel -``` + +``` bash python -m build ``` - Push to PYPI test repository -``` + +``` bash python -m twine upload --repository testpypi dist/* ``` -# Usage -## For end users +## Usage + +### For end users + Download and install from [PYPI](https://test.pypi.org/project/grogu-magn/) test repository. Be aware that this is not up to date yet!!! + +``` bash +python3 -m pip install --index-url https://test.pypi.org/simple/ grogupy ``` -python3 -m pip install --index-url https://test.pypi.org/simple/ grogu_magn -``` -## For developers +### For developers + - Clone repository from Gitea -``` + +``` bash git clone https://gitea.vo.elte.hu/et209d/grogu.git ``` - Create a virtual environment (.venv) (for example with VsCode) - * Use python 3.9.6 - * install dependencies from: - * requirements.txt - * requirements-dev.txt - * /docs/requirements.txt + - Use python 3.9.6 + - install dependencies from: + - requirements.txt + - requirements-dev.txt + - /docs/requirements.txt - Install and run pre-commit -``` + +``` bash pre-commit install pre-commit run --all-files ``` To build the documentation navigate to the `docs/source` folder. Then autogenerate the documentation and build. After this the html page can be found in `docs/source/_build/html`. Follow the commands below. 
-``` + +``` bash cd docs/source sphinx-apidoc -o ./implementation/ ../../src/ make clean make html ``` -To build a pdf containing the documentation instead of make, first navigate to the `docs/source` folde, then use the rst2pdf extension. -``` + +To build a pdf containing the documentation instead of make, first navigate to the `docs/source` folder, then use the rst2pdf extension. + +``` bash cd docs/source sphinx-apidoc -o ./implementation/ ../../src/ sphinx-build -b pdf . _build/pdf diff --git a/src/grogupy/__init__.py b/src/grogupy/__init__.py index 691ef46..81e521d 100644 --- a/src/grogupy/__init__.py +++ b/src/grogupy/__init__.py @@ -21,8 +21,8 @@ """Docstring in init. """ +from grogupy.constants import * from grogupy.core import * -from grogupy.globals import * from grogupy.io import * from grogupy.magnetism import * from grogupy.utilities import * diff --git a/src/grogupy/globals.py b/src/grogupy/constants.py similarity index 85% rename from src/grogupy/globals.py rename to src/grogupy/constants.py index 9b76a27..b813c4c 100644 --- a/src/grogupy/globals.py +++ b/src/grogupy/constants.py @@ -18,17 +18,19 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +from typing import Final + import numpy as np # Pauli matrices -TAU_X = np.array([[0, 1], [1, 0]]) -TAU_Y = np.array([[0, -1j], [1j, 0]]) -TAU_Z = np.array([[1, 0], [0, -1]]) -TAU_0 = np.array([[1, 0], [0, 1]]) +TAU_X: Final[np.array] = np.array([[0, 1], [1, 0]]) +TAU_Y: Final[np.array] = np.array([[0, -1j], [1j, 0]]) +TAU_Z: Final[np.array] = np.array([[1, 0], [0, -1]]) +TAU_0: Final[np.array] = np.array([[1, 0], [0, 1]]) # list of accepted input parameters -ACCEPTED_INPUTS = [ +ACCEPTED_INPUTS: Final[list] = [ "infile", "outfile", "scf_xcf_orientation", @@ -42,7 +44,7 @@ ACCEPTED_INPUTS = [ "padawan_mode", ] -DEFAULT_ARGUMENTS = dict( +DEFAULT_ARGUMENTS: Final[dict] = dict( infile=None, outfile=None, scf_xcf_orientation=np.array([0, 0, 1]), diff --git a/src/grogupy/core.py b/src/grogupy/core.py index 0c632d2..727ebbc 100644 --- a/src/grogupy/core.py +++ b/src/grogupy/core.py @@ -23,12 +23,13 @@ import numpy as np from numpy.linalg import inv +from sisl.physics import Hamiltonian from grogupy.magnetism import blow_up_orbindx, parse_magnetic_entity from grogupy.utilities import commutator -def onsite_projection(matrix, idx1, idx2): +def onsite_projection(matrix: np.array, idx1: np.array, idx2: np.array) -> np.array: """It produces the slices of a matrix for the on site projection. The slicing is along the last two axes as these contains the orbital indexing. @@ -47,7 +48,7 @@ def onsite_projection(matrix, idx1, idx2): return matrix[..., idx1, :][..., idx2] -def calc_Vu(H, Tu): +def calc_Vu(H: np.array, Tu: np.array) -> np.array: """Calculates the local perturbation in case of a spin rotation. Args: @@ -69,7 +70,7 @@ def calc_Vu(H, Tu): return Vu1, Vu2 -def build_hh_ss(dh): +def build_hh_ss(dh: Hamiltonian) -> tuple[np.array, np.array]: """It builds the Hamiltonian and Overlap matrix from the sisl.dh class. It restructures the data in the SPIN BOX representation, where NS is @@ -145,8 +146,8 @@ def build_hh_ss(dh): def setup_pairs_and_magnetic_entities( - magnetic_entities, pairs, dh, simulation_parameters -): + magnetic_entities: list, pairs: list, dh, simulation_parameters: dict +) -> tuple[list, list]: """It creates the complete structure of the dictionaries and fills some basic data. It creates orbital indexes, spin box indexes, coordinates and tags for magnetic entities. 
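For reference, a minimal sketch of the input lists this function consumes, mirroring the example definitions at the bottom of `grogu.py`; the `dh` Hamiltonian and `simulation_parameters` dictionary are assumed to come from the surrounding setup and are only referenced in the commented call.

``` python
import numpy as np

# magnetic entities are dictionaries with atom/shell/orbital selectors,
# pairs reference two entities plus a unit cell offset Ruc
magnetic_entities = [dict(atom=3, l=2), dict(atom=4, l=2)]
pairs = [dict(ai=0, aj=1, Ruc=np.array([0, 0, 0]))]

# pairs, magnetic_entities = setup_pairs_and_magnetic_entities(
#     magnetic_entities, pairs, dh, simulation_parameters
# )
```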
@@ -297,7 +298,7 @@ def setup_pairs_and_magnetic_entities( return pairs, magnetic_entities -def parallel_Gk(HK, SK, eran, eset): +def parallel_Gk(HK: np.array, SK: np.array, eran: np.array, eset: int) -> np.array: """Calculates the Greens function by inversion. It calculates the Greens function on all the energy levels at the same time. @@ -321,7 +322,7 @@ def parallel_Gk(HK, SK, eran, eset): return inv(SK * eran.reshape(eset, 1, 1) - HK) -def sequential_GK(HK, SK, eran, eset): +def sequential_GK(HK: np.array, SK: np.array, eran: np.array, eset: int) -> np.array: """Calculates the Greens function by inversion. It calculates sequentially over the energy levels. @@ -350,7 +351,7 @@ def sequential_GK(HK, SK, eran, eset): return Gk -def remove_clutter_for_save(pairs, magnetic_entities): +def remove_clutter_for_save(pairs: list, magnetic_entities: list) -> tuple[list, list]: """Removes unimportant data from the dictionaries. It is used before saving to throw away data that diff --git a/src/grogupy/grogu.py b/src/grogupy/grogu.py index 7c0aa85..e405001 100644 --- a/src/grogupy/grogu.py +++ b/src/grogupy/grogu.py @@ -42,16 +42,16 @@ from mpi4py import MPI try: from tqdm import tqdm - tqdm_imported = True + tqdm_imported: bool = True except: print("Please install tqdm for nice progress bar.") - tqdm_imported = False + tqdm_imported: bool = False from grogupy import * -def parse_command_line(): +def parse_command_line() -> dict: """This function can read input from the command line.""" parser = ArgumentParser() @@ -59,12 +59,19 @@ def parse_command_line(): parser.add_argument( "-i", "--input", - dest="infile", + dest="grogupy_infile", default=None, type=str, - help="Input file name", + help="Input file name for the Grogupy fdf file", required=True, ) + parser.add_argument( + "--siesta-input", + dest="infile", + default=None, + type=str, + help="Input file name for the Siesta fdf file", + ) parser.add_argument( "-o", "--output", @@ -129,16 +136,16 @@ def parse_command_line(): return cmd_line_args -def main(simulation_parameters, magnetic_entities, pairs): +def main(simulation_parameters: dict, magnetic_entities: list, pairs: list) -> None: # runtime information - times = dict() + times: dict = dict() times["start_time"] = timer() # MPI parameters comm = MPI.COMM_WORLD - size = comm.Get_size() - rank = comm.Get_rank() - root_node = 0 + size: int = comm.Get_size() + rank: int = comm.Get_rank() + root_node: int = 0 if rank == root_node: # include parallel size in simulation parameters @@ -172,8 +179,8 @@ def main(simulation_parameters, magnetic_entities, pairs): # add third direction for off-diagonal anisotropy for orientation in simulation_parameters["ref_xcf_orientations"]: - o1 = orientation["vw"][0] - o2 = orientation["vw"][1] + o1: np.array = orientation["vw"][0] + o2: np.array = orientation["vw"][1] orientation["vw"].append((o1 + o2) / np.sqrt(2)) # read sile @@ -186,7 +193,7 @@ def main(simulation_parameters, magnetic_entities, pairs): simulation_parameters["cell"] = fdf.read_geometry().cell # unit cell index - uc_in_sc_idx = dh.lattice.sc_index([0, 0, 0]) + uc_in_sc_idx: int = dh.lattice.sc_index([0, 0, 0]) if rank == root_node: print("\n\n\n\n\n") @@ -205,9 +212,9 @@ def main(simulation_parameters, magnetic_entities, pairs): "================================================================================================================================================================" ) - NO = dh.no # shorthand for number of orbitals in the unit cell + NO: int = dh.no # shorthand for number of 
orbitals in the unit cell - # reformat Hamltonian and Overlap matrix for manipulations + # reformat Hamiltonian and Overlap matrix for manipulations hh, ss = build_hh_ss(dh) # symmetrizing Hamiltonian and Overlap matrix to make them hermitian @@ -219,14 +226,18 @@ def main(simulation_parameters, magnetic_entities, pairs): ss[i], ss[j] = (s1 + s1d.T.conj()) / 2, (s1d + s1.T.conj()) / 2 # identifying TRS and TRB parts of the Hamiltonian - TAUY = np.kron(np.eye(NO), TAU_Y) - hTR = np.array([TAUY @ hh[i].conj() @ TAUY for i in range(dh.lattice.nsc.prod())]) - hTRS = (hh + hTR) / 2 - hTRB = (hh - hTR) / 2 + TAUY: np.array = np.kron(np.eye(NO), TAU_Y) + hTR: np.array = np.array( + [TAUY @ hh[i].conj() @ TAUY for i in range(dh.lattice.nsc.prod())] + ) + hTRS: np.array = (hh + hTR) / 2 + hTRB: np.array = (hh - hTR) / 2 # extracting the exchange field - traced = [spin_tracer(hTRB[i]) for i in range(dh.lattice.nsc.prod())] # equation 77 - XCF = np.array( + traced: np.array = [ + spin_tracer(hTRB[i]) for i in range(dh.lattice.nsc.prod()) + ] # equation 77 + XCF: np.array = np.array( [ np.array([f["x"] / 2 for f in traced]), np.array([f["y"] / 2 for f in traced]), @@ -235,7 +246,7 @@ def main(simulation_parameters, magnetic_entities, pairs): ) # check if exchange field has scalar part - max_xcfs = abs(np.array(np.array([f["c"] / 2 for f in traced]))).max() + max_xcfs: float = abs(np.array(np.array([f["c"] / 2 for f in traced]))).max() if max_xcfs > 1e-12: warnings.warn( f"Exchange field has non negligible scalar part. Largest value is {max_xcfs}" @@ -270,10 +281,10 @@ def main(simulation_parameters, magnetic_entities, pairs): ) # generate weights for k points - wkset = np.ones(len(kset)) / len(kset) + wkset: np.array = np.ones(len(kset)) / len(kset) # split the k points based on MPI size - kpcs = np.array_split(kset, size) + kpcs: np.array = np.array_split(kset, size) # use progress bar if available if rank == root_node and tqdm_imported: @@ -289,20 +300,20 @@ def main(simulation_parameters, magnetic_entities, pairs): # this will contain the three Hamiltonian in the # reference directions needed to calculate the energy # variations upon rotation - hamiltonians = [] + hamiltonians: list = [] # iterate over the reference directions (quantization axes) for i, orient in enumerate(simulation_parameters["ref_xcf_orientations"]): # obtain rotated exchange field and Hamiltonian - R = RotMa2b(simulation_parameters["scf_xcf_orientation"], orient["o"]) - rot_XCF = np.einsum("ij,jklm->iklm", R, XCF) - rot_H_XCF = sum( + R: np.array = RotMa2b(simulation_parameters["scf_xcf_orientation"], orient["o"]) + rot_XCF: np.array = np.einsum("ij,jklm->iklm", R, XCF) + rot_H_XCF: np.array = sum( [np.kron(rot_XCF[i], tau) for i, tau in enumerate([TAU_X, TAU_Y, TAU_Z])] ) - rot_H_XCF_uc = rot_H_XCF[uc_in_sc_idx] + rot_H_XCF_uc: np.array = rot_H_XCF[uc_in_sc_idx] # obtain total Hamiltonian with the rotated exchange field - rot_H = hTRS + rot_H_XCF # equation 76 + rot_H: np.array = hTRS + rot_H_XCF # equation 76 # store the relevant information of the Hamiltonian hamiltonians.append(dict(orient=orient["o"], H=rot_H)) @@ -310,11 +321,11 @@ def main(simulation_parameters, magnetic_entities, pairs): # these are the rotations (for now) perpendicular to the quantization axis for u in orient["vw"]: # section 2.H - Tu = np.kron(np.eye(NO, dtype=int), tau_u(u)) + Tu: np.array = np.kron(np.eye(NO, dtype=int), tau_u(u)) Vu1, Vu2 = calc_Vu(rot_H_XCF_uc, Tu) for mag_ent in magnetic_entities: - idx = mag_ent["spin_box_indices"] + idx: int = 
mag_ent["spin_box_indices"] # fill up the perturbed potentials (for now) based on the on-site projections mag_ent["Vu1"][i].append(onsite_projection(Vu1, idx, idx)) mag_ent["Vu2"][i].append(onsite_projection(Vu2, idx, idx)) @@ -347,7 +358,7 @@ def main(simulation_parameters, magnetic_entities, pairs): ) # https://stackoverflow.com/questions/70746660/how-to-predict-memory-requirement-for-np-linalg-inv # memory is O(64 n**2) for complex matrices - memory_size = getsizeof(hamiltonians[0]["H"].base) / 1024 + memory_size: int = getsizeof(hamiltonians[0]["H"].base) / 1024 print( f"Memory taken by a single Hamiltonian is: {getsizeof(hamiltonians[0]['H'].base) / 1024} KB" ) @@ -376,12 +387,12 @@ def main(simulation_parameters, magnetic_entities, pairs): # sampling the integrand on the contour and the BZ for k in kpcs[rank]: # weight of k point in BZ integral - wk = wkset[rank] + wk: float = wkset[rank] # iterate over reference directions for i, hamiltonian_orientation in enumerate(hamiltonians): # calculate Hamiltonian and Overlap matrix in a given k point - H = hamiltonian_orientation["H"] + H: np.array = hamiltonian_orientation["H"] HK, SK = hsk(H, ss, dh.sc_off, k) if simulation_parameters["parallel_solver_for_Gk"]: @@ -392,22 +403,22 @@ def main(simulation_parameters, magnetic_entities, pairs): # store the Greens function slice of the magnetic entities for mag_ent in magnetic_entities: - idx = mag_ent["spin_box_indices"] + idx: int = mag_ent["spin_box_indices"] mag_ent["Gii_tmp"][i] += onsite_projection(Gk, idx, idx) * wk for pair in pairs: # add phase shift based on the cell difference - phase = np.exp(1j * 2 * np.pi * k @ pair["Ruc"].T) + phase: np.array = np.exp(1j * 2 * np.pi * k @ pair["Ruc"].T) # get the pair orbital sizes from the magnetic entities - ai = magnetic_entities[pair["ai"]]["spin_box_indices"] - aj = magnetic_entities[pair["aj"]]["spin_box_indices"] + ai: int = magnetic_entities[pair["ai"]]["spin_box_indices"] + aj: int = magnetic_entities[pair["aj"]]["spin_box_indices"] # store the Greens function slice of the magnetic entities pair["Gij_tmp"][i] += onsite_projection(Gk, ai, aj) * phase * wk pair["Gji_tmp"][i] += onsite_projection(Gk, aj, ai) / phase * wk - # summ reduce partial results of mpi nodes + # sum reduce partial results of mpi nodes for i in range(len(hamiltonians)): for mag_ent in magnetic_entities: comm.Reduce(mag_ent["Gii_tmp"][i], mag_ent["Gii"][i], root=root_node) @@ -430,11 +441,11 @@ def main(simulation_parameters, magnetic_entities, pairs): for tracker, mag_ent in enumerate(magnetic_entities): # iterate over the quantization axes for i, Gii in enumerate(mag_ent["Gii"]): - storage = [] + storage: list = [] # iterate over the first and second order local perturbations for Vu1, Vu2 in zip(mag_ent["Vu1"][i], mag_ent["Vu2"][i]): # The Szunyogh-Lichtenstein formula - traced = np.trace( + traced: np.array = np.trace( (Vu2 @ Gii + 0.5 * Gii @ Vu1 @ Gii), axis1=1, axis2=2 ) # this is the on site projection # evaluation of the contour integral @@ -450,16 +461,16 @@ def main(simulation_parameters, magnetic_entities, pairs): for tracker, pair in enumerate(pairs): # iterate over the quantization axes for i, (Gij, Gji) in enumerate(zip(pair["Gij"], pair["Gji"])): - site_i = magnetic_entities[pair["ai"]] - site_j = magnetic_entities[pair["aj"]] - storage = [] + site_i: int = magnetic_entities[pair["ai"]] + site_j: int = magnetic_entities[pair["aj"]] + storage: list = [] # iterate over the first order local perturbations in all possible orientations for the two sites # 
actually all possible orientations without the orientation for the off-diagonal anisotropy # that is why we only take the first two of each Vu1 for Vui in site_i["Vu1"][i][:2]: for Vuj in site_j["Vu1"][i][:2]: # The Szunyogh-Lichtenstein formula - traced = np.trace( + traced: np.array = np.trace( (Vui @ Gij @ Vuj @ Gji), axis1=1, axis2=2 ) # this is the on site projection # evaluation of the contour integral @@ -499,7 +510,7 @@ def main(simulation_parameters, magnetic_entities, pairs): # remove unwanted stuff before saving pairs, magnetic_entities = remove_clutter_for_save(pairs, magnetic_entities) # create output dictionary with all the relevant data - results = dict( + results: dict = dict( parameters=simulation_parameters, magnetic_entities=magnetic_entities, pairs=pairs, @@ -514,28 +525,25 @@ def main(simulation_parameters, magnetic_entities, pairs): if __name__ == "__main__": # loading parameters - # it is not clear how to give grogu.fdf path... - # command_line_arguments = parse_command_line() - # fdf_arguments, magnetic_entities, pairs = read_fdf(command_line_arguments["infile"]) - - # right now we do not use any of these input, but it shows - # the order of priority when processing arguments - default_arguments = False - fdf_arguments = False - command_line_arguments = False - # simulation_parameters = process_input_args( - # default_arguments, fdf_arguments, command_line_arguments, ACCEPTED_INPUTS - # ) + command_line_arguments = parse_command_line() + fdf_arguments, magnetic_entities, pairs = read_grogupy_fdf( + command_line_arguments["grogupy_infile"] + ) + + # combine the inputs to a single dictionary + simulation_parameters: Final[dict] = process_input_args( + DEFAULT_ARGUMENTS, fdf_arguments, command_line_arguments, ACCEPTED_INPUTS + ) #################################################################################################### # This is the input file for now # #################################################################################################### - magnetic_entities = [ + magnetic_entities: list = [ dict(atom=3, l=2), dict(atom=4, l=2), dict(atom=5, l=2), ] - pairs = [ + pairs: list = [ dict(ai=0, aj=1, Ruc=np.array([0, 0, 0])), dict(ai=0, aj=2, Ruc=np.array([0, 0, 0])), dict(ai=1, aj=2, Ruc=np.array([0, 0, 0])), @@ -546,7 +554,7 @@ if __name__ == "__main__": dict(ai=1, aj=2, Ruc=np.array([-2, 0, 0])), dict(ai=1, aj=2, Ruc=np.array([-3, 0, 0])), ] - simulation_parameters = dict() + simulation_parameters: dict = dict() simulation_parameters["infile"] = ( "/Users/danielpozsar/Downloads/nojij/Fe3GeTe2/monolayer/soc/lat3_791/Fe3GeTe2.fdf" ) diff --git a/src/grogupy/io.py b/src/grogupy/io.py index 77fbc38..dd67f35 100644 --- a/src/grogupy/io.py +++ b/src/grogupy/io.py @@ -22,13 +22,15 @@ """ from pickle import dump, load +from typing import Final import numpy as np -from globals import ACCEPTED_INPUTS, DEFAULT_ARGUMENTS from sisl.io import fdfSileSiesta +from grogupy.constants import ACCEPTED_INPUTS -def read_fdf(path): + +def read_grogupy_fdf(path: str) -> tuple[dict, list, list]: """It reads the simulation parameters, magnetic entities and pairs from the fdf. 
Args: @@ -113,26 +115,39 @@ def read_fdf(path): MagneticEntities = fdf.get("MagneticEntities") if MagneticEntities is not None: magnetic_entities = [] + # iterate over magnetic entities for mag_ent in MagneticEntities: + # drop comments from data row = mag_ent.split() dat = [] for string in row: if string.find("#") != -1: break dat.append(string) + # cluster input if dat[0] in {"Cluster", "cluster"}: magnetic_entities.append(dict(atom=[int(_) for _ in dat[1:]])) continue + # atom input, same as cluster, but raises + # error when multiple atoms are given + if dat[0] in {"Atom", "atom"}: + if len(dat) > 2: + raise Exception("Atom input must be a single integer") + magnetic_entities.append(dict(atom=int(dat[1]))) + continue + # atom and shell information elif dat[0] in {"AtomShell", "Atomshell", "atomShell", "atomshell"}: magnetic_entities.append( dict(atom=int(dat[1]), l=[int(_) for _ in dat[2:]]) ) continue + # atom and orbital information elif dat[0] in {"AtomOrbital", "Atomorbital", "tomOrbital", "atomorbital"}: magnetic_entities.append( dict(atom=int(dat[1]), orb=[int(_) for _ in dat[2:]]) ) continue + # orbital information elif dat[0] in {"Orbitals", "orbitals"}: magnetic_entities.append(dict(orb=[int(_) for _ in dat[1:]])) continue @@ -143,11 +158,11 @@ def read_fdf(path): def process_input_args( - default_arguments, - fdf_arguments, - command_line_arguments, - accepted_inputs=ACCEPTED_INPUTS, -): + DEFAULT_ARGUMENTS: dict, + fdf_arguments: dict, + command_line_arguments: dict, + accepted_inputs: dict = ACCEPTED_INPUTS, +) -> dict: """It returns the final simulation parameters based on the inputs. The merging is done in the order of priority: @@ -168,6 +183,9 @@ def process_input_args( The final simulation parameters """ + # copy input so it does not get changed + default_arguments = DEFAULT_ARGUMENTS.copy() + # iterate over fdf_arguments and update default arguments for key, value in fdf_arguments.values(): if value is not None and key in accepted_inputs: @@ -181,7 +199,7 @@ def process_input_args( return default_arguments -def save_pickle(outfile, data): +def save_pickle(outfile: str, data: dict) -> None: """Saves the data in the outfile with pickle. Args: @@ -196,7 +214,7 @@ def save_pickle(outfile, data): dump(data, output_file) -def load_pickle(infile): +def load_pickle(infile: str) -> dict: """Loads the data from the infile with pickle. Args: @@ -215,7 +233,7 @@ def load_pickle(infile): return data -def print_job_description(simulation_parameters): +def print_job_description(simulation_parameters: dict) -> None: """It prints the parameters and the description of the job. @@ -273,7 +291,7 @@ def print_job_description(simulation_parameters): ) -def print_parameters(simulation_parameters): +def print_parameters(simulation_parameters: dict) -> None: """It prints the simulation parameters for the grogu out. Args: @@ -319,7 +337,7 @@ def print_parameters(simulation_parameters): ) -def print_atoms_and_pairs(magnetic_entities, pairs): +def print_atoms_and_pairs(magnetic_entities: list, pairs: list) -> None: """It prints the pair and magnetic entity information for the grogu out. Args: @@ -408,7 +426,7 @@ def print_atoms_and_pairs(magnetic_entities, pairs): ) -def print_runtime_information(times): +def print_runtime_information(times: dict) -> None: """It prints the runtime information for the grogu out. 
Args: @@ -426,10 +444,10 @@ def print_runtime_information(times): f"Hamiltonian conversion and XC field extraction: {times['H_and_XCF_time'] - times['setup_time']:.3f} s" ) print( - f"Pair and site datastructure creatrions: {times['site_and_pair_dictionaries_time'] - times['H_and_XCF_time']:.3f} s" + f"Pair and site datastructures creations: {times['site_and_pair_dictionaries_time'] - times['H_and_XCF_time']:.3f} s" ) print( - f"k set cration and distribution: {times['k_set_time'] - times['site_and_pair_dictionaries_time']:.3f} s" + f"k set creation and distribution: {times['k_set_time'] - times['site_and_pair_dictionaries_time']:.3f} s" ) print( f"Rotating XC potential: {times['reference_rotations_time'] - times['k_set_time']:.3f} s" diff --git a/src/grogupy/magnetism.py b/src/grogupy/magnetism.py index 0f3d73f..2024bc4 100644 --- a/src/grogupy/magnetism.py +++ b/src/grogupy/magnetism.py @@ -21,10 +21,13 @@ """Docstring in magnetism. """ +from typing import Union + import numpy as np +from sisl.physics import Hamiltonian -def blow_up_orbindx(orb_indices): +def blow_up_orbindx(orb_indices: np.array) -> np.array: """Function to blow up orbital indices to make SPIN BOX indices. Args: @@ -41,7 +44,7 @@ def blow_up_orbindx(orb_indices): return orb_indices -def spin_tracer(M): +def spin_tracer(M: np.array) -> dict: """Spin tracer utility. This takes an operator with the orbital-spin sequence: @@ -75,7 +78,12 @@ def spin_tracer(M): return M_o -def parse_magnetic_entity(dh, atom=None, l=None, **kwargs): +def parse_magnetic_entity( + dh: Hamiltonian, + atom: Union[None, int, list] = None, + l: Union[None, int, list] = None, + orb: Union[None, int, list] = None, +) -> np.array: """Function to define orbital indexes of a given magnetic entity. Args: @@ -83,13 +91,19 @@ def parse_magnetic_entity(dh, atom=None, l=None, **kwargs): Hamiltonian from sisl atom : integer or list of integers, optional Defining atom (or atoms) in the unit cell forming the magnetic entity. Defaults to None - l : integer, optional + l : integer or list of integers, optional Defining the angular momentum channel. Defaults to None + orb : integer or list of integers, optional + Defining the orbital index in the Hamiltonian or on the atom. Defaults to None Returns: list The orbital indexes of the given magnetic entity """ + + # the case for the Orbitals keyword + if atom is None: + return orb # case where we deal with more than one atom defining the magnetic entity if type(atom) == list: dat = [] @@ -100,21 +114,21 @@ def parse_magnetic_entity(dh, atom=None, l=None, **kwargs): ): # if specified we restrict to given l angular momentum channel inside each atom a_orb_idx = a_orb_idx[[o.l == l for o in dh.geometry.atoms[a].orbitals]] dat.append(a_orb_idx) - orbital_indeces = np.hstack(dat) - # case where we deal with a singel atom magnetic entity + orbital_indexes = np.hstack(dat) + # case where we deal with a single atom magnetic entity elif type(atom) == int: - orbital_indeces = dh.geometry.a2o(atom, all=True) + orbital_indexes = dh.geometry.a2o(atom, all=True) if ( type(l) == int ): # if specified we restrict to given l angular momentum channel - orbital_indeces = orbital_indeces[ + orbital_indexes = orbital_indexes[ [o.l == l for o in dh.geometry.atoms[atom].orbitals] ] - return orbital_indeces # numpy array containing integers labeling orbitals associated to a magnetic entity. + return orbital_indexes # numpy array containing integers labeling orbitals associated to a magnetic entity. 
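A short usage sketch of the three selector keywords described above; the `dh` Hamiltonian is assumed to be read from an fdf file with sisl as in `grogu.py`, and the atom and orbital indices are made up for illustration.

``` python
# dh = sisl.get_sile("siesta_input.fdf").read_hamiltonian()  # assumed setup

# a single atom restricted to its d shell (l = 2)
d_shell = parse_magnetic_entity(dh, atom=3, l=2)
# a cluster of atoms, keeping all of their orbitals
cluster = parse_magnetic_entity(dh, atom=[3, 4, 5])
# explicit orbital indices, no atom selector needed
orbitals = parse_magnetic_entity(dh, orb=[10, 11, 12])
```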
-def calculate_anisotropy_tensor(mag_ent): +def calculate_anisotropy_tensor(mag_ent: dict) -> tuple[np.array, float]: """Calculates the renormalized anisotropy tensor from the energies. Args: @@ -154,7 +168,7 @@ def calculate_anisotropy_tensor(mag_ent): return K, consistency_check -def calculate_exchange_tensor(pair): +def calculate_exchange_tensor(pair: dict) -> tuple[float, np.array, np.array, np.array]: """Calculates the exchange tensor from the energies. It produces the isotropic exchange, the relevant elements diff --git a/src/grogupy/utilities.py b/src/grogupy/utilities.py index e5964d1..eebaa3b 100644 --- a/src/grogupy/utilities.py +++ b/src/grogupy/utilities.py @@ -21,13 +21,16 @@ """Docstring in utilities. """ +from typing import Union + import numpy as np -from globals import TAU_0, TAU_X, TAU_Y, TAU_Z from scipy.special import roots_legendre from sisl.io.siesta import eigSileSiesta +from grogupy.constants import TAU_0, TAU_X, TAU_Y, TAU_Z + -def commutator(a, b): +def commutator(a: np.array, b: np.array) -> np.array: """Shorthand for commutator. Commutator of two matrices in the mathematical sense. @@ -47,7 +50,9 @@ def commutator(a, b): # define some useful functions -def hsk(H, ss, sc_off, k=(0, 0, 0)): +def hsk( + H: np.array, ss: np.array, sc_off: list, k: tuple = (0, 0, 0) +) -> tuple[np.array, np.array]: """Speed up Hk and Sk generation. Calculates the Hamiltonian and the Overlap matrix at a given k point. It is faster that the sisl version. @@ -83,7 +88,7 @@ def hsk(H, ss, sc_off, k=(0, 0, 0)): return HK, SK -def make_kset(dirs="xyz", NUMK=20): +def make_kset(dirs: str = "xyz", NUMK: int = 20) -> np.array: """Simple k-grid generator to sample the Brillouin zone. Depending on the value of the dirs @@ -109,19 +114,26 @@ def make_kset(dirs="xyz", NUMK=20): kran = len(dirs) * [np.linspace(0, 1, NUMK, endpoint=False)] mg = np.meshgrid(*kran) - dirsdict = dict() + directions = dict() for d in enumerate(dirs): - dirsdict[d[1]] = mg[d[0]].flatten() + directions[d[1]] = mg[d[0]].flatten() for d in "xyz": if not (d in dirs): - dirsdict[d] = 0 * dirsdict[dirs[0]] - kset = np.array([dirsdict[d] for d in "xyz"]).T + directions[d] = 0 * directions[dirs[0]] + kset = np.array([directions[d] for d in "xyz"]).T return kset -def make_contour(emin=-20, emax=0.0, enum=42, p=150): +# just an empty container class +class Container: + pass + + +def make_contour( + emin: float = -20, emax: float = 0.0, enum: int = 42, p: float = 150 +) -> Container: """A more sophisticated contour generator. Calculates the parameters for the complex contour integral. It uses the @@ -139,7 +151,7 @@ def make_contour(emin=-20, emax=0.0, enum=42, p=150): Shape parameter that describes the distribution of the sample points. Defaults to 150 Returns: - ccont + Container Contains all the information for the contour integral """ x, wl = roots_legendre(enum) @@ -153,11 +165,7 @@ def make_contour(emin=-20, emax=0.0, enum=42, p=150): ze = z0 + R * np.exp(1j * phi) # complex points for path we = -(y2 - y1) / 2 * np.exp(-y) / p * 1j * (ze - z0) * wl - # just an empty container class - class ccont: - pass - - cont = ccont() + cont = Container() cont.R = R cont.z0 = z0 cont.ze = ze @@ -167,7 +175,7 @@ def make_contour(emin=-20, emax=0.0, enum=42, p=150): return cont -def tau_u(u): +def tau_u(u: Union[list, np.array]) -> np.array: """Pauli matrix in direction u. Returns the vector u in the basis of the Pauli matrices. 
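As a quick numerical check of that docstring, a sketch expanding a unit vector in the Pauli basis with the constants from the renamed `grogupy.constants` module; this assumes `tau_u` performs no extra normalization for a unit-length input.

``` python
import numpy as np

from grogupy.constants import TAU_X, TAU_Y, TAU_Z

u = np.array([0, 0, 1])  # quantization axis along z
tau = u[0] * TAU_X + u[1] * TAU_Y + u[2] * TAU_Z
print(np.allclose(tau, TAU_Z))  # True: the z direction gives back TAU_Z
```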
@@ -188,7 +196,7 @@ def tau_u(u): # -def crossM(u): +def crossM(u: Union[list, np.array]) -> np.array: """Definition for the cross-product matrix. It acts as a cross product with vector u. @@ -204,7 +212,7 @@ def crossM(u): return np.array([[0, -u[2], u[1]], [u[2], 0, -u[0]], [-u[1], u[0], 0]]) -def RotM(theta, u, eps=1e-10): +def RotM(theta: float, u: np.array, eps: float = 1e-10) -> np.array: """Definition of rotation matrix with angle theta around direction u. @@ -234,7 +242,7 @@ def RotM(theta, u, eps=1e-10): return M -def RotMa2b(a, b, eps=1e-10): +def RotMa2b(a: np.array, b: np.array, eps: float = 1e-10) -> np.array: """Definition of rotation matrix rotating unit vector a to unit vector b. Function returns array R such that R @ a = b holds. @@ -261,7 +269,7 @@ def RotMa2b(a, b, eps=1e-10): return M -def read_siesta_emin(eigfile): +def read_siesta_emin(eigfile: str) -> float: """It reads the lowest energy level from the siesta run. It uses the .EIG file from siesta that contains the eigenvalues. @@ -276,12 +284,12 @@ def read_siesta_emin(eigfile): """ # read the file - eigs = eigSileSiesta(eigfile).read_data() + eigenvalues = eigSileSiesta(eigfile).read_data() - return eigs.min() + return eigenvalues.min() -def int_de_ke(traced, we): +def int_de_ke(traced: np.array, we: float) -> np.array: """It numerically integrates the traced matrix. It is a wrapper from numpy.trapz and it contains the diff --git a/test.ipynb b/test.ipynb index a68317f..8c33de4 100644 --- a/test.ipynb +++ b/test.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -16,7 +16,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -24,14 +24,14 @@ "output_type": "stream", "text": [ "0.14.3\n", - "numpy version unknown.\n" + "1.24.4\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "[Daniels-MacBook-Air.local:40959] shmem: mmap: an error occurred while determining whether or not /var/folders/yh/dx7xl94n3g52ts3td8qcxjcc0000gn/T//ompi.Daniels-MacBook-Air.501/jf.0/3756457984/sm_segment.Daniels-MacBook-Air.501.dfe70000.0 could be created.\n" + "[Daniels-MacBook-Air.local:67208] shmem: mmap: an error occurred while determining whether or not /var/folders/yh/dx7xl94n3g52ts3td8qcxjcc0000gn/T//ompi.Daniels-MacBook-Air.501/jf.0/2623209472/sm_segment.Daniels-MacBook-Air.501.9c5b0000.0 could be created.\n" ] } ], @@ -63,6 +63,60 @@ " print(\"numpy version unknown.\")" ] }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "({'infile': '/Users/danielpozsar/Downloads/nojij/Fe3GeTe2/monolayer/soc/lat3_791/Fe3GeTe2.fdf',\n", + " 'outfile': './Fe3GeTe2_notebook',\n", + " 'scf_xcf_orientation': array('[ 0 0 1 ]', dtype=' 43\u001b[0m \u001b[43mprint_parameters\u001b[49m\u001b[43m(\u001b[49m\u001b[43msimulation_parameters\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 44\u001b[0m times[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msetup_time\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m timer()\n\u001b[1;32m 45\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSetup done. 
Elapsed time: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtimes[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124msetup_time\u001b[39m\u001b[38;5;124m'\u001b[39m]\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m s\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", - "File \u001b[0;32m~/Documents/oktatás/elte/phd/grogu_project/.venv/lib/python3.9/site-packages/grogupy/io.py:132\u001b[0m, in \u001b[0;36mprint_parameters\u001b[0;34m(simulation_parameters)\u001b[0m\n\u001b[1;32m 128\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mEsetp: \u001b[39m\u001b[38;5;124m\"\u001b[39m, simulation_parameters[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mesetp\u001b[39m\u001b[38;5;124m\"\u001b[39m])\n\u001b[1;32m 129\u001b[0m \u001b[38;5;28mprint\u001b[39m(\n\u001b[1;32m 130\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m================================================================================================================================================================\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 131\u001b[0m )\n\u001b[0;32m--> 132\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[43msimulation_parameters\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcalculate_charge\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m:\n\u001b[1;32m 133\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mThe calculated charge of the Hamiltonian in the quantization axes: \u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 134\u001b[0m \u001b[38;5;28mprint\u001b[39m(simulation_parameters[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcharges\u001b[39m\u001b[38;5;124m\"\u001b[39m])\n", - "\u001b[0;31mKeyError\u001b[0m: 'calculate_charge'" - ] - } - ], + "outputs": [], "source": [ "# MPI parameters\n", "comm = MPI.COMM_WORLD\n", diff --git a/tests/test_io.py b/tests/test_io.py index be895fe..801ba5d 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -27,7 +27,7 @@ from pathlib import Path import numpy as np import pytest -from grogupy.globals import DEFAULT_ARGUMENTS +from grogupy.constants import DEFAULT_ARGUMENTS from grogupy.io import ( load_pickle, print_atoms_and_pairs, diff --git a/tests/test_utilities.py b/tests/test_utilities.py index 44f01db..698225e 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -24,7 +24,7 @@ from hypothesis import given from hypothesis import strategies as st from numpy.testing import assert_allclose, assert_array_almost_equal -from grogupy.globals import TAU_0, TAU_X, TAU_Y, TAU_Z +from grogupy.constants import TAU_0, TAU_X, TAU_Y, TAU_Z from grogupy.utilities import ( RotM, RotMa2b,