commenting and documentation

class-solution
Daniel Pozsar 2 months ago
parent 51fdb95eb9
commit 6563280eb7

@ -21,15 +21,57 @@
import numpy as np
from numpy.linalg import inv

from grogupy.magnetism import blow_up_orbindx, parse_magnetic_entity
from grogupy.utils import commutator
def parallel_Gk(HK, SK, eran, eset):
"""Calculates the Green's function by inversion.
It calculates the Green's function on all the energy levels at the same time.
Args:
HK : (NO, NO), np.array_like
Hamiltonian at a given k point
SK : (NO, NO), np.array_like
Overlap Matrix at a given k point
eran : (eset) np.array_like
Energy sample along the contour
eset : int
Number of energy samples along the contour
Returns:
Gk : (eset, NO, NO), np.array_like
Green's function at a given k point
"""
# Calculates the Green's function on all the energy levels
return inv(SK * eran.reshape(eset, 1, 1) - HK)
def sequential_GK(HK, SK, eran, eset):
"""Calculates the Green's function by inversion.
It calculates the Green's function sequentially over the energy levels.
Args:
HK : (NO, NO), np.array_like
Hamiltonian at a given k point
SK : (NO, NO), np.array_like
Overlap Matrix at a given k point
eran : (eset) np.array_like
Energy sample along the contour
eset : int
Number of energy samples along the contour
Returns:
Gk : (eset, NO, NO), np.array_like
Green's function at a given k point
"""
# creates an empty holder
Gk = np.zeros(shape=(eset, HK.shape[0], HK.shape[1]), dtype="complex128")
# fills the holder sequentially with the Green's function at each energy
for j in range(eset):
Gk[j] = inv(SK * eran[j] - HK)
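
A minimal usage sketch, not part of the commit: the two Green's function solvers should agree on a small Hermitian test problem, with the shapes taken from the docstrings above.

import numpy as np

NO_test, eset_test = 4, 8
rng = np.random.default_rng(0)
A = rng.normal(size=(NO_test, NO_test)) + 1j * rng.normal(size=(NO_test, NO_test))
HK_test = (A + A.conj().T) / 2  # Hermitian toy Hamiltonian
SK_test = np.eye(NO_test, dtype="complex128")  # orthonormal basis for simplicity
eran_test = np.linspace(-1, 1, eset_test) + 0.05j  # energies just above the real axis

G_par = parallel_Gk(HK_test, SK_test, eran_test, eset_test)  # shape (eset, NO, NO)
G_seq = sequential_GK(HK_test, SK_test, eran_test, eset_test)
assert np.allclose(G_par, G_seq)
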
@ -37,14 +79,19 @@ def sequential_GK(HK, SK, eran, eset):
def calc_Vu(H, Tu):
"""Calculates the local perturbation in case of a spin rotation.
Args:
H : (NO, NO) np.array_like
Hamiltonian
Tu : (NO, NO) array_like
Rotation around u
Returns:
Vu1 : (NO, NO) np.array_like
First order perturbed matrix
Vu2 : (NO, NO) np.array_like
Second order perturbed matrix
"""
Vu1 = 1j / 2 * commutator(H, Tu)  # equation 100
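
A hedged consistency check, not from the source: for Hermitian H and Tu the first order term Vu1 = i/2 [H, Tu] of equation 100 is itself Hermitian, which can be verified on a small spin-box example.

import numpy as np

sx = np.array([[0, 1], [1, 0]], dtype="complex128")
sz = np.array([[1, 0], [0, -1]], dtype="complex128")
H_test = np.kron(np.eye(2), sz)  # toy spin-box Hamiltonian
Tu_test = np.kron(np.eye(2), sx)  # toy rotation generator
Vu1_test = 1j / 2 * commutator(H_test, Tu_test)
assert np.allclose(Vu1_test, Vu1_test.conj().T)
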
@ -54,15 +101,24 @@ def calc_Vu(H, Tu):
def remove_clutter_for_save(pairs, magnetic_entities):
"""Removes unimportant data from the dictionaries.
It is used before saving to throw away data that
is not needed for post processing.
Args:
pairs : dict
Contains all the pair information
magnetic_entities : dict
Contains all the magnetic entity information
Returns:
pairs : dict
Contains all the reduced pair information
magnetic_entities : dict
Contains all the reduced magnetic entity information
"""
# remove clutter from magnetic entities and pair information
for pair in pairs:
del pair["Gij"]
@ -79,14 +135,22 @@ def remove_clutter_for_save(pairs, magnetic_entities):
def build_hh_ss(dh):
"""It builds the Hamiltonian and Overlap matrix from the sisl.dh class.
It restructures the data in the SPIN BOX representation, where NS is
the number of supercells and NO is the number of orbitals.
Args:
dh : sisl.physics.Hamiltonian
Hamiltonian read in by sisl
Returns:
hh : (NS, NO, NO) np.array_like
Hamiltonian in SPIN BOX representation
ss : (NS, NO, NO) np.array_like
Overlap matrix in SPIN BOX representation
"""
NO = dh.no  # shorthand for number of orbitals in the unit cell
# preprocessing Hamiltonian and overlap matrix elements
@ -142,23 +206,35 @@ def build_hh_ss(dh):
]
)
return hh, ss
def setup_pairs_and_magnetic_entities(
magnetic_entities, pairs, dh, simulation_parameters
):
"""It creates the complete structure of the dictionaries and fills some basic data.
It creates orbital indexes, spin box indexes, coordinates and tags for magnetic entities.
Furthermore it creates the structures for the energies, the perturbed potentials and
the Green's function calculation. It does the same for the pairs.
Args:
pairs : dict
Contains the initial pair information
magnetic_entities : dict
Contains the initial magnetic entity information
dh : sisl.physics.Hamiltonian
Hamiltonian read in by sisl
simulation_parameters : dict
A set of parameters from the simulation
Returns:
pairs : dict
Contains the initial information and the complete structure
magnetic_entities : dict
Contains the initial information and the complete structure
"""
# for every site we have to store 3 Green's functions (and the associated _tmp-s) in the 3 reference directions
for mag_ent in magnetic_entities:
parsed = parse_magnetic_entity(dh, **mag_ent)  # parse orbital indexes
@ -169,11 +245,14 @@ def setup_pairs_and_magnetic_entities(
# if orbital is not set use all
if "l" not in mag_ent.keys():
mag_ent["l"] = "all"
# tag creation for one atom
if isinstance(mag_ent["atom"], int):
mag_ent["tags"] = [
f"[{mag_ent['atom']}]{dh.atoms[mag_ent['atom']].tag}({mag_ent['l']})"
]
mag_ent["xyz"] = [dh.xyz[mag_ent["atom"]]]
# tag creation for more atoms
if isinstance(mag_ent["atom"], list):
mag_ent["tags"] = []
mag_ent["xyz"] = []
@ -187,9 +266,8 @@ def setup_pairs_and_magnetic_entities(
# calculate size for Green's function generation
spin_box_shape = len(mag_ent["spin_box_indices"])
# we will store the second order energy derivatives here
mag_ent["energies"] = []
# These will be the perturbed potentials from eq. 100
mag_ent["Vu1"] = []  # so they are independent in memory
@ -197,7 +275,7 @@
mag_ent["Gii"] = []  # Green's function
mag_ent["Gii_tmp"] = []  # Green's function for parallelization
for _ in simulation_parameters["ref_xcf_orientations"]:
# Rotations for every quantization axis
mag_ent["Vu1"].append([])
mag_ent["Vu2"].append([])
@ -227,6 +305,7 @@ def setup_pairs_and_magnetic_entities(
# calculate size for Green's function generation
spin_box_shape_i = len(magnetic_entities[pair["ai"]]["spin_box_indices"])
spin_box_shape_j = len(magnetic_entities[pair["aj"]]["spin_box_indices"])
# tag generation
pair["tags"] = []
for mag_ent in [magnetic_entities[pair["ai"]], magnetic_entities[pair["aj"]]]:
tag = ""
@ -253,7 +332,7 @@ def setup_pairs_and_magnetic_entities(
pair["Gji"] = [] pair["Gji"] = []
pair["Gij_tmp"] = [] # Greens function for parallelization pair["Gij_tmp"] = [] # Greens function for parallelization
pair["Gji_tmp"] = [] pair["Gji_tmp"] = []
for i in simulation_parameters["ref_xcf_orientations"]: for _ in simulation_parameters["ref_xcf_orientations"]:
# Greens functions for every quantization axis # Greens functions for every quantization axis
pair["Gij"].append( pair["Gij"].append(
np.zeros( np.zeros(
@ -284,14 +363,19 @@ def setup_pairs_and_magnetic_entities(
def onsite_projection(matrix, idx1, idx2):
"""It produces the slices of a matrix for the on site projection.
The slicing is along the last two axes as these contain the orbital indexing.
Args:
matrix : (..., :, :) np.array_like
Some matrix
idx1, idx2 : np.array_like
The indexes of the orbitals
Returns:
np.array_like
Reduced matrix based on the projection
"""
return matrix[..., idx1, :][..., idx2]
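
A short illustrative sketch, not in the commit: projecting a stacked matrix onto two index sets, mirroring how the Green's function slices of the magnetic entities are taken.

import numpy as np

M_test = np.arange(2 * 6 * 6).reshape(2, 6, 6)
idx_a = np.array([0, 1, 4])
idx_b = np.array([2, 3])
block = onsite_projection(M_test, idx_a, idx_b)
assert block.shape == (2, 3, 2)
assert block[0, 0, 0] == M_test[0, 0, 2]
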

@ -19,45 +19,44 @@
# SOFTWARE.
import os
import warnings
from sys import getsizeof
from timeit import default_timer as timer

import numpy as np
import sisl
from mpi4py import MPI

from grogupy import *


def main():
# constrain numpy in parallel run
os.environ["OMP_NUM_THREADS"] = "1"  # export OMP_NUM_THREADS=1
os.environ["OPENBLAS_NUM_THREADS"] = "1"  # export OPENBLAS_NUM_THREADS=1
os.environ["MKL_NUM_THREADS"] = "1"  # export MKL_NUM_THREADS=1
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"  # export VECLIB_MAXIMUM_THREADS=1
os.environ["NUMEXPR_NUM_THREADS"] = "1"  # export NUMEXPR_NUM_THREADS=1

# runtime information
times = dict()
times["start_time"] = timer()

# input output stuff
######################################################################
######################################################################
######################################################################
infile = "/Users/danielpozsar/Downloads/nojij/Fe3GeTe2/monolayer/soc/lat3_791/Fe3GeTe2.fdf"
outfile = "./Fe3GeTe2_notebook"

magnetic_entities = [
dict(atom=3, l=2),
dict(atom=4, l=2),
dict(atom=5, l=2),
]
pairs = [
dict(ai=0, aj=1, Ruc=np.array([0, 0, 0])),
dict(ai=0, aj=2, Ruc=np.array([0, 0, 0])),
dict(ai=1, aj=2, Ruc=np.array([0, 0, 0])),
@ -67,34 +66,30 @@ pairs = [
dict(ai=1, aj=2, Ruc=np.array([-1, 0, 0])),
dict(ai=1, aj=2, Ruc=np.array([-2, 0, 0])),
dict(ai=1, aj=2, Ruc=np.array([-3, 0, 0])),
]
simulation_parameters = default_args
simulation_parameters["infile"] = infile
simulation_parameters["outfile"] = outfile
simulation_parameters["kset"] = 20
simulation_parameters["kdirs"] = "xy"
simulation_parameters["eset"] = 600
simulation_parameters["esetp"] = 10000
######################################################################
######################################################################
######################################################################

# MPI parameters
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
root_node = 0

if rank == root_node:
# include parallel size in simulation parameters
simulation_parameters["parallel_size"] = size

# check versions for debugging
try:
print("sisl version: ", sisl.__version__)
except:
@ -104,12 +99,12 @@ if rank == root_node:
except:
print("numpy version unknown.")
# rename outfile
if not simulation_parameters["outfile"].endswith(".pickle"):
simulation_parameters["outfile"] += ".pickle"
# if ebot is not given put it 0.1 eV under the smallest energy
if simulation_parameters["ebot"] is None:
try:
eigfile = simulation_parameters["infile"][:-3] + "EIG"
simulation_parameters["ebot"] = read_siesta_emin(eigfile) - 0.1
@ -117,20 +112,22 @@ if simulation_parameters["ebot"] is None:
except:
print("Could not determine ebot.")
print("Parameter was not given and .EIG file was not found.")
else:
simulation_parameters["automatic_ebot"] = False
# read sile
fdf = sisl.get_sile(simulation_parameters["infile"])
# read in hamiltonian
dh = fdf.read_hamiltonian()
# read unit cell vectors
simulation_parameters["cell"] = fdf.read_geometry().cell
# unit cell index
uc_in_sc_idx = dh.lattice.sc_index([0, 0, 0])
if rank == root_node:
print("\n\n\n\n\n")
print(
"#################################################################### JOB INFORMATION ###########################################################################"
@ -147,42 +144,43 @@ if rank == root_node:
"================================================================================================================================================================" "================================================================================================================================================================"
) )
NO = dh.no # shorthand for number of orbitals in the unit cell
# reformat Hamiltonian and Overlap matrix for manipulations
hh, ss = build_hh_ss(dh)
# symmetrizing Hamiltonian and Overlap matrix to make them hermitian
for i in range(dh.lattice.sc_off.shape[0]):
j = dh.lattice.sc_index(-dh.lattice.sc_off[i])
h1, h1d = hh[i], hh[j]
hh[i], hh[j] = (h1 + h1d.T.conj()) / 2, (h1d + h1.T.conj()) / 2
s1, s1d = ss[i], ss[j]
ss[i], ss[j] = (s1 + s1d.T.conj()) / 2, (s1d + s1.T.conj()) / 2
# identifying TRS and TRB parts of the Hamiltonian
TAUY = np.kron(np.eye(NO), tau_y)
hTR = np.array([TAUY @ hh[i].conj() @ TAUY for i in range(dh.lattice.nsc.prod())])
hTRS = (hh + hTR) / 2
hTRB = (hh - hTR) / 2
# extracting the exchange field
traced = [spin_tracer(hTRB[i]) for i in range(dh.lattice.nsc.prod())]  # equation 77
XCF = np.array(
[
np.array([f["x"] / 2 for f in traced]),
np.array([f["y"] / 2 for f in traced]),
np.array([f["z"] / 2 for f in traced]),
]
)
# check if exchange field has scalar part
max_xcfs = abs(np.array(np.array([f["c"] / 2 for f in traced]))).max()
if max_xcfs > 1e-12:
warnings.warn(
f"Exchange field has non negligible scalar part. Largest value is {max_xcfs}"
)
if rank == root_node:
times["H_and_XCF_time"] = timer()
print(
f"Hamiltonian and exchange field rotated. Elapsed time: {times['H_and_XCF_time']} s"
@ -191,12 +189,12 @@ if rank == root_node:
"================================================================================================================================================================" "================================================================================================================================================================"
) )
# initialize pairs and magnetic entities based on input information
pairs, magnetic_entities = setup_pairs_and_magnetic_entities(
magnetic_entities, pairs, dh, simulation_parameters
)
if rank == root_node:
times["site_and_pair_dictionaries_time"] = timer()
print(
f"Site and pair dictionaries created. Elapsed time: {times['site_and_pair_dictionaries_time']} s"
@ -205,35 +203,35 @@ if rank == root_node:
"================================================================================================================================================================" "================================================================================================================================================================"
) )
# generate k space sampling
kset = make_kset(
dirs=simulation_parameters["kdirs"], NUMK=simulation_parameters["kset"]
)
# generate weights for k points
wkset = np.ones(len(kset)) / len(kset)
# split the k points based on MPI size
kpcs = np.array_split(kset, size)
# use progress bar if available
if rank == root_node and tqdm_imported:
kpcs[root_node] = tqdm(kpcs[root_node], desc="k loop")
if rank == root_node:
times["k_set_time"] = timer()
print(f"k set created. Elapsed time: {times['k_set_time']} s")
print(
"================================================================================================================================================================"
)
# this will contain the three Hamiltonians in the
# reference directions needed to calculate the energy
# variations upon rotation
hamiltonians = []
# iterate over the reference directions (quantization axes)
for i, orient in enumerate(simulation_parameters["ref_xcf_orientations"]):
# obtain rotated exchange field and Hamiltonian
R = RotMa2b(simulation_parameters["scf_xcf_orientation"], orient["o"])
rot_XCF = np.einsum("ij,jklm->iklm", R, XCF)
@ -248,16 +246,6 @@ for i, orient in enumerate(simulation_parameters["ref_xcf_orientations"]):
# store the relevant information of the Hamiltonian
hamiltonians.append(dict(orient=orient["o"], H=rot_H))
if simulation_parameters["calculate_charge"]:
hamiltonians[-1]["GS"] = np.zeros(
(simulation_parameters["eset"], rot_H.shape[1], rot_H.shape[2]),
dtype="complex128",
)
hamiltonians[-1]["GS_tmp"] = np.zeros(
(simulation_parameters["eset"], rot_H.shape[1], rot_H.shape[2]),
dtype="complex128",
)
# these are the rotations (for now) perpendicular to the quantization axis
for u in orient["vw"]:
# section 2.H
@ -270,7 +258,7 @@ for i, orient in enumerate(simulation_parameters["ref_xcf_orientations"]):
mag_ent["Vu1"][i].append(onsite_projection(Vu1, idx, idx)) mag_ent["Vu1"][i].append(onsite_projection(Vu1, idx, idx))
mag_ent["Vu2"][i].append(onsite_projection(Vu2, idx, idx)) mag_ent["Vu2"][i].append(onsite_projection(Vu2, idx, idx))
if rank == root_node: if rank == root_node:
times["reference_rotations_time"] = timer() times["reference_rotations_time"] = timer()
print( print(
f"Rotations done perpendicular to quantization axis. Elapsed time: {times['reference_rotations_time']} s" f"Rotations done perpendicular to quantization axis. Elapsed time: {times['reference_rotations_time']} s"
@ -279,14 +267,16 @@ if rank == root_node:
"================================================================================================================================================================" "================================================================================================================================================================"
) )
# provide helpful information to estimate the runtime and memory # provide helpful information to estimate the runtime and memory
# requirements of the Greens function calculations # requirements of the Greens function calculations
if rank == root_node: if rank == root_node:
print("Starting matrix inversions.") print("Starting matrix inversions.")
if simulation_parameters["padawan_mode"]: if simulation_parameters["padawan_mode"]:
print("Padawan mode: ") print("Padawan mode: ")
print(f"Total number of k points: {kset.shape[0]}") print(f"Total number of k points: {kset.shape[0]}")
print(f"Number of energy samples per k point: {simulation_parameters['eset']}") print(
f"Number of energy samples per k point: {simulation_parameters['eset']}"
)
print(f"Total number of directions: {len(hamiltonians)}") print(f"Total number of directions: {len(hamiltonians)}")
print( print(
f"Total number of matrix inversions: {kset.shape[0] * len(hamiltonians) * simulation_parameters['eset']}" f"Total number of matrix inversions: {kset.shape[0] * len(hamiltonians) * simulation_parameters['eset']}"
@ -311,19 +301,19 @@ if rank == root_node:
"================================================================================================================================================================" "================================================================================================================================================================"
) )
# MPI barrier # MPI barrier
comm.Barrier() comm.Barrier()
# make energy contour # make energy contour
cont = make_contour( cont = make_contour(
emin=simulation_parameters["ebot"], emin=simulation_parameters["ebot"],
enum=simulation_parameters["eset"], enum=simulation_parameters["eset"],
p=simulation_parameters["esetp"], p=simulation_parameters["esetp"],
) )
eran = cont.ze eran = cont.ze
# sampling the integrand on the contour and the BZ # sampling the integrand on the contour and the BZ
for k in kpcs[rank]: for k in kpcs[rank]:
# weight of k point in BZ integral # weight of k point in BZ integral
wk = wkset[rank] wk = wkset[rank]
@ -339,10 +329,6 @@ for k in kpcs[rank]:
# solve the Green's function sequentially over the energies, because the calculation is memory bound
Gk = sequential_GK(HK, SK, eran, simulation_parameters["eset"])
# saving this for total charge
if simulation_parameters["calculate_charge"]:
hamiltonian_orientation["GS_tmp"] += Gk @ SK * wk
# store the Green's function slice of the magnetic entities
for mag_ent in magnetic_entities:
idx = mag_ent["spin_box_indices"]
@ -360,12 +346,8 @@ for k in kpcs[rank]:
pair["Gij_tmp"][i] += onsite_projection(Gk, ai, aj) * phase * wk pair["Gij_tmp"][i] += onsite_projection(Gk, ai, aj) * phase * wk
pair["Gji_tmp"][i] += onsite_projection(Gk, aj, ai) / phase * wk pair["Gji_tmp"][i] += onsite_projection(Gk, aj, ai) / phase * wk
# summ reduce partial results of mpi nodes # summ reduce partial results of mpi nodes
for i in range(len(hamiltonians)): for i in range(len(hamiltonians)):
# for total charge calculation
if simulation_parameters["calculate_charge"]:
comm.Reduce(hamiltonians[i]["GS_tmp"], hamiltonians[i]["GS"], root=root_node)
for mag_ent in magnetic_entities:
comm.Reduce(mag_ent["Gii_tmp"][i], mag_ent["Gii"][i], root=root_node)
@ -373,7 +355,7 @@ for i in range(len(hamiltonians)):
comm.Reduce(pair["Gij_tmp"][i], pair["Gij"][i], root=root_node)
comm.Reduce(pair["Gji_tmp"][i], pair["Gji"][i], root=root_node)
if rank == root_node:
times["green_function_inversion_time"] = timer()
print(
f"Calculated Green's functions. Elapsed time: {times['green_function_inversion_time']} s"
@ -382,15 +364,7 @@ if rank == root_node:
"================================================================================================================================================================" "================================================================================================================================================================"
) )
if rank == root_node:
if rank == root_node:
# Calculate total charge
if simulation_parameters["calculate_charge"]:
for hamiltonian in hamiltonians:
GS = hamiltonian["GS"]
traced = np.trace((GS), axis1=1, axis2=2)
simulation_parameters["charges"].append(int_de_ke(traced, cont.we))
# iterate over the magnetic entities
for tracker, mag_ent in enumerate(magnetic_entities):
# iterate over the quantization axes
@ -473,3 +447,7 @@ if rank == root_node:
save_pickle(simulation_parameters["outfile"], results)
print("\n\n\n\n\n")
if __name__ == "__main__":
main()

@ -35,13 +35,9 @@ default_args = dict(
kset=2,
kdirs="xyz",
ebot=None,
automatic_ebot=False,
eset=42,
esetp=1000,
calculate_charge=False,
charges=[],
parallel_solver_for_Gk=False,
parallel_size=None,
padawan_mode=True,
)
@ -60,27 +56,33 @@ default_args = dict(
def save_pickle(outfile, data):
"""Saves the data in the outfile with pickle.
Args:
outfile : str
Path to outfile
data : dict
Contains the data
"""
# save dictionary
with open(outfile, "wb") as output_file:
dump(data, output_file)
def load_pickle(infile):
"""Loads the data from the infile with pickle.
Args:
infile : str
Path to infile
Returns:
data : dict
A dictionary of data
"""
# open and read file
with open(infile, "rb") as input_file:
data = load(input_file)
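
A hedged round-trip sketch, not part of the commit, assuming the module imports pickle's dump and load as used above; the file name is only an example.

results_example = dict(tag="test", J_iso=0.0)
save_pickle("example_results.pickle", results_example)
assert load_pickle("example_results.pickle") == results_example
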
@ -88,11 +90,13 @@ def load_pickle(infile, data):
def print_parameters(simulation_parameters):
"""It prints the simulation parameters for the grogu out.
Args:
simulation_parameters : dict
It contains the simulation parameters
"""
print(
"================================================================================================================================================================"
)
@ -138,12 +142,15 @@ def print_parameters(simulation_parameters):
def print_atoms_and_pairs(magnetic_entities, pairs):
"""It prints the pair and magnetic entity information for the grogu out.
Args:
magnetic_entities : dict
It contains the data on the magnetic entities
pairs : dict
It contains the data on the pairs
"""
print("Atomic information: ")
print(
"----------------------------------------------------------------------------------------------------------------------------------------------------------------"
@ -223,11 +230,13 @@ def print_atoms_and_pairs(magnetic_entities, pairs):
def print_runtime_information(times):
"""It prints the runtime information for the grogu out.
Args:
times : dict
It contains the runtime data
"""
print("Runtime information: ")
print(f"Total runtime: {times['end_time'] - times['start_time']} s")
print(
@ -255,10 +264,12 @@ def print_runtime_information(times):
def print_job_description(simulation_parameters):
"""It prints the parameters and the description of the job.
Args:
simulation_parameters : dict
It contains the simulation parameters
"""
print(

@ -21,21 +21,119 @@
import numpy as np
def blow_up_orbindx(orb_indices):
"""Function to blow up orbital indices to make SPIN BOX indices.
Args:
orb_indices : np.array_like
These are the indices in ORBITAL BOX
Returns:
orb_indices : np.array_like
These are the indices in SPIN BOX
"""
orb_indices = np.array([[2 * o, 2 * o + 1] for o in orb_indices]).flatten()
return orb_indices
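
A quick illustrative check, not in the commit: ORBITAL BOX indices 0, 1 and 3 blow up to SPIN BOX indices 0, 1, 2, 3, 6, 7.

import numpy as np

assert (blow_up_orbindx(np.array([0, 1, 3])) == np.array([0, 1, 2, 3, 6, 7])).all()
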
def spin_tracer(M):
"""Spin tracer utility.
This takes an operator with the orbital-spin sequence:
orbital 1 up,
orbital 1 down,
orbital 2 up,
orbital 2 down,
that is in the SPIN-BOX representation,
and extracts orbital dependent Pauli traces.
Args:
M : np.array_like
Traceable matrix
Returns:
dict
It contains the traced matrix with "x", "y", "z" and "c"
"""
M11 = M[0::2, 0::2]
M12 = M[0::2, 1::2]
M21 = M[1::2, 0::2]
M22 = M[1::2, 1::2]
M_o = dict()
M_o["x"] = M12 + M21
M_o["y"] = 1j * (M12 - M21)
M_o["z"] = M11 - M22
M_o["c"] = M11 + M22
return M_o
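
A hedged sketch, not part of the commit: a matrix built as kron(B, sigma_z) in this spin-box ordering is recovered, up to a factor of 2, in the "z" channel, which is why the exchange field extraction divides the traces by 2.

import numpy as np

B = np.array([[1.0, 0.5], [0.5, 2.0]])
sigma_z = np.array([[1, 0], [0, -1]], dtype="complex128")
M_test = np.kron(B, sigma_z)  # orbital index first, spin index second
traced_test = spin_tracer(M_test)
assert np.allclose(traced_test["z"], 2 * B)
assert np.allclose(traced_test["c"], 0)
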
def parse_magnetic_entity(dh, atom=None, l=None, **kwargs):
"""Function to define orbital indexes of a given magnetic entity.
Args:
dh : sisl.physics.Hamiltonian
Hamiltonian from sisl
atom : integer or list of integers, optional
Defining atom (or atoms) in the unit cell forming the magnetic entity. Defaults to None
l : integer, optional
Defining the angular momentum channel. Defaults to None
Returns:
list
The orbital indexes of the given magnetic entity
"""
# case where we deal with more than one atom defining the magnetic entity
if type(atom) == list:
dat = []
for a in atom:
a_orb_idx = dh.geometry.a2o(a, all=True)
if (
type(l) == int
): # if specified we restrict to given l angular momentum channel inside each atom
a_orb_idx = a_orb_idx[[o.l == l for o in dh.geometry.atoms[a].orbitals]]
dat.append(a_orb_idx)
orbital_indeces = np.hstack(dat)
# case where we deal with a single atom magnetic entity
elif type(atom) == int:
orbital_indeces = dh.geometry.a2o(atom, all=True)
if (
type(l) == int
): # if specified we restrict to given l angular momentum channel
orbital_indeces = orbital_indeces[
[o.l == l for o in dh.geometry.atoms[atom].orbitals]
]
return orbital_indeces # numpy array containing integers labeling orbitals associated to a magnetic entity.
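
A hedged usage sketch, not part of the commit; "geom.fdf" is a hypothetical input file and the l = 2 restriction simply selects the d orbitals of atom 3.

import sisl

dh_example = sisl.get_sile("geom.fdf").read_hamiltonian()
d_orbitals = parse_magnetic_entity(dh_example, atom=3, l=2)  # ORBITAL BOX indices
spin_box_idx = blow_up_orbindx(d_orbitals)  # SPIN BOX indices
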
def calculate_anisotropy_tensor(mag_ent):
"""Calculates the renormalized anisotropy tensor from the energies.
It uses the grogu convention for output.
Args:
mag_ent : dict
An element from the magnetic entities
Returns:
K : np.array_like
elements of the anisotropy tensor
"""
# get the energies
energies = mag_ent["energies"]
# calculate the diagonal tensor elements
Kxx = energies[1, 1] - energies[1, 0]
Kyy = energies[0, 1] - energies[0, 0]
Kzz = 0
# perform consistency check
calculated_diff = Kyy - Kxx
expected_diff = energies[2, 0] - energies[2, 1]
consistency_check = abs(calculated_diff - expected_diff)
@ -44,14 +142,28 @@ def calculate_anisotropy_tensor(mag_ent):
def calculate_exchange_tensor(pair):
"""Calculates the exchange tensor from the energies.
It produces the isotropic exchange, the relevant elements
from the Dzyaloshinskii-Moriya (DM) tensor, the symmetric-anisotropy
and the complete exchange tensor.
Args:
pair : dict
An element from the pairs
Returns:
J_iso : float
Isotropic exchange (Tr[J] / 3)
J_S : np.array_like
Symmetric-anisotropy (J_S = J - J_iso * I > Jxx, Jyy, Jxy, Jxz, Jyz)
D : np.array_like
DM elements (Dx, Dy, Dz)
J : np.array_like
Complete exchange tensor flattened (Jxx, Jxy, Jxz, Jyx, Jyy, Jyz, Jzx, Jzy, Jzz)
"""
# energies from rotations
energies = pair["energies"]
# Initialize output arrays
J = np.zeros((3, 3))
@ -96,16 +208,3 @@ def calculate_exchange_tensor(pair):
J_S = np.array([J[0, 0] - J_iso, J[1, 1] - J_iso, J[0, 1], J[0, 2], J[1, 2]])
return J_iso, J_S, D, J

@ -29,6 +29,25 @@ tau_z = np.array([[1, 0], [0, -1]])
tau_0 = np.array([[1, 0], [0, 1]])
def commutator(a, b):
"""Shorthand for commutator.
Commutator of two matrices in the mathematical sense.
Args:
a : np.array_like
The first matrix
b : np.array_like
The second matrix
Returns:
np.array_like
The commutator of a and b
"""
return a @ b - b @ a
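
A small check, not in the commit, assuming the tau_x and tau_y Pauli matrices defined alongside tau_z above: the commutator reproduces [tau_x, tau_y] = 2i tau_z.

import numpy as np

assert np.allclose(commutator(tau_x, tau_y), 2j * tau_z)
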
# define some useful functions
def hsk(H, ss, sc_off, k=(0, 0, 0)):
"""Speed up Hk and Sk generation.
@ -36,14 +55,20 @@ def hsk(H, ss, sc_off, k=(0, 0, 0)):
Calculates the Hamiltonian and the Overlap matrix at a given k point. It is faster than the sisl version.
Args:
H : np.array_like
Hamiltonian in spin box form
ss : np.array_like
Overlap matrix in spin box form
sc_off : list
supercell indexes of the Hamiltonian
k : tuple, optional
The k point where the matrices are set up. Defaults to (0, 0, 0)
Returns:
np.array_like
Hamiltonian at the given k point
np.array_like
Overlap matrix at the given k point
"""
# these two conversion lines are from the sisl source
@ -60,46 +85,6 @@ def hsk(H, ss, sc_off, k=(0, 0, 0)):
return HK, SK
def make_kset(dirs="xyz", NUMK=20):
"""Simple k-grid generator to sample the Brillouin zone.
@ -110,12 +95,17 @@ def make_kset(dirs="xyz", NUMK=20):
Args:
dirs : str, optional
Directions of the k points in the Brillouin zone. They are the three lattice vectors. Defaults to "xyz"
NUMK : int, optional
The number of k points in a direction. Defaults to 20
Returns:
np.array_like
An array of k points that uniformly sample the Brillouin zone in the given directions
"""
# if there is no xyz in dirs return the Gamma point
if not (sum([d in dirs for d in "xyz"])):
return np.array([[0, 0, 0]])
@ -133,19 +123,50 @@ def make_kset(dirs="xyz", NUMK=20):
return kset
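
An illustrative sketch, not part of the commit, assuming NUMK points are taken along each sampled direction: restricting dirs to "xy" leaves the third component of every k point at zero.

kpts = make_kset(dirs="xy", NUMK=3)
assert kpts.shape[1] == 3  # k points are 3-vectors
assert (kpts[:, 2] == 0).all()  # no sampling along the z direction
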
def make_contour(emin=-20, emax=0.0, enum=42, p=150):
"""A more sophisticated contour generator.
Calculates the parameters for the complex contour integral. It uses the
Legendre-Gauss quadrature method. It returns a class that contains
the information for the contour integral.
Args:
emin : int, optional
Energy minimum of the contour. Defaults to -20
emax : float, optional
Energy maximum of the contour. Defaults to 0.0, so the Fermi level
enum : int, optional
Number of sample points along the contour. Defaults to 42
p : int, optional
Shape parameter that describes the distribution of the sample points. Defaults to 150
Returns:
ccont
Contains all the information for the contour integral
"""
x, wl = roots_legendre(enum)
R = (emax - emin) / 2 # radius
z0 = (emax + emin) / 2 # center point
y1 = -np.log(1 + np.pi * p) # lower bound
y2 = 0 # upper bound
y = (y2 - y1) / 2 * x + (y2 + y1) / 2
phi = (np.exp(-y) - 1) / p # angle parameter
ze = z0 + R * np.exp(1j * phi) # complex points for path
we = -(y2 - y1) / 2 * np.exp(-y) / p * 1j * (ze - z0) * wl
# just an empty container class
class ccont:
pass
cont = ccont()
cont.R = R
cont.z0 = z0
cont.ze = ze
cont.we = we
cont.enum = enum
return cont
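
A hedged sketch, not in the commit: since the angle parameter runs between 0 and pi at the Legendre nodes, the sample points lie on a semicircle in the upper half plane between emin and emax.

import numpy as np

cont_test = make_contour(emin=-15, emax=0.0, enum=100, p=150)
assert cont_test.ze.shape == (100,)
assert (cont_test.ze.imag > 0).all()  # the path stays above the real axis
assert (cont_test.ze.real > -15).all() and (cont_test.ze.real < 0).all()
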
def tau_u(u):
@ -154,10 +175,12 @@ def tau_u(u):
Returns the vector u in the basis of the Pauli matrices.
Args:
u : list or np.array_like
The direction
Returns:
np.array_like
Arbitrary direction in the basis of the Pauli matrices
"""
# u is forced to be of unit length
@ -173,10 +196,12 @@ def crossM(u):
It acts as a cross product with vector u.
Args:
u : list or np.array_like
The second vector in the cross product
Returns:
np.array_like
The matrix that represents the cross product with a vector
"""
return np.array([[0, -u[2], u[1]], [u[2], 0, -u[0]], [-u[1], u[0], 0]])
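
A quick check, not part of the commit: crossM(u) acting on a vector v reproduces np.cross(u, v).

import numpy as np

u_test = np.array([0.0, 0.0, 1.0])
v_test = np.array([1.0, 2.0, 3.0])
assert np.allclose(crossM(u_test) @ v_test, np.cross(u_test, v_test))
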
@ -186,12 +211,16 @@ def RotM(theta, u, eps=1e-10):
Args:
theta : float
The angle of rotation
u : np.array_like
The rotation axis
eps : float, optional
Cutoff for small elements in the resulting matrix. Defaults to 1e-10
Returns:
np.array_like
The rotation matrix
"""
u = u / np.linalg.norm(u)
@ -213,12 +242,16 @@ def RotMa2b(a, b, eps=1e-10):
Function returns array R such that R @ a = b holds.
Args:
a : np.array_like
First vector
b : np.array_like
Second vector
eps : float, optional
Cutoff for small elements in the resulting matrix. Defaults to 1e-10
Returns:
np.array_like
The rotation matrix with the above property
"""
v = np.cross(a, b)
@ -230,89 +263,42 @@ def RotMa2b(a, b, eps=1e-10):
return M
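
A hedged sketch, not in the commit: for two non-parallel unit vectors the returned matrix rotates a onto b.

import numpy as np

a_test = np.array([0.0, 0.0, 1.0])
b_test = np.array([1.0, 0.0, 0.0])
R_test = RotMa2b(a_test, b_test)
assert np.allclose(R_test @ a_test, b_test)
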
def read_siesta_emin(eigfile):
"""It reads the lowest energy level from the siesta run.
It uses the .EIG file from siesta that contains the eigenvalues.
Args:
eigfile : str
The path to the .EIG file
Returns:
float
The energy minimum
"""
# read the file
eigs = eigSileSiesta(eigfile).read_data()
return eigs.min()
def int_de_ke(traced, we):
"""It numerically integrates the traced matrix.
It is a wrapper around numpy.trapz and it contains the
relevant constants to calculate the energy integral from
equation 93 or 96.
Args:
traced : np.array_like
The trace of a matrix or a matrix product
we : float
The weight of a point on the contour
Returns:
float
The energy calculated from the integral formula
"""
return np.trapz(-1 / np.pi * np.imag(traced * we))
