# Copyright (c) [2024] [Daniel Pozsar]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import os

os.environ["OMP_NUM_THREADS"] = "1"  # export OMP_NUM_THREADS=1
os.environ["OPENBLAS_NUM_THREADS"] = "1"  # export OPENBLAS_NUM_THREADS=1
os.environ["MKL_NUM_THREADS"] = "1"  # export MKL_NUM_THREADS=1
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"  # export VECLIB_MAXIMUM_THREADS=1
os.environ["NUMEXPR_NUM_THREADS"] = "1"  # export NUMEXPR_NUM_THREADS=1
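
# Note: the thread caps above keep every MPI rank single threaded in
# BLAS/OpenMP/numexpr, so the ranks of the k-point parallelization below
# do not oversubscribe the available cores.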

from timeit import default_timer as timer

# runtime information
times = dict()
times["start_time"] = timer()

import warnings
from sys import getsizeof

import sisl
from mpi4py import MPI

from src.grogu_magn import *
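
# The wildcard import is expected to provide the helpers used below
# (np, tau_x/tau_y/tau_z, tau_u, build_hh_ss, make_kset, make_contour, hsk,
# parallel_Gk, sequential_GK, onsite_projection, calc_Vu, RotMa2b, spin_tracer,
# read_siesta_emin, int_de_ke, the printing utilities and default_args).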

# input output stuff
######################################################################
######################################################################
######################################################################

infile = (
    "/Users/danielpozsar/Downloads/nojij/Fe3GeTe2/monolayer/soc/lat3_791/Fe3GeTe2.fdf"
)
outfile = "./Fe3GeTe2_notebook"

magnetic_entities = [
    dict(atom=3, l=2),
    dict(atom=4, l=2),
    dict(atom=5, l=2),
]
pairs = [
    dict(ai=0, aj=1, Ruc=np.array([0, 0, 0])),
    dict(ai=0, aj=2, Ruc=np.array([0, 0, 0])),
    dict(ai=1, aj=2, Ruc=np.array([0, 0, 0])),
    dict(ai=0, aj=2, Ruc=np.array([-1, -1, 0])),
    dict(ai=1, aj=2, Ruc=np.array([-1, -1, 0])),
    dict(ai=0, aj=2, Ruc=np.array([-1, 0, 0])),
    dict(ai=1, aj=2, Ruc=np.array([-1, 0, 0])),
    dict(ai=1, aj=2, Ruc=np.array([-2, 0, 0])),
    dict(ai=1, aj=2, Ruc=np.array([-3, 0, 0])),
]
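
# Each magnetic entity selects the l=2 (d) shell of one atom (atoms 3-5,
# presumably the three Fe sites of Fe3GeTe2), and each pair couples two of
# these entities with a relative unit cell offset Ruc given in lattice vectors.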

# copy the defaults so the module level default_args dictionary is not mutated
simulation_parameters = default_args.copy()
simulation_parameters["infile"] = infile
simulation_parameters["outfile"] = outfile
simulation_parameters["kset"] = 20
simulation_parameters["kdirs"] = "xy"
simulation_parameters["eset"] = 600
simulation_parameters["esetp"] = 10000

fdf = sisl.io.fdfSileSiesta("input.fdf")
fdf.get("XCF_Rotation")

######################################################################
######################################################################
######################################################################


# MPI parameters
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
root_node = 0

# include parallel size in simulation parameters
simulation_parameters["parallel_size"] = size

# check versions for debugging
if rank == root_node:
    try:
        print("sisl version: ", sisl.__version__)
    except Exception:
        print("sisl version unknown.")
    try:
        print("numpy version: ", np.__version__)
    except Exception:
        print("numpy version unknown.")

# rename outfile
if not simulation_parameters["outfile"].endswith(".pickle"):
    simulation_parameters["outfile"] += ".pickle"

# if ebot is not given, put it 0.1 eV under the smallest energy
if simulation_parameters["ebot"] is None:
    try:
        eigfile = simulation_parameters["infile"][:-3] + "EIG"
        simulation_parameters["ebot"] = read_siesta_emin(eigfile) - 0.1
        simulation_parameters["automatic_ebot"] = True
    except Exception:
        print("Could not determine ebot.")
        print("Parameter was not given and the .EIG file was not found.")
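
# With an automatic ebot the energy contour built below starts 0.1 eV below the
# lowest eigenvalue found in the SIESTA .EIG file, so the occupied spectrum is
# enclosed by the contour.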

# read sile
fdf = sisl.get_sile(simulation_parameters["infile"])

# read in hamiltonian
dh = fdf.read_hamiltonian()

# read unit cell vectors
simulation_parameters["cell"] = fdf.read_geometry().cell

# unit cell index
uc_in_sc_idx = dh.lattice.sc_index([0, 0, 0])
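
# dh stores matrices block-wise for every supercell image in dh.lattice.sc_off;
# uc_in_sc_idx is the index of the home (R = [0, 0, 0]) cell, used later to pick
# the on-site block of the rotated exchange field.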

if rank == root_node:
    print("\n\n\n\n\n")
    print(
        "#################################################################### JOB INFORMATION ###########################################################################"
    )
    print_job_description(simulation_parameters)
    print(
        "################################################################################################################################################################"
    )
    print("\n\n\n\n\n")

    times["setup_time"] = timer()
    print(f"Setup done. Elapsed time: {times['setup_time']} s")
    print(
        "================================================================================================================================================================"
    )

# reformat Hamiltonian and Overlap matrix for manipulations
hh, ss, NO = build_hh_ss(dh)

# symmetrizing Hamiltonian and Overlap matrix to make them Hermitian
for i in range(dh.lattice.sc_off.shape[0]):
    j = dh.lattice.sc_index(-dh.lattice.sc_off[i])
    h1, h1d = hh[i], hh[j]
    hh[i], hh[j] = (h1 + h1d.T.conj()) / 2, (h1d + h1.T.conj()) / 2
    s1, s1d = ss[i], ss[j]
    ss[i], ss[j] = (s1 + s1d.T.conj()) / 2, (s1d + s1.T.conj()) / 2
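
# Optional sanity check (a minimal sketch, not part of the original workflow,
# assuming hh holds dense numpy blocks): after pairing opposite lattice vectors
# above, the home cell block should be Hermitian to numerical precision.
if rank == root_node:
    herm_err = np.abs(hh[uc_in_sc_idx] - hh[uc_in_sc_idx].T.conj()).max()
    if herm_err > 1e-10:
        warnings.warn(f"Home cell Hamiltonian deviates from hermiticity by {herm_err}")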

# identifying TRS and TRB parts of the Hamiltonian
TAUY = np.kron(np.eye(NO), tau_y)
hTR = np.array([TAUY @ hh[i].conj() @ TAUY for i in range(dh.lattice.nsc.prod())])
hTRS = (hh + hTR) / 2
hTRB = (hh - hTR) / 2
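
# TAUY is the spin part of the time-reversal operation in the full spin-orbital
# space, so hTR = TAUY H* TAUY is the time-reversed Hamiltonian. The average
# hTRS is the time-reversal symmetric part (spin-independent and spin-orbit
# terms), while hTRB is the time-reversal breaking part carrying the exchange field.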

# extracting the exchange field
traced = [spin_tracer(hTRB[i]) for i in range(dh.lattice.nsc.prod())]  # equation 77
XCF = np.array(
    [
        np.array([f["x"] / 2 for f in traced]),
        np.array([f["y"] / 2 for f in traced]),
        np.array([f["z"] / 2 for f in traced]),
    ]
)
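
# spin_tracer decomposes each spin box into an identity ("c") component and
# Pauli x/y/z components; the halved x, y, z parts assembled above form the
# exchange-correlation field used for the rotations below.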

# check if the exchange field has a scalar part
max_xcfs = np.abs(np.array([f["c"] / 2 for f in traced])).max()
if max_xcfs > 1e-12:
    warnings.warn(
        f"Exchange field has a non-negligible scalar part. Largest value is {max_xcfs}"
    )

if rank == root_node:
    times["H_and_XCF_time"] = timer()
    print(
        f"Hamiltonian and exchange field rotated. Elapsed time: {times['H_and_XCF_time']} s"
    )
    print(
        "================================================================================================================================================================"
    )

# initialize pairs and magnetic entities based on input information
pairs, magnetic_entities = setup_pairs_and_magnetic_entities(
    magnetic_entities, pairs, dh, simulation_parameters
)
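
# setup_pairs_and_magnetic_entities is expected to attach, among other things,
# the spin box orbital indices ("spin_box_indices") and the empty containers
# (Vu1, Vu2, Gii, Gii_tmp, Gij, Gji, energies, ...) that are filled below.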

if rank == root_node:
    times["site_and_pair_dictionaries_time"] = timer()
    print(
        f"Site and pair dictionaries created. Elapsed time: {times['site_and_pair_dictionaries_time']} s"
    )
    print(
        "================================================================================================================================================================"
    )

# generate k space sampling
kset = make_kset(
    dirs=simulation_parameters["kdirs"], NUMK=simulation_parameters["kset"]
)

# generate weights for k points
wkset = np.ones(len(kset)) / len(kset)

# split the k points based on MPI size
kpcs = np.array_split(kset, size)
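
# The Brillouin zone is sampled on a uniform grid (presumably NUMK points per
# direction listed in kdirs) with equal weights 1/Nk, and the k points are split
# into `size` chunks so that every MPI rank integrates its own subset.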

# use progress bar if available
if rank == root_node and tqdm_imported:
    kpcs[root_node] = tqdm(kpcs[root_node], desc="k loop")

if rank == root_node:
    times["k_set_time"] = timer()
    print(f"k set created. Elapsed time: {times['k_set_time']} s")
    print(
        "================================================================================================================================================================"
    )

# this will contain the three Hamiltonians, rotated to the reference
# directions, needed to calculate the energy variations upon rotation
hamiltonians = []

# iterate over the reference directions (quantization axes)
|
|
for i, orient in enumerate(simulation_parameters["ref_xcf_orientations"]):
|
|
# obtain rotated exchange field and Hamiltonian
|
|
R = RotMa2b(simulation_parameters["scf_xcf_orientation"], orient["o"])
|
|
rot_XCF = np.einsum("ij,jklm->iklm", R, XCF)
|
|
rot_H_XCF = sum(
|
|
[np.kron(rot_XCF[i], tau) for i, tau in enumerate([tau_x, tau_y, tau_z])]
|
|
)
|
|
rot_H_XCF_uc = rot_H_XCF[uc_in_sc_idx]
|
|
|
|
# obtain total Hamiltonian with the rotated exchange field
|
|
rot_H = hTRS + rot_H_XCF # equation 76
|
|
|
|
# store the relevant information of the Hamiltonian
|
|
hamiltonians.append(dict(orient=orient["o"], H=rot_H))
|
|
|
|
if simulation_parameters["calculate_charge"]:
|
|
hamiltonians[-1]["GS"] = np.zeros(
|
|
(simulation_parameters["eset"], rot_H.shape[1], rot_H.shape[2]),
|
|
dtype="complex128",
|
|
)
|
|
hamiltonians[-1]["GS_tmp"] = np.zeros(
|
|
(simulation_parameters["eset"], rot_H.shape[1], rot_H.shape[2]),
|
|
dtype="complex128",
|
|
)
|
|
|
|
# these are the rotations (for now) perpendicular to the quantization axis
|
|
for u in orient["vw"]:
|
|
# section 2.H
|
|
Tu = np.kron(np.eye(NO, dtype=int), tau_u(u))
|
|
Vu1, Vu2 = calc_Vu(rot_H_XCF_uc, Tu)
|
|
|
|
for mag_ent in magnetic_entities:
|
|
idx = mag_ent["spin_box_indices"]
|
|
# fill up the perturbed potentials (for now) based on the on-site projections
|
|
mag_ent["Vu1"][i].append(onsite_projection(Vu1, idx, idx))
|
|
mag_ent["Vu2"][i].append(onsite_projection(Vu2, idx, idx))
|
|
|
|
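
# calc_Vu, used in the loop above, presumably returns the first and second
# order change of the exchange part of the Hamiltonian under an infinitesimal
# rotation about the axis u (section 2.H), and onsite_projection restricts
# these perturbations to the spin box of each magnetic entity.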

if rank == root_node:
    times["reference_rotations_time"] = timer()
    print(
        f"Rotations done perpendicular to quantization axis. Elapsed time: {times['reference_rotations_time']} s"
    )
    print(
        "================================================================================================================================================================"
    )

# provide helpful information to estimate the runtime and memory
# requirements of the Greens function calculations
if rank == root_node:
    print("Starting matrix inversions.")
    if simulation_parameters["padawan_mode"]:
        print("Padawan mode: ")
        print(f"Total number of k points: {kset.shape[0]}")
        print(f"Number of energy samples per k point: {simulation_parameters['eset']}")
        print(f"Total number of directions: {len(hamiltonians)}")
        print(
            f"Total number of matrix inversions: {kset.shape[0] * len(hamiltonians) * simulation_parameters['eset']}"
        )
        print(
            f"The shape of the Hamiltonian and the Greens function is {NO}x{NO}={NO*NO}"
        )
        # https://stackoverflow.com/questions/70746660/how-to-predict-memory-requirement-for-np-linalg-inv
        # memory is O(64 n**2) for complex matrices
        memory_size = getsizeof(hamiltonians[0]["H"].base) / 1024
        print(f"Memory taken by a single Hamiltonian is: {memory_size} KB")
        print(f"Expected memory usage per matrix inversion: {memory_size * 32} KB")
        print(
            f"Expected memory usage per k point for parallel inversion: {memory_size * len(hamiltonians) * simulation_parameters['eset'] * 32} KB"
        )
        print(
            f"Expected memory usage on root node: {len(np.array_split(kset, size)[0]) * memory_size * len(hamiltonians) * simulation_parameters['eset'] * 32 / 1024} MB"
        )
    print(
        "================================================================================================================================================================"
    )

# MPI barrier
comm.Barrier()

# make energy contour
cont = make_contour(
    emin=simulation_parameters["ebot"],
    enum=simulation_parameters["eset"],
    p=simulation_parameters["esetp"],
)
eran = cont.ze
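
# The exchange and anisotropy energies are evaluated as contour integrals of
# the Green's function: cont.ze holds the complex energy nodes and cont.we the
# corresponding quadrature weights used later by int_de_ke.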

# sampling the integrand on the contour and the BZ
for k in kpcs[rank]:
    # weight of k point in BZ integral
    wk = wkset[rank]

    # iterate over reference directions
    for i, hamiltonian_orientation in enumerate(hamiltonians):
        # calculate Hamiltonian and Overlap matrix in a given k point
        H = hamiltonian_orientation["H"]
        HK, SK = hsk(H, ss, dh.sc_off, k)

        if simulation_parameters["parallel_solver_for_Gk"]:
            Gk = parallel_Gk(HK, SK, eran, simulation_parameters["eset"])
        else:
            # solve the Greens function sequentially for the energies, because of the memory bound
            Gk = sequential_GK(HK, SK, eran, simulation_parameters["eset"])

        # saving this for total charge
        if simulation_parameters["calculate_charge"]:
            hamiltonian_orientation["GS_tmp"] += Gk @ SK * wk

        # store the Greens function slice of the magnetic entities
        for mag_ent in magnetic_entities:
            idx = mag_ent["spin_box_indices"]
            mag_ent["Gii_tmp"][i] += onsite_projection(Gk, idx, idx) * wk

        for pair in pairs:
            # add phase shift based on the cell difference
            phase = np.exp(1j * 2 * np.pi * k @ pair["Ruc"].T)

            # get the spin box indices of the two magnetic entities in the pair
            ai = magnetic_entities[pair["ai"]]["spin_box_indices"]
            aj = magnetic_entities[pair["aj"]]["spin_box_indices"]

            # store the Greens function slices of the pair
            pair["Gij_tmp"][i] += onsite_projection(Gk, ai, aj) * phase * wk
            pair["Gji_tmp"][i] += onsite_projection(Gk, aj, ai) / phase * wk
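
# At this point every rank holds its partial Brillouin zone sums. For each
# contour energy z the Green's function was obtained as
# G(z, k) = [z * S(k) - H(k)]^(-1); parallel_Gk presumably batches these
# inversions over the energies, while sequential_GK loops over them to keep
# the memory footprint bounded.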

# sum-reduce the partial results of the MPI nodes
for i in range(len(hamiltonians)):
    # for total charge calculation
    if simulation_parameters["calculate_charge"]:
        comm.Reduce(hamiltonians[i]["GS_tmp"], hamiltonians[i]["GS"], root=root_node)

    for mag_ent in magnetic_entities:
        comm.Reduce(mag_ent["Gii_tmp"][i], mag_ent["Gii"][i], root=root_node)

    for pair in pairs:
        comm.Reduce(pair["Gij_tmp"][i], pair["Gij"][i], root=root_node)
        comm.Reduce(pair["Gji_tmp"][i], pair["Gji"][i], root=root_node)

if rank == root_node:
    times["green_function_inversion_time"] = timer()
    print(
        f"Calculated Greens functions. Elapsed time: {times['green_function_inversion_time']} s"
    )
    print(
        "================================================================================================================================================================"
    )


if rank == root_node:
    # Calculate total charge
    if simulation_parameters["calculate_charge"]:
        for hamiltonian in hamiltonians:
            GS = hamiltonian["GS"]
            traced = np.trace(GS, axis1=1, axis2=2)
            simulation_parameters["charges"].append(int_de_ke(traced, cont.we))
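
    # The contour integral of Tr[G(z) S] essentially counts the occupied
    # states, so the charge obtained for each reference direction can serve
    # as a sanity check of the contour parameters.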

    # iterate over the magnetic entities
    for tracker, mag_ent in enumerate(magnetic_entities):
        # iterate over the quantization axes
        for i, Gii in enumerate(mag_ent["Gii"]):
            storage = []
            # iterate over the first and second order local perturbations
            for Vu1, Vu2 in zip(mag_ent["Vu1"][i], mag_ent["Vu2"][i]):
                # The Szunyogh-Lichtenstein formula
                traced = np.trace(
                    (Vu2 @ Gii + 0.5 * Gii @ Vu1 @ Gii), axis1=1, axis2=2
                )  # this is the on-site projection
                # evaluation of the contour integral
                storage.append(int_de_ke(traced, cont.we))
            # fill up the magnetic entities dictionary with the energies
            magnetic_entities[tracker]["energies"].append(storage)
        # convert to np array
        magnetic_entities[tracker]["energies"] = np.array(
            magnetic_entities[tracker]["energies"]
        )
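
    # Each entry of storage is the band energy change of rotating a single spin
    # about one of the perpendicular directions in orient["vw"], evaluated with
    # the Szunyogh-Lichtenstein formula and integrated along the contour;
    # calculate_anisotropy_tensor turns these into the on-site anisotropy below.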

    # iterate over the pairs
    for tracker, pair in enumerate(pairs):
        # iterate over the quantization axes
        for i, (Gij, Gji) in enumerate(zip(pair["Gij"], pair["Gji"])):
            site_i = magnetic_entities[pair["ai"]]
            site_j = magnetic_entities[pair["aj"]]
            storage = []
            # iterate over the first order local perturbations in all possible orientations for the two sites
            for Vui in site_i["Vu1"][i]:
                for Vuj in site_j["Vu1"][i]:
                    # The Szunyogh-Lichtenstein formula
                    traced = np.trace(
                        (Vui @ Gij @ Vuj @ Gji), axis1=1, axis2=2
                    )  # this is the pair projection
                    # evaluation of the contour integral
                    storage.append(int_de_ke(traced, cont.we))
            # fill up the pairs dictionary with the energies
            pairs[tracker]["energies"].append(storage)
        # convert to np array
        pairs[tracker]["energies"] = np.array(pairs[tracker]["energies"])
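
    # The pair energies collected for every combination of rotation directions
    # are what calculate_exchange_tensor converts below into the isotropic
    # exchange J_iso, the symmetric anisotropic part J_S and the (presumably
    # Dzyaloshinskii-Moriya) vector D.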

    # calculate magnetic parameters
    for mag_ent in magnetic_entities:
        Kxx, Kyy, Kzz, consistency = calculate_anisotropy_tensor(mag_ent)
        mag_ent["K"] = np.array([Kxx, Kyy, Kzz]) * sisl.unit_convert("eV", "meV")
        mag_ent["K_consistency"] = consistency

    for pair in pairs:
        J_iso, J_S, D, J = calculate_exchange_tensor(pair)
        pair["J_iso"] = J_iso * sisl.unit_convert("eV", "meV")
        pair["J_S"] = J_S * sisl.unit_convert("eV", "meV")
        pair["D"] = D * sisl.unit_convert("eV", "meV")
        pair["J"] = J * sisl.unit_convert("eV", "meV")

times["end_time"] = timer()
|
|
print("\n\n\n\n\n")
|
|
print(
|
|
"##################################################################### GROGU OUTPUT #############################################################################"
|
|
)
|
|
|
|
print_parameters(simulation_parameters)
|
|
print_atoms_and_pairs(magnetic_entities, pairs)
|
|
print(
|
|
"################################################################################################################################################################"
|
|
)
|
|
print_runtime_information(times)
|
|
print("")
|
|
|
|
# remove unwanted stuff before saving
|
|
pairs, magnetic_entities = remove_clutter_for_save(pairs, magnetic_entities)
|
|
# create output dictionary with all the relevant data
|
|
results = dict(
|
|
parameters=simulation_parameters,
|
|
magnetic_entities=magnetic_entities,
|
|
pairs=pairs,
|
|
runtime=times,
|
|
)
|
|
|
|
# save results
|
|
save_pickle(simulation_parameters["outfile"], results)
|
|
|
|
print("\n\n\n\n\n")
|
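
# To inspect the saved results afterwards (a sketch, assuming save_pickle
# writes a standard pickle file):
#
#   import pickle
#   with open("Fe3GeTe2_notebook.pickle", "rb") as f:
#       results = pickle.load(f)
#   print(results["pairs"][0]["J_iso"], results["pairs"][0]["D"])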