{
"cells": [
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"outputs": [],
"source": [
"# use numpy number of threads one\n",
"# print(threadpool_info())\n",
"# from threadpoolctl import threadpool_info, threadpool_limits\n",
"# user_api = threadpool_info()[0][\"user_api\"]\n",
"# threadpool_limits(limits=1, user_api=user_api)\n",
"# print(threadpool_info())"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0.14.3\n",
"1.24.4\n"
]
}
],
"source": [
"from sys import getsizeof\n",
"from timeit import default_timer as timer\n",
"\n",
"import sisl\n",
"from src.grogupy import *\n",
"from mpi4py import MPI\n",
"import warnings\n",
"\n",
"# runtime information\n",
"times = dict()\n",
"times[\"start_time\"] = timer()\n",
"########################\n",
"# it works if data is in downloads folder\n",
"########################\n",
"sisl.__version__\n",
"\n",
"try:\n",
" print(sisl.__version__)\n",
"except:\n",
" print(\"sisl version unknown.\")\n",
"\n",
"try:\n",
" print(np.__version__)\n",
"except:\n",
" print(\"numpy version unknown.\")"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[ 0., 1., 0.],\n",
" [ 0., 0., 1.],\n",
" [ 100., 1000., 100.]])"
]
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"asd = read_grogupy_fdf(\"input.fdf\")[0][\"ref_xcf_orientations\"][0][\"vw\"]\n",
"\n",
"asd2 = np.array([100, 1000, 100])\n",
"np.vstack((asd, asd2))"
]
},
{
"cell_type": "code",
"execution_count": 53,
"metadata": {},
"outputs": [],
"source": [
"################################################################################\n",
"#################################### INPUT #####################################\n",
"################################################################################\n",
"path = (\n",
" \"/Users/danielpozsar/Downloads/nojij/Fe3GeTe2/monolayer/soc/lat3_791/Fe3GeTe2.fdf\"\n",
")\n",
"outfile = \"./Fe3GeTe2_notebook\"\n",
"\n",
"# this information needs to be given at the input!!\n",
"scf_xcf_orientation = np.array([0, 0, 1]) # z\n",
"# list of reference directions for around which we calculate the derivatives\n",
"# o is the quantization axis, v and w are two axes perpendicular to it\n",
"# at this moment the user has to supply o,v,w on the input.\n",
"# we can have some default for this\n",
"ref_xcf_orientations = [\n",
" dict(o=np.array([1, 0, 0]), vw=[np.array([0, 1, 0]), np.array([0, 0, 1])]),\n",
" dict(o=np.array([0, 1, 0]), vw=[np.array([1, 0, 0]), np.array([0, 0, 1])]),\n",
" dict(o=np.array([0, 0, 1]), vw=[np.array([1, 0, 0]), np.array([0, 1, 0])]),\n",
"]\n",
"magnetic_entities = [\n",
" dict(atom=3, l=2),\n",
" dict(atom=4, l=2),\n",
" dict(atom=5, l=2),\n",
"]\n",
"pairs = [\n",
" dict(ai=0, aj=1, Ruc=np.array([0, 0, 0])),\n",
" dict(ai=0, aj=2, Ruc=np.array([0, 0, 0])),\n",
" dict(ai=1, aj=2, Ruc=np.array([0, 0, 0])),\n",
" dict(ai=0, aj=2, Ruc=np.array([-1, -1, 0])),\n",
" dict(ai=1, aj=2, Ruc=np.array([-1, -1, 0])),\n",
" dict(ai=0, aj=2, Ruc=np.array([-1, 0, 0])),\n",
" dict(ai=1, aj=2, Ruc=np.array([-1, 0, 0])),\n",
" dict(ai=1, aj=2, Ruc=np.array([-2, 0, 0])),\n",
" dict(ai=1, aj=2, Ruc=np.array([-3, 0, 0])),\n",
"]\n",
"\n",
"# Brilloun zone sampling and Green function contour integral\n",
"kset = 100\n",
"kdirs = \"xy\"\n",
"ebot = -13\n",
"eset = 300\n",
"esetp = 1000\n",
"################################################################################\n",
"#################################### INPUT #####################################\n",
"################################################################################"
]
},
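{
"cell_type": "markdown",
"metadata": {},
"source": [
"The input cell above notes that a default for the perpendicular directions `vw` could be generated automatically. Below is a minimal sketch of how such a default might look with plain numpy: for a given quantization axis `o` it picks a trial vector that is not parallel to `o` and builds two orthonormal perpendicular directions from cross products. The helper name `default_vw` is hypothetical and not part of grogupy."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"\n",
"def default_vw(o):\n",
"    \"\"\"Hypothetical helper: return two unit vectors perpendicular to o.\"\"\"\n",
"    o = np.asarray(o, dtype=float)\n",
"    o = o / np.linalg.norm(o)\n",
"    # pick a trial vector that is guaranteed not to be (nearly) parallel to o\n",
"    trial = np.array([1.0, 0.0, 0.0])\n",
"    if abs(o @ trial) > 0.9:\n",
"        trial = np.array([0.0, 1.0, 0.0])\n",
"    v = np.cross(o, trial)\n",
"    v = v / np.linalg.norm(v)\n",
"    w = np.cross(o, v)\n",
"    return [v, w]\n",
"\n",
"\n",
"# example: a perpendicular pair for o = z (may differ in sign/order from the hand-picked vw above)\n",
"default_vw([0, 0, 1])"
]
},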
{
"cell_type": "code",
"execution_count": 54,
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"300\n",
"100\n",
"3\n"
]
}
],
"source": [
"eset_space = np.linspace(0, eset - 1, eset, dtype=int)\n",
"kset_space = np.linspace(0, kset - 1, kset, dtype=int)\n",
"orient_space = np.linspace(\n",
" 0, len(ref_xcf_orientations) - 1, len(ref_xcf_orientations), dtype=int\n",
")\n",
"print(len(eset_space))\n",
"print(len(kset_space))\n",
"print(len(orient_space))"
]
},
{
"cell_type": "code",
"execution_count": 77,
"metadata": {},
"outputs": [],
"source": [
"from itertools import product\n",
"\n",
"combinations = product(eset_space, eset_space, eset_space)\n",
"asd = np.array_split(np.array(list(combinations)), 128)"
]
},
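{
"cell_type": "markdown",
"metadata": {},
"source": [
"The cell above materializes every index combination as one large array before splitting it into 128 chunks; with `eset = 300` that is 300**3 = 27,000,000 rows. Below is a minimal alternative sketch, using only `itertools`, that yields the chunks lazily instead of building the full array first. `iter_chunks` is a hypothetical helper, and unlike `np.array_split` it produces fixed-size chunks rather than a fixed number of balanced chunks."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from itertools import islice, product\n",
"\n",
"import numpy as np\n",
"\n",
"\n",
"def iter_chunks(iterable, chunk_size):\n",
"    \"\"\"Hypothetical helper: yield successive chunks of at most chunk_size items as arrays.\"\"\"\n",
"    iterator = iter(iterable)\n",
"    while True:\n",
"        chunk = list(islice(iterator, chunk_size))\n",
"        if not chunk:\n",
"            return\n",
"        yield np.array(chunk)\n",
"\n",
"\n",
"# demonstrate on a small index space so the example stays cheap\n",
"small = product(range(4), range(4), range(4))\n",
"for i, chunk in enumerate(iter_chunks(small, 20)):\n",
"    print(i, chunk.shape)"
]
},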
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================================================================================================================================================\n",
"SLURM job ID not found.\n",
"Input file: \n",
"/Users/danielpozsar/Downloads/nojij/Fe3GeTe2/monolayer/soc/lat3_791/Fe3GeTe2.fdf\n",
"Output file: \n",
"./Fe3GeTe2_notebook.pickle\n",
"Number of nodes in the parallel cluster: 1\n",
"================================================================================================================================================================\n",
"Cell [Ang]: \n",
"[[ 3.79100000e+00 0.00000000e+00 0.00000000e+00]\n",
" [-1.89550000e+00 3.28310231e+00 0.00000000e+00]\n",
" [ 1.25954923e-15 2.18160327e-15 2.05700000e+01]]\n",
"================================================================================================================================================================\n",
"DFT axis: \n",
"[0 0 1]\n",
"Quantization axis and perpendicular rotation directions:\n",
"[1 0 0] --» [array([0, 1, 0]), array([0, 0, 1])]\n",
"[0 1 0] --» [array([1, 0, 0]), array([0, 0, 1])]\n",
"[0 0 1] --» [array([1, 0, 0]), array([0, 1, 0])]\n",
"================================================================================================================================================================\n",
"Parameters for the contour integral:\n",
"Number of k points: 3\n",
"k point directions: xy\n",
"Ebot: -13\n",
"Eset: 300\n",
"Esetp: 1000\n",
"================================================================================================================================================================\n",
"Setup done. Elapsed time: 3.275603625 s\n",
"================================================================================================================================================================\n"
]
}
],
"source": [
"# MPI parameters\n",
"comm = MPI.COMM_WORLD\n",
"size = comm.Get_size()\n",
"rank = comm.Get_rank()\n",
"root_node = 0\n",
"\n",
"# rename outfile\n",
"if not outfile.endswith(\".pickle\"):\n",
" outfile += \".pickle\"\n",
"\n",
"simulation_parameters = dict(\n",
" infile=path,\n",
" outfile=outfile,\n",
" scf_xcf_orientation=scf_xcf_orientation,\n",
" ref_xcf_orientations=ref_xcf_orientations,\n",
" kset=kset,\n",
" kdirs=kdirs,\n",
" ebot=ebot,\n",
" eset=eset,\n",
" esetp=esetp,\n",
" parallel_size=size,\n",
")\n",
"\n",
"# if ebot is not given put it 0.1 eV under the smallest energy\n",
"if simulation_parameters[\"ebot\"] is None:\n",
" try:\n",
" eigfile = simulation_parameters[\"infile\"][:-3] + \"EIG\"\n",
" simulation_parameters[\"ebot\"] = read_siesta_emin(eigfile) - 0.1\n",
" except:\n",
" print(\"Could not determine ebot.\")\n",
" print(\"Parameter was not given and .EIG file was not found.\")\n",
"# digestion of the input\n",
"# read sile\n",
"fdf = sisl.get_sile(simulation_parameters[\"infile\"])\n",
"# read in hamiltonian\n",
"dh = fdf.read_hamiltonian()\n",
"simulation_parameters[\"cell\"] = fdf.read_geometry().cell\n",
"\n",
"# unit cell index\n",
"uc_in_sc_idx = dh.lattice.sc_index([0, 0, 0])\n",
"\n",
"if rank == root_node:\n",
" print_parameters(simulation_parameters)\n",
" times[\"setup_time\"] = timer()\n",
" print(f\"Setup done. Elapsed time: {times['setup_time']} s\")\n",
" print(\n",
" \"================================================================================================================================================================\"\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Hamiltonian and exchange field rotated. Elapsed time: 3.976168875 s\n",
"================================================================================================================================================================\n"
]
}
],
"source": [
"hh, ss = build_hh_ss(dh)\n",
"NO = dh.no\n",
"\n",
"# symmetrizing Hamiltonian and overlap matrix to make them hermitian\n",
"for i in range(dh.lattice.sc_off.shape[0]):\n",
" j = dh.lattice.sc_index(-dh.lattice.sc_off[i])\n",
" h1, h1d = hh[i], hh[j]\n",
" hh[i], hh[j] = (h1 + h1d.T.conj()) / 2, (h1d + h1.T.conj()) / 2\n",
" s1, s1d = ss[i], ss[j]\n",
" ss[i], ss[j] = (s1 + s1d.T.conj()) / 2, (s1d + s1.T.conj()) / 2\n",
"\n",
"\n",
"# identifying TRS and TRB parts of the Hamiltonian\n",
"TAUY = np.kron(np.eye(NO), TAU_Y)\n",
"hTR = np.array([TAUY @ hh[i].conj() @ TAUY for i in range(dh.lattice.nsc.prod())])\n",
"hTRS = (hh + hTR) / 2\n",
"hTRB = (hh - hTR) / 2\n",
"\n",
"# extracting the exchange field\n",
"traced = [spin_tracer(hTRB[i]) for i in range(dh.lattice.nsc.prod())] # equation 77\n",
"XCF = np.array(\n",
" [\n",
" np.array([f[\"x\"] / 2 for f in traced]),\n",
" np.array([f[\"y\"] / 2 for f in traced]),\n",
" np.array([f[\"z\"] / 2 for f in traced]),\n",
" ]\n",
") # equation 77\n",
"\n",
"\n",
"# Check if exchange field has scalar part\n",
"max_xcfs = abs(np.array(np.array([f[\"c\"] / 2 for f in traced]))).max()\n",
"if max_xcfs > 1e-12:\n",
" warnings.warn(\n",
" f\"Exchange field has non negligible scalar part. Largest value is {max_xcfs}\"\n",
" )\n",
"\n",
"if rank == root_node:\n",
" times[\"H_and_XCF_time\"] = timer()\n",
" print(\n",
" f\"Hamiltonian and exchange field rotated. Elapsed time: {times['H_and_XCF_time']} s\"\n",
" )\n",
" print(\n",
" \"================================================================================================================================================================\"\n",
" )"
]
},
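{
"cell_type": "markdown",
"metadata": {},
"source": [
"The symmetrization above enforces that H(R) equals the conjugate transpose of H(-R) for every supercell offset, and the time-reversal decomposition should satisfy hTRS + hTRB = hh exactly. A small sanity-check sketch, using only arrays already defined in this notebook, that prints the largest deviations:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"# after symmetrization, H(R) should equal the conjugate transpose of H(-R)\n",
"max_herm_err = 0.0\n",
"for i in range(dh.lattice.sc_off.shape[0]):\n",
"    j = dh.lattice.sc_index(-dh.lattice.sc_off[i])\n",
"    max_herm_err = max(max_herm_err, np.abs(hh[i] - hh[j].T.conj()).max())\n",
"print(\"max Hermiticity violation:\", max_herm_err)\n",
"\n",
"# the TRS and TRB parts must add back up to the full Hamiltonian\n",
"print(\"max |hTRS + hTRB - hh|:\", np.abs(hTRS + hTRB - hh).max())"
]
},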
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Site and pair dictionaries created. Elapsed time: 4.091688166 s\n",
"================================================================================================================================================================\n"
]
}
],
"source": [
"pairs, magnetic_entities = setup_pairs_and_magnetic_entities(\n",
" magnetic_entities, pairs, dh, simulation_parameters\n",
")\n",
"\n",
"if rank == root_node:\n",
" times[\"site_and_pair_dictionaries_time\"] = timer()\n",
" print(\n",
" f\"Site and pair dictionaries created. Elapsed time: {times['site_and_pair_dictionaries_time']} s\"\n",
" )\n",
" print(\n",
" \"================================================================================================================================================================\"\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"k set created. Elapsed time: 22.801980041 s\n",
"================================================================================================================================================================\n"
]
}
],
"source": [
"kset = make_kset(\n",
" dirs=simulation_parameters[\"kdirs\"], NUMK=simulation_parameters[\"kset\"]\n",
") # generate k space sampling\n",
"wkset = np.ones(len(kset)) / len(kset) # generate weights for k points\n",
"kpcs = np.array_split(kset, size) # split the k points based on MPI size\n",
"\n",
"\n",
"if rank == root_node:\n",
" times[\"k_set_time\"] = timer()\n",
" print(f\"k set created. Elapsed time: {times['k_set_time']} s\")\n",
" print(\n",
" \"================================================================================================================================================================\"\n",
" )"
]
},
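{
"cell_type": "markdown",
"metadata": {},
"source": [
"The k points are distributed over the MPI ranks with `np.array_split`, which, unlike `np.split`, also accepts a number of parts that does not divide the array length evenly; the leading chunks simply get one extra element. A tiny illustration of the resulting chunk sizes:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"# 10 dummy k points split over 4 ranks: chunk sizes 3, 3, 2, 2\n",
"dummy_kset = np.arange(10)\n",
"for rank_id, chunk in enumerate(np.array_split(dummy_kset, 4)):\n",
"    print(f\"rank {rank_id}: {len(chunk)} k points -> {chunk}\")"
]
},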
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"9"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# make energy contour\n",
"# we are working in eV now !\n",
"# and sisl shifts E_F to 0 !\n",
"cont = make_contour(\n",
" emin=simulation_parameters[\"ebot\"],\n",
" enum=simulation_parameters[\"eset\"],\n",
" p=simulation_parameters[\"esetp\"],\n",
")\n",
"eran = cont.ze"
]
},
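{
"cell_type": "markdown",
"metadata": {},
"source": [
"The complex energy nodes returned by `make_contour` can be plotted to check the sampling density (sisl shifts the Fermi level to 0 eV, so the contour should end near 0). A minimal plotting sketch using the `cont` object from the cell above; it assumes `cont.ze` holds the complex contour nodes, as used for `eran`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"\n",
"# scatter the contour nodes in the complex energy plane\n",
"ze = np.asarray(cont.ze)\n",
"plt.figure(figsize=(6, 3))\n",
"plt.plot(ze.real, ze.imag, \".\", markersize=3)\n",
"plt.xlabel(\"Re E [eV]\")\n",
"plt.ylabel(\"Im E [eV]\")\n",
"plt.title(\"Energy integration contour\")\n",
"plt.show()"
]
},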
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# this will contain the three hamiltonians in the reference directions needed to calculate the energy variations upon rotation\n",
"hamiltonians = []\n",
"\n",
"# iterate over the reference directions (quantization axes)\n",
"for i, orient in enumerate(simulation_parameters[\"ref_xcf_orientations\"]):\n",
" # obtain rotated exchange field\n",
" R = RotMa2b(simulation_parameters[\"scf_xcf_orientation\"], orient[\"o\"])\n",
" rot_XCF = np.einsum(\"ij,jklm->iklm\", R, XCF)\n",
" rot_H_XCF = sum(\n",
" [np.kron(rot_XCF[i], tau) for i, tau in enumerate([tau_x, tau_y, tau_z])]\n",
" )\n",
" rot_H_XCF_uc = rot_H_XCF[uc_in_sc_idx]\n",
"\n",
" # obtain total Hamiltonian with the rotated exchange field\n",
" rot_H = hTRS + rot_H_XCF # equation 76\n",
"\n",
" hamiltonians.append(\n",
" dict(\n",
" orient=orient[\"o\"],\n",
" H=rot_H,\n",
" GS=np.zeros(\n",
" (simulation_parameters[\"eset\"], rot_H.shape[1], rot_H.shape[2]),\n",
" dtype=\"complex128\",\n",
" ),\n",
" GS_tmp=np.zeros(\n",
" (simulation_parameters[\"eset\"], rot_H.shape[1], rot_H.shape[2]),\n",
" dtype=\"complex128\",\n",
" ),\n",
" )\n",
" ) # store orientation and rotated Hamiltonian\n",
"\n",
" # these are the rotations (for now) perpendicular to the quantization axis\n",
" for u in orient[\"vw\"]:\n",
" Tu = np.kron(np.eye(NO, dtype=int), tau_u(u)) # section 2.H\n",
"\n",
" Vu1, Vu2 = calc_Vu(rot_H_XCF_uc, Tu)\n",
"\n",
" for mag_ent in magnetic_entities:\n",
" idx = mag_ent[\"spin_box_indeces\"]\n",
" # fill up the perturbed potentials (for now) based on the on-site projections\n",
" mag_ent[\"Vu1\"][i].append(Vu1[:, idx][idx, :])\n",
" mag_ent[\"Vu2\"][i].append(Vu2[:, idx][idx, :])\n",
"\n",
"if rank == root_node:\n",
" times[\"reference_rotations_time\"] = timer()\n",
" print(\n",
" f\"Rotations done perpendicular to quantization axis. Elapsed time: {times['reference_rotations_time']} s\"\n",
" )\n",
" print(\n",
" \"================================================================================================================================================================\"\n",
" )"
]
},
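{
"cell_type": "markdown",
"metadata": {},
"source": [
"What the loop above does, spelled out: for each reference orientation $\\hat{o}$ the exchange field is rotated with `R = RotMa2b(scf_xcf_orientation, orient[\"o\"])` and re-attached to the time-reversal symmetric part of the Hamiltonian,\n",
"\n",
"$$H(\\hat{o}) = H_{\\mathrm{TRS}} + \\sum_{i \\in \\{x, y, z\\}} (R\\,\\mathbf{B})_i \\otimes \\tau_i ,$$\n",
"\n",
"where $\\mathbf{B}$ is the exchange field `XCF` extracted earlier and $\\tau_i$ are the Pauli matrices (presumably what the code comments refer to as equation 76). The perturbations $V_u^{(1)}$ and $V_u^{(2)}$ are then built by `calc_Vu` from the unit-cell block of the rotated exchange term and projected onto the spin-box indices of each magnetic entity."
]
},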
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if rank == root_node:\n",
" print(\"Starting matrix inversions.\")\n",
" print(f\"Total number of k points: {kset.shape[0]}\")\n",
" print(f\"Number of energy samples per k point: {simulation_parameters['eset']}\")\n",
" print(f\"Total number of directions: {len(hamiltonians)}\")\n",
" print(\n",
" f\"Total number of matrix inversions: {kset.shape[0] * len(hamiltonians) * simulation_parameters['eset']}\"\n",
" )\n",
" print(f\"The shape of the Hamiltonian and the Greens function is {NO}x{NO}={NO*NO}\")\n",
" # https://stackoverflow.com/questions/70746660/how-to-predict-memory-requirement-for-np-linalg-inv\n",
" # memory is O(64 n**2) for complex matrices\n",
" memory_size = getsizeof(hamiltonians[0][\"H\"].base) / 1024\n",
" print(\n",
" f\"Memory taken by a single Hamiltonian is: {getsizeof(hamiltonians[0]['H'].base) / 1024} KB\"\n",
" )\n",
" print(f\"Expected memory usage per matrix inversion: {memory_size * 32} KB\")\n",
" print(\n",
" f\"Expected memory usage per k point for parallel inversion: {memory_size * len(hamiltonians) * simulation_parameters['eset'] * 32} KB\"\n",
" )\n",
" print(\n",
" f\"Expected memory usage on root node: {len(np.array_split(kset, size)[0]) * memory_size * len(hamiltonians) * simulation_parameters['eset'] * 32 / 1024} MB\"\n",
" )\n",
" print(\n",
" \"================================================================================================================================================================\"\n",
" )\n",
"\n",
"comm.Barrier()\n",
"# ----------------------------------------------------------------------\n",
"\n",
"# make energy contour\n",
"# we are working in eV now !\n",
"# and sisl shifts E_F to 0 !\n",
"cont = make_contour(\n",
" emin=simulation_parameters[\"ebot\"],\n",
" enum=simulation_parameters[\"eset\"],\n",
" p=simulation_parameters[\"esetp\"],\n",
")\n",
"eran = cont.ze\n",
"\n",
"# ----------------------------------------------------------------------\n",
"# sampling the integrand on the contour and the BZ\n",
"for k in kpcs[rank]:\n",
" wk = wkset[rank] # weight of k point in BZ integral\n",
" # iterate over reference directions\n",
" for i, hamiltonian_orientation in enumerate(hamiltonians):\n",
" # calculate Greens function\n",
" H = hamiltonian_orientation[\"H\"]\n",
" HK, SK = hsk(H, ss, dh.sc_off, k)\n",
"\n",
" # solve Greens function sequentially for the energies, because of memory bound\n",
" Gk = sequential_GK(HK, SK, eran, simulation_parameters[\"eset\"])\n",
"\n",
" # saving this for total charge\n",
" hamiltonian_orientation[\"GS_tmp\"] += Gk @ SK * wk\n",
"\n",
" # store the Greens function slice of the magnetic entities (for now) based on the on-site projections\n",
" for mag_ent in magnetic_entities:\n",
" mag_ent[\"Gii_tmp\"][i] += (\n",
" Gk[:, mag_ent[\"spin_box_indeces\"], :][:, :, mag_ent[\"spin_box_indeces\"]]\n",
" * wk\n",
" )\n",
"\n",
" for pair in pairs:\n",
" # add phase shift based on the cell difference\n",
" phase = np.exp(1j * 2 * np.pi * k @ pair[\"Ruc\"].T)\n",
"\n",
" # get the pair orbital sizes from the magnetic entities\n",
" ai = magnetic_entities[pair[\"ai\"]][\"spin_box_indeces\"]\n",
" aj = magnetic_entities[pair[\"aj\"]][\"spin_box_indeces\"]\n",
"\n",
" # store the Greens function slice of the magnetic entities (for now) based on the on-site projections\n",
" pair[\"Gij_tmp\"][i] += Gk[:, ai][..., aj] * phase * wk\n",
" pair[\"Gji_tmp\"][i] += Gk[:, aj][..., ai] / phase * wk\n",
"\n",
"# summ reduce partial results of mpi nodes\n",
"for i in range(len(hamiltonians)):\n",
" # for total charge\n",
" comm.Reduce(hamiltonians[i][\"GS_tmp\"], hamiltonians[i][\"GS\"], root=root_node)\n",
"\n",
" for mag_ent in magnetic_entities:\n",
" comm.Reduce(mag_ent[\"Gii_tmp\"][i], mag_ent[\"Gii\"][i], root=root_node)\n",
"\n",
" for pair in pairs:\n",
" comm.Reduce(pair[\"Gij_tmp\"][i], pair[\"Gij\"][i], root=root_node)\n",
" comm.Reduce(pair[\"Gji_tmp\"][i], pair[\"Gji\"][i], root=root_node)\n",
"\n",
"if rank == root_node:\n",
" times[\"green_function_inversion_time\"] = timer()\n",
" print(\n",
" f\"Calculated Greens functions. Elapsed time: {times['green_function_inversion_time']} s\"\n",
" )\n",
" print(\n",
" \"================================================================================================================================================================\"\n",
" )"
]
},
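{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check of the memory estimate printed above: a `complex128` matrix of dimension n occupies 16 * n**2 bytes, and the cell multiplies the size of a single Hamiltonian by 32 to estimate the working memory of one inversion (apparently following the linked Stack Overflow answer). The sketch below only reproduces that arithmetic for illustrative sizes; the numbers are made up and not taken from the actual run."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative sizes only; the real run derives these from the Hamiltonian and the input\n",
"n = 2 * 100  # hypothetical spin-box dimension (2 x number of orbitals)\n",
"eset_demo = 300  # hypothetical number of energy points\n",
"n_dirs = 3  # three reference orientations\n",
"\n",
"bytes_per_matrix = 16 * n**2  # complex128: 16 bytes per element\n",
"per_inversion_kb = bytes_per_matrix / 1024 * 32  # assumed ~32x overhead for one inversion\n",
"per_kpoint_mb = per_inversion_kb * eset_demo * n_dirs / 1024\n",
"\n",
"print(f\"single matrix: {bytes_per_matrix / 1024:.1f} KB\")\n",
"print(f\"per inversion (x32): {per_inversion_kb:.1f} KB\")\n",
"print(f\"per k point ({n_dirs} directions x {eset_demo} energies): {per_kpoint_mb:.1f} MB\")"
]
},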
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if rank == root_node:\n",
" # Calculate total charge\n",
" for hamiltonian in hamiltonians:\n",
" GS = hamiltonian[\"GS\"]\n",
" traced = np.trace((GS), axis1=1, axis2=2)\n",
" print(\"Total charge: \", int_de_ke(traced, cont.we))\n",
"\n",
" # iterate over the magnetic entities\n",
" for tracker, mag_ent in enumerate(magnetic_entities):\n",
" # iterate over the quantization axes\n",
" for i, Gii in enumerate(mag_ent[\"Gii\"]):\n",
" storage = []\n",
" # iterate over the first and second order local perturbations\n",
" for Vu1, Vu2 in zip(mag_ent[\"Vu1\"][i], mag_ent[\"Vu2\"][i]):\n",
" # The Szunyogh-Lichtenstein formula\n",
" traced = np.trace((Vu2 @ Gii + 0.5 * Gii @ Vu1 @ Gii), axis1=1, axis2=2)\n",
" # evaluation of the contour integral\n",
" storage.append(int_de_ke(traced, cont.we))\n",
"\n",
" # fill up the magnetic entities dictionary with the energies\n",
" magnetic_entities[tracker][\"energies\"].append(storage)\n",
" # convert to np array\n",
" magnetic_entities[tracker][\"energies\"] = np.array(\n",
" magnetic_entities[tracker][\"energies\"]\n",
" )\n",
" print(\"Magnetic entities integrated.\")\n",
"\n",
" # iterate over the pairs\n",
" for tracker, pair in enumerate(pairs):\n",
" # iterate over the quantization axes\n",
" for i, (Gij, Gji) in enumerate(zip(pair[\"Gij\"], pair[\"Gji\"])):\n",
" site_i = magnetic_entities[pair[\"ai\"]]\n",
" site_j = magnetic_entities[pair[\"aj\"]]\n",
"\n",
" storage = []\n",
" # iterate over the first order local perturbations in all possible orientations for the two sites\n",
" for Vui in site_i[\"Vu1\"][i]:\n",
" for Vuj in site_j[\"Vu1\"][i]:\n",
" # The Szunyogh-Lichtenstein formula\n",
" traced = np.trace((Vui @ Gij @ Vuj @ Gji), axis1=1, axis2=2)\n",
" # evaluation of the contour integral\n",
" storage.append(int_de_ke(traced, cont.we))\n",
" # fill up the pairs dictionary with the energies\n",
" pairs[tracker][\"energies\"].append(storage)\n",
" # convert to np array\n",
" pairs[tracker][\"energies\"] = np.array(pairs[tracker][\"energies\"])\n",
"\n",
" print(\"Pairs integrated.\")\n",
"\n",
" # calculate magnetic parameters\n",
" for mag_ent in magnetic_entities:\n",
" Kxx, Kyy, Kzz, consistency = calculate_anisotropy_tensor(mag_ent)\n",
" mag_ent[\"K\"] = np.array([Kxx, Kyy, Kzz]) * sisl.unit_convert(\"eV\", \"meV\")\n",
" mag_ent[\"K_consistency\"] = consistency\n",
"\n",
" for pair in pairs:\n",
" J_iso, J_S, D, J = calculate_exchange_tensor(pair)\n",
" pair[\"J_iso\"] = J_iso * sisl.unit_convert(\"eV\", \"meV\")\n",
" pair[\"J_S\"] = J_S * sisl.unit_convert(\"eV\", \"meV\")\n",
" pair[\"D\"] = D * sisl.unit_convert(\"eV\", \"meV\")\n",
" pair[\"J\"] = J * sisl.unit_convert(\"eV\", \"meV\")\n",
"\n",
" print(\"Magnetic parameters calculated.\")\n",
"\n",
" times[\"end_time\"] = timer()\n",
" print(\n",
" \"##################################################################### GROGU OUTPUT #############################################################################\"\n",
" )\n",
"\n",
" print_parameters(simulation_parameters)\n",
" print_atoms_and_pairs(magnetic_entities, pairs)\n",
" print_runtime_information(times)\n",
"\n",
" pairs, magnetic_entities = remove_clutter_for_save(pairs, magnetic_entities)\n",
" # create output dictionary with all the relevant data\n",
" results = dict(\n",
" parameters=simulation_parameters,\n",
" magnetic_entities=magnetic_entities,\n",
" pairs=pairs,\n",
" runtime=times,\n",
" )\n",
"\n",
" save_pickle(simulation_parameters[\"outfile\"], results)"
]
},
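{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, the pair term evaluated above is a Liechtenstein-type expression (the comments call it the Szunyogh-Lichtenstein formula): the rotational energy costs come from contour integrals of the form\n",
"\n",
"$$E_{ij}^{uu'} \\propto \\operatorname{Im} \\int_C \\mathrm{d}z \\, \\operatorname{Tr}\\left[ V_i^{u} \\, G_{ij}(z) \\, V_j^{u'} \\, G_{ji}(z) \\right],$$\n",
"\n",
"while the on-site (anisotropy) term is built analogously from $\\operatorname{Tr}\\left[ V_u^{(2)} G_{ii} + \\tfrac{1}{2} G_{ii} V_u^{(1)} G_{ii} \\right]$. The traces run over the spin-box orbitals of the magnetic entities; the exact prefactors and the imaginary part are presumably handled inside `int_de_ke` together with the contour weights `cont.we`."
]
},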
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"========================================\n",
" \n",
"Atom Angstrom\n",
"# Label, x y z Sx Sy Sz #Q Lx Ly Lz Jx Jy Jz\n",
"--------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n",
"Te1 1.8955 1.0943 13.1698 -0.0000 0.0000 -0.1543 # 5.9345 -0.0000 0.0000 -0.0537 -0.0000 0.0000 -0.2080 \n",
"Te2 1.8955 1.0943 7.4002 0.0000 -0.0000 -0.1543 # 5.9345 0.0000 -0.0000 -0.0537 0.0000 -0.0000 -0.2080 \n",
"Ge3 -0.0000 2.1887 10.2850 0.0000 0.0000 -0.1605 # 3.1927 -0.0000 0.0000 0.0012 0.0000 0.0000 -0.1593 \n",
"Fe4 -0.0000 0.0000 11.6576 0.0001 -0.0001 2.0466 # 8.3044 0.0000 -0.0000 0.1606 0.0001 -0.0001 2.2072 \n",
"Fe5 -0.0000 0.0000 8.9124 -0.0001 0.0001 2.0466 # 8.3044 -0.0000 0.0000 0.1606 -0.0001 0.0001 2.2072 \n",
"Fe6 1.8955 1.0944 10.2850 0.0000 0.0000 1.5824 # 8.3296 -0.0000 -0.0000 0.0520 -0.0000 0.0000 1.6344 \n",
"==================================================================================================================================\n",
" \n",
"Exchange meV\n",
"--------------------------------------------------------------------------------\n",
"# at1 at2 i j k # d (Ang)\n",
"--------------------------------------------------------------------------------\n",
"Fe4 Fe5 0 0 0 # 2.7452\n",
"Isotropic -82.0854\n",
"DMI 0.12557 -0.00082199 6.9668e-08\n",
"Symmetric-anisotropy -0.60237 -0.83842 -0.00032278 -1.2166e-05 -3.3923e-05\n",
"--------------------------------------------------------------------------------\n",
"Fe4 Fe6 0 0 0 # 2.5835\n",
"Isotropic -41.9627\n",
"DMI 1.1205 -1.9532 0.0018386\n",
"Symmetric-anisotropy 0.26007 -0.00013243 0.12977 -0.069979 -0.042066\n",
"--------------------------------------------------------------------------------\n",
"\n",
"\n",
"On-site meV\n",
"----------------------------------------\n",
"Fe4\n",
"0.16339\t0.16068\t0\t0\t0\t0\n",
"========================================\n"
]
},
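{
"cell_type": "markdown",
"metadata": {},
"source": [
"The pasted reference output above lists, for each pair, an isotropic coupling, a DMI vector and a symmetric anisotropy. These are the standard pieces of a general 3x3 exchange tensor: the isotropic part is the mean of the diagonal, the DMI vector comes from the antisymmetric part, and the symmetric anisotropy is the traceless symmetric remainder. Below is a minimal numpy sketch of that decomposition; it uses the textbook convention and is not necessarily the exact sign convention implemented in `calculate_exchange_tensor`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"\n",
"def decompose_exchange(J):\n",
"    \"\"\"Sketch: split a 3x3 exchange tensor into isotropic, symmetric-anisotropy and DMI parts.\"\"\"\n",
"    J = np.asarray(J, dtype=float)\n",
"    J_iso = np.trace(J) / 3\n",
"    J_sym = (J + J.T) / 2 - J_iso * np.eye(3)  # traceless symmetric part\n",
"    J_antisym = (J - J.T) / 2\n",
"    # DMI vector from the antisymmetric part: D_x = A_yz, D_y = A_zx, D_z = A_xy\n",
"    D = np.array([J_antisym[1, 2], J_antisym[2, 0], J_antisym[0, 1]])\n",
"    return J_iso, J_sym, D\n",
"\n",
"\n",
"# example on a made-up tensor (values are illustrative, not from the run above)\n",
"J_demo = np.array([[1.0, 0.2, 0.0], [-0.2, 1.0, 0.1], [0.0, -0.1, 1.3]])\n",
"J_iso, J_sym, D = decompose_exchange(J_demo)\n",
"print(\"isotropic:\", J_iso)\n",
"print(\"DMI vector:\", D)\n",
"print(\"symmetric anisotropy:\\n\", J_sym)"
]
},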
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}