{ "cells": [ { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "# use numpy number of threads one\n", "# print(threadpool_info())\n", "# from threadpoolctl import threadpool_info, threadpool_limits\n", "# user_api = threadpool_info()[0][\"user_api\"]\n", "# threadpool_limits(limits=1, user_api=user_api)\n", "# print(threadpool_info())" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "0.14.3\n", "1.24.4\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "[Daniels-MacBook-Air.local:67208] shmem: mmap: an error occurred while determining whether or not /var/folders/yh/dx7xl94n3g52ts3td8qcxjcc0000gn/T//ompi.Daniels-MacBook-Air.501/jf.0/2623209472/sm_segment.Daniels-MacBook-Air.501.9c5b0000.0 could be created.\n" ] } ], "source": [ "from sys import getsizeof\n", "from timeit import default_timer as timer\n", "\n", "import sisl\n", "from src.grogupy import *\n", "from mpi4py import MPI\n", "import warnings\n", "\n", "# runtime information\n", "times = dict()\n", "times[\"start_time\"] = timer()\n", "########################\n", "# it works if data is in downloads folder\n", "########################\n", "sisl.__version__\n", "\n", "try:\n", " print(sisl.__version__)\n", "except:\n", " print(\"sisl version unknown.\")\n", "\n", "try:\n", " print(np.__version__)\n", "except:\n", " print(\"numpy version unknown.\")" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "({'infile': '/Users/danielpozsar/Downloads/nojij/Fe3GeTe2/monolayer/soc/lat3_791/Fe3GeTe2.fdf',\n", " 'outfile': './Fe3GeTe2_notebook',\n", " 'scf_xcf_orientation': array('[ 0 0 1 ]', dtype=' 1e-12:\n", " warnings.warn(\n", " f\"Exchange field has non negligible scalar part. Largest value is {max_xcfs}\"\n", " )\n", "\n", "if rank == root_node:\n", " times[\"H_and_XCF_time\"] = timer()\n", " print(\n", " f\"Hamiltonian and exchange field rotated. Elapsed time: {times['H_and_XCF_time']} s\"\n", " )\n", " print(\n", " \"================================================================================================================================================================\"\n", " )" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "pairs, magnetic_entities = setup_pairs_and_magnetic_entities(\n", " magnetic_entities, pairs, dh, simulation_parameters\n", ")\n", "\n", "if rank == root_node:\n", " times[\"site_and_pair_dictionaries_time\"] = timer()\n", " print(\n", " f\"Site and pair dictionaries created. Elapsed time: {times['site_and_pair_dictionaries_time']} s\"\n", " )\n", " print(\n", " \"================================================================================================================================================================\"\n", " )" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "kset = make_kset(\n", " dirs=simulation_parameters[\"kdirs\"], NUMK=simulation_parameters[\"kset\"]\n", ") # generate k space sampling\n", "wkset = np.ones(len(kset)) / len(kset) # generate weights for k points\n", "kpcs = np.array_split(kset, size) # split the k points based on MPI size\n", "\n", "if tqdm_imported:\n", " kpcs[root_node] = tqdm(kpcs[root_node], desc=\"k loop\")\n", "\n", "if rank == root_node:\n", " times[\"k_set_time\"] = timer()\n", " print(f\"k set created. 
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# the Hamiltonians rotated to the three reference directions, needed to calculate the energy variations upon rotation\n", "hamiltonians = []\n", "\n", "# iterate over the reference directions (quantization axes)\n", "for i, orient in enumerate(simulation_parameters[\"ref_xcf_orientations\"]):\n", "    # obtain the rotated exchange field\n", "    R = RotMa2b(simulation_parameters[\"scf_xcf_orientation\"], orient[\"o\"])\n", "    rot_XCF = np.einsum(\"ij,jklm->iklm\", R, XCF)\n", "    rot_H_XCF = sum(\n", "        [np.kron(rot_XCF[j], tau) for j, tau in enumerate([tau_x, tau_y, tau_z])]\n", "    )\n", "    rot_H_XCF_uc = rot_H_XCF[uc_in_sc_idx]\n", "\n", "    # obtain the total Hamiltonian with the rotated exchange field\n", "    rot_H = hTRS + rot_H_XCF  # equation 76\n", "\n", "    hamiltonians.append(\n", "        dict(\n", "            orient=orient[\"o\"],\n", "            H=rot_H,\n", "            GS=np.zeros(\n", "                (simulation_parameters[\"eset\"], rot_H.shape[1], rot_H.shape[2]),\n", "                dtype=\"complex128\",\n", "            ),\n", "            GS_tmp=np.zeros(\n", "                (simulation_parameters[\"eset\"], rot_H.shape[1], rot_H.shape[2]),\n", "                dtype=\"complex128\",\n", "            ),\n", "        )\n", "    )  # store orientation and rotated Hamiltonian\n", "\n", "    # these are the rotations (for now) perpendicular to the quantization axis\n", "    for u in orient[\"vw\"]:\n", "        Tu = np.kron(np.eye(NO, dtype=int), tau_u(u))  # section 2.H\n", "\n", "        Vu1, Vu2 = calc_Vu(rot_H_XCF_uc, Tu)\n", "\n", "        for mag_ent in magnetic_entities:\n", "            idx = mag_ent[\"spin_box_indeces\"]\n", "            # fill up the perturbed potentials (for now) based on the on-site projections\n", "            mag_ent[\"Vu1\"][i].append(Vu1[:, idx][idx, :])\n", "            mag_ent[\"Vu2\"][i].append(Vu2[:, idx][idx, :])\n", "\n", "if rank == root_node:\n", "    times[\"reference_rotations_time\"] = timer()\n", "    print(\n", "        f\"Rotations done perpendicular to quantization axis. Elapsed time: {times['reference_rotations_time']} s\"\n", "    )\n", "    print(\n", "        \"================================================================================================================================================================\"\n", "    )" ] },
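{ "cell_type": "markdown", "metadata": {}, "source": [ "**Note (schematic).** For each reference orientation $\\hat{n}$ the cell above assembles\n", "$$H(\\hat{n}) = H_{\\mathrm{TRS}} + \\sum_{\\mu \\in \\{x,y,z\\}} \\big(R_{\\hat{n}}\\,\\mathbf{B}^{\\mathrm{xc}}\\big)_{\\mu} \\otimes \\tau_{\\mu},$$\n", "where $H_{\\mathrm{TRS}}$ is the code's `hTRS`, $\\mathbf{B}^{\\mathrm{xc}}$ is `XCF`, $R_{\\hat{n}}$ is the rotation from the SCF quantization axis to $\\hat{n}$ (`RotMa2b`), and $\\tau_{\\mu}$ are the Pauli matrices (the \"equation 76\" referenced in the comment). The perturbations $V^{(1)}_u$ and $V^{(2)}_u$ returned by `calc_Vu` are then projected onto the spin-box indices of each magnetic entity." ] },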
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "if rank == root_node:\n", "    print(\"Starting matrix inversions.\")\n", "    print(f\"Total number of k points: {kset.shape[0]}\")\n", "    print(f\"Number of energy samples per k point: {simulation_parameters['eset']}\")\n", "    print(f\"Total number of directions: {len(hamiltonians)}\")\n", "    print(\n", "        f\"Total number of matrix inversions: {kset.shape[0] * len(hamiltonians) * simulation_parameters['eset']}\"\n", "    )\n", "    print(f\"The shape of the Hamiltonian and the Green's function is {NO}x{NO}={NO*NO}\")\n", "    # https://stackoverflow.com/questions/70746660/how-to-predict-memory-requirement-for-np-linalg-inv\n", "    # memory is O(64 n**2) for complex matrices\n", "    memory_size = getsizeof(hamiltonians[0][\"H\"].base) / 1024\n", "    print(\n", "        f\"Memory taken by a single Hamiltonian is: {getsizeof(hamiltonians[0]['H'].base) / 1024} KB\"\n", "    )\n", "    print(f\"Expected memory usage per matrix inversion: {memory_size * 32} KB\")\n", "    print(\n", "        f\"Expected memory usage per k point for parallel inversion: {memory_size * len(hamiltonians) * simulation_parameters['eset'] * 32} KB\"\n", "    )\n", "    print(\n", "        f\"Expected memory usage on root node: {len(np.array_split(kset, size)[0]) * memory_size * len(hamiltonians) * simulation_parameters['eset'] * 32 / 1024} MB\"\n", "    )\n", "    print(\n", "        \"================================================================================================================================================================\"\n", "    )\n", "\n", "comm.Barrier()\n", "# ----------------------------------------------------------------------\n", "\n", "# make the energy contour\n", "# we are working in eV now!\n", "# and sisl shifts E_F to 0!\n", "cont = make_contour(\n", "    emin=simulation_parameters[\"ebot\"],\n", "    enum=simulation_parameters[\"eset\"],\n", "    p=simulation_parameters[\"esetp\"],\n", ")\n", "eran = cont.ze\n", "\n", "# ----------------------------------------------------------------------\n", "# sampling the integrand on the contour and the BZ\n", "for k in kpcs[rank]:\n", "    wk = wkset[rank]  # weight of the k point in the BZ integral\n", "    # iterate over reference directions\n", "    for i, hamiltonian_orientation in enumerate(hamiltonians):\n", "        # calculate the Green's function\n", "        H = hamiltonian_orientation[\"H\"]\n", "        HK, SK = hsk(H, ss, dh.sc_off, k)\n", "\n", "        # solve the Green's function sequentially over the energies to limit memory use\n", "        Gk = sequential_GK(HK, SK, eran, simulation_parameters[\"eset\"])\n", "\n", "        # saving this for the total charge\n", "        hamiltonian_orientation[\"GS_tmp\"] += Gk @ SK * wk\n", "\n", "        # store the Green's function slice of the magnetic entities (for now) based on the on-site projections\n", "        for mag_ent in magnetic_entities:\n", "            mag_ent[\"Gii_tmp\"][i] += (\n", "                Gk[:, mag_ent[\"spin_box_indeces\"], :][:, :, mag_ent[\"spin_box_indeces\"]]\n", "                * wk\n", "            )\n", "\n", "        for pair in pairs:\n", "            # add the phase shift based on the cell difference\n", "            phase = np.exp(1j * 2 * np.pi * k @ pair[\"Ruc\"].T)\n", "\n", "            # get the pair orbital sizes from the magnetic entities\n", "            ai = magnetic_entities[pair[\"ai\"]][\"spin_box_indeces\"]\n", "            aj = magnetic_entities[pair[\"aj\"]][\"spin_box_indeces\"]\n", "\n", "            # store the Green's function slice of the magnetic entities (for now) based on the on-site projections\n", "            pair[\"Gij_tmp\"][i] += Gk[:, ai][..., aj] * phase * wk\n", "            pair[\"Gji_tmp\"][i] += Gk[:, aj][..., ai] / phase * wk\n", "\n", "# sum-reduce the partial results of the MPI nodes\n", "for i in range(len(hamiltonians)):\n", "    # for the total charge\n", "    comm.Reduce(hamiltonians[i][\"GS_tmp\"], hamiltonians[i][\"GS\"], root=root_node)\n", "\n", "    for mag_ent in magnetic_entities:\n", "        comm.Reduce(mag_ent[\"Gii_tmp\"][i], mag_ent[\"Gii\"][i], root=root_node)\n", "\n", "    for pair in pairs:\n", "        comm.Reduce(pair[\"Gij_tmp\"][i], pair[\"Gij\"][i], root=root_node)\n", "        comm.Reduce(pair[\"Gji_tmp\"][i], pair[\"Gji\"][i], root=root_node)\n", "\n", "if rank == root_node:\n", "    times[\"green_function_inversion_time\"] = timer()\n", "    print(\n", "        f\"Calculated Green's functions. Elapsed time: {times['green_function_inversion_time']} s\"\n", "    )\n", "    print(\n", "        \"================================================================================================================================================================\"\n", "    )" ] },
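{ "cell_type": "markdown", "metadata": {}, "source": [ "**Note (schematic).** For every k point of this rank and every contour energy $z_j$, `sequential_GK` presumably solves the resolvent $G(\\mathbf{k}, z_j) = \\big[z_j S(\\mathbf{k}) - H(\\mathbf{k})\\big]^{-1}$ one energy at a time; these are the matrix inversions counted in the memory estimate above. The on-site blocks $G_{ii}$ and the pair blocks $G_{ij}$, $G_{ji}$ are accumulated with the k-point weight $w_k$ and the Bloch phase $e^{\\pm i 2\\pi \\mathbf{k}\\cdot\\mathbf{R}_{\\mathrm{uc}}}$, and `comm.Reduce` sums the partial results onto the root rank." ] },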
magnetic_entities[pair[\"aj\"]][\"spin_box_indeces\"]\n", "\n", " # store the Greens function slice of the magnetic entities (for now) based on the on-site projections\n", " pair[\"Gij_tmp\"][i] += Gk[:, ai][..., aj] * phase * wk\n", " pair[\"Gji_tmp\"][i] += Gk[:, aj][..., ai] / phase * wk\n", "\n", "# summ reduce partial results of mpi nodes\n", "for i in range(len(hamiltonians)):\n", " # for total charge\n", " comm.Reduce(hamiltonians[i][\"GS_tmp\"], hamiltonians[i][\"GS\"], root=root_node)\n", "\n", " for mag_ent in magnetic_entities:\n", " comm.Reduce(mag_ent[\"Gii_tmp\"][i], mag_ent[\"Gii\"][i], root=root_node)\n", "\n", " for pair in pairs:\n", " comm.Reduce(pair[\"Gij_tmp\"][i], pair[\"Gij\"][i], root=root_node)\n", " comm.Reduce(pair[\"Gji_tmp\"][i], pair[\"Gji\"][i], root=root_node)\n", "\n", "if rank == root_node:\n", " times[\"green_function_inversion_time\"] = timer()\n", " print(\n", " f\"Calculated Greens functions. Elapsed time: {times['green_function_inversion_time']} s\"\n", " )\n", " print(\n", " \"================================================================================================================================================================\"\n", " )" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "if rank == root_node:\n", " # Calculate total charge\n", " for hamiltonian in hamiltonians:\n", " GS = hamiltonian[\"GS\"]\n", " traced = np.trace((GS), axis1=1, axis2=2)\n", " print(\"Total charge: \", int_de_ke(traced, cont.we))\n", "\n", " # iterate over the magnetic entities\n", " for tracker, mag_ent in enumerate(magnetic_entities):\n", " # iterate over the quantization axes\n", " for i, Gii in enumerate(mag_ent[\"Gii\"]):\n", " storage = []\n", " # iterate over the first and second order local perturbations\n", " for Vu1, Vu2 in zip(mag_ent[\"Vu1\"][i], mag_ent[\"Vu2\"][i]):\n", " # The Szunyogh-Lichtenstein formula\n", " traced = np.trace((Vu2 @ Gii + 0.5 * Gii @ Vu1 @ Gii), axis1=1, axis2=2)\n", " # evaluation of the contour integral\n", " storage.append(int_de_ke(traced, cont.we))\n", "\n", " # fill up the magnetic entities dictionary with the energies\n", " magnetic_entities[tracker][\"energies\"].append(storage)\n", " # convert to np array\n", " magnetic_entities[tracker][\"energies\"] = np.array(\n", " magnetic_entities[tracker][\"energies\"]\n", " )\n", " print(\"Magnetic entities integrated.\")\n", "\n", " # iterate over the pairs\n", " for tracker, pair in enumerate(pairs):\n", " # iterate over the quantization axes\n", " for i, (Gij, Gji) in enumerate(zip(pair[\"Gij\"], pair[\"Gji\"])):\n", " site_i = magnetic_entities[pair[\"ai\"]]\n", " site_j = magnetic_entities[pair[\"aj\"]]\n", "\n", " storage = []\n", " # iterate over the first order local perturbations in all possible orientations for the two sites\n", " for Vui in site_i[\"Vu1\"][i]:\n", " for Vuj in site_j[\"Vu1\"][i]:\n", " # The Szunyogh-Lichtenstein formula\n", " traced = np.trace((Vui @ Gij @ Vuj @ Gji), axis1=1, axis2=2)\n", " # evaluation of the contour integral\n", " storage.append(int_de_ke(traced, cont.we))\n", " # fill up the pairs dictionary with the energies\n", " pairs[tracker][\"energies\"].append(storage)\n", " # convert to np array\n", " pairs[tracker][\"energies\"] = np.array(pairs[tracker][\"energies\"])\n", "\n", " print(\"Pairs integrated.\")\n", "\n", " # calculate magnetic parameters\n", " for mag_ent in magnetic_entities:\n", " Kxx, Kyy, Kzz, consistency = calculate_anisotropy_tensor(mag_ent)\n", " mag_ent[\"K\"] = 
{ "cell_type": "raw", "metadata": {}, "source": [ "========================================\n", " \n", "Atom Angstrom\n", "# Label, x y z Sx Sy Sz #Q Lx Ly Lz Jx Jy Jz\n", "--------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n", "Te1 1.8955 1.0943 13.1698 -0.0000 0.0000 -0.1543 # 5.9345 -0.0000 0.0000 -0.0537 -0.0000 0.0000 -0.2080 \n", "Te2 1.8955 1.0943 7.4002 0.0000 -0.0000 -0.1543 # 5.9345 0.0000 -0.0000 -0.0537 0.0000 -0.0000 -0.2080 \n", "Ge3 -0.0000 2.1887 10.2850 0.0000 0.0000 -0.1605 # 3.1927 -0.0000 0.0000 0.0012 0.0000 0.0000 -0.1593 \n", "Fe4 -0.0000 0.0000 11.6576 0.0001 -0.0001 2.0466 # 8.3044 0.0000 -0.0000 0.1606 0.0001 -0.0001 2.2072 \n", "Fe5 -0.0000 0.0000 8.9124 -0.0001 0.0001 2.0466 # 8.3044 -0.0000 0.0000 0.1606 -0.0001 0.0001 2.2072 \n", "Fe6 1.8955 1.0944 10.2850 0.0000 0.0000 1.5824 # 8.3296 -0.0000 -0.0000 0.0520 -0.0000 0.0000 1.6344 \n", "==================================================================================================================================\n", " \n", "Exchange meV\n", "--------------------------------------------------------------------------------\n", "# at1 at2 i j k # d (Ang)\n", "--------------------------------------------------------------------------------\n", "Fe4 Fe5 0 0 0 # 2.7452\n", "Isotropic -82.0854\n", "DMI 0.12557 -0.00082199 6.9668e-08\n", "Symmetric-anisotropy -0.60237 -0.83842 -0.00032278 -1.2166e-05 -3.3923e-05\n", "--------------------------------------------------------------------------------\n", "Fe4 Fe6 0 0 0 # 2.5835\n", "Isotropic -41.9627\n", "DMI 1.1205 -1.9532 0.0018386\n", "Symmetric-anisotropy 0.26007 -0.00013243 0.12977 -0.069979 -0.042066\n", "--------------------------------------------------------------------------------\n", "\n", "\n", "On-site meV\n", "----------------------------------------\n", "Fe4\n", "0.16339\t0.16068\t0\t0\t0\t0\n", "========================================\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": ".venv", "language": "python", "name": "python3" }, "language_info": {
"codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" } }, "nbformat": 4, "nbformat_minor": 2 }