// Copyright (c) 2013-2023 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

/** \file symmetrize_occupation_matrix.hpp
 *
 *  \brief Symmetrize occupation matrix of the LDA+U method.
 */

#ifndef __SYMMETRIZE_OCCUPATION_MATRIX_HPP__
#define __SYMMETRIZE_OCCUPATION_MATRIX_HPP__

#include "density/occupation_matrix.hpp"

namespace sirius {

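/** Symmetrize the occupation matrix of the LDA+U method.
 *
 *  The on-site (local) occupation matrices are averaged over all crystal symmetry operations:
 *  orbital indices are rotated with the Wigner rotation matrices and spin indices with the
 *  corresponding SU(2) rotation matrices. The inter-site (nonlocal) occupancy matrices are
 *  symmetrized in the same way, taking into account the lattice translation between the atom pair.
 */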
inline void
symmetrize_occupation_matrix(Occupation_matrix& om__)
{
    auto& ctx = om__.ctx();
    auto& uc = ctx.unit_cell();

    if (!ctx.hubbard_correction()) {
        return;
    }

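    /* occupation matrices are averaged over all symmetry operations */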
    auto& sym = uc.symmetry();
    const double f = 1.0 / sym.size();
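    /* keep a copy of the original on-site occupation matrices; they are overwritten below */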
    std::vector<sddk::mdarray<std::complex<double>, 3>> local_tmp;

    local_tmp.resize(om__.local().size());

    for (int at_lvl = 0; at_lvl < static_cast<int>(om__.local().size()); at_lvl++) {
        const int ia = om__.atomic_orbitals(at_lvl).first;
        auto const& atom_type = uc.atom(ia).type();
        /* copy only the levels that contribute to the Hubbard correction;
         * the remaining levels (U = 0) are skipped */
        if (atom_type.lo_descriptor_hub(om__.atomic_orbitals(at_lvl).second).use_for_calculation()) {
            local_tmp[at_lvl] =
                sddk::mdarray<std::complex<double>, 3>(om__.local(at_lvl).size(0), om__.local(at_lvl).size(1), 4);
            copy(om__.local(at_lvl), local_tmp[at_lvl]);
        }
    }
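    /* symmetrize the on-site (local) occupation matrices */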
    for (int at_lvl = 0; at_lvl < static_cast<int>(om__.local().size()); at_lvl++) {
        const int ia = om__.atomic_orbitals(at_lvl).first;
        const auto& atom = uc.atom(ia);
        om__.local(at_lvl).zero();
        /* symmetrize only the levels that contribute to the Hubbard correction;
         * the remaining levels (U = 0) are skipped */
        if (atom.type().lo_descriptor_hub(om__.atomic_orbitals(at_lvl).second).use_for_calculation()) {
            const int il = atom.type().lo_descriptor_hub(om__.atomic_orbitals(at_lvl).second).l();
            const int lmmax_at = 2 * il + 1;
            sddk::mdarray<std::complex<double>, 3> dm_ia(lmmax_at, lmmax_at, 4);
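            /* accumulate the contributions of all symmetry operations */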
            for (int isym = 0; isym < sym.size(); isym++) {
                int pr = sym[isym].spg_op.proper;
                auto eang = sym[isym].spg_op.euler_angles;
                auto rotm = sht::rotation_matrix<double>(4, eang, pr);
                auto spin_rot_su2 = rotation_matrix_su2(sym[isym].spin_rotation);

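                /* index of the atom that transforms into atom ia under the symmetry operation */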
                int iap = sym[isym].spg_op.inv_sym_atom[ia];
                dm_ia.zero();

                int at_lvl1 =
                    om__.find_orbital_index(iap, atom.type().lo_descriptor_hub(om__.atomic_orbitals(at_lvl).second).n(),
                                            atom.type().lo_descriptor_hub(om__.atomic_orbitals(at_lvl).second).l());

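                /* apply the spatial rotation to the orbital (m) indices */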
                for (int ispn = 0; ispn < (ctx.num_mag_dims() == 3 ? 4 : ctx.num_spins()); ispn++) {
                    for (int m1 = 0; m1 < lmmax_at; m1++) {
                        for (int m2 = 0; m2 < lmmax_at; m2++) {
                            for (int m1p = 0; m1p < lmmax_at; m1p++) {
                                for (int m2p = 0; m2p < lmmax_at; m2p++) {
                                    dm_ia(m1, m2, ispn) +=
                                        rotm[il](m1, m1p) * rotm[il](m2, m2p) * local_tmp[at_lvl1](m1p, m2p, ispn) * f;
                                }
                            }
                        }
                    }
                }

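                /* apply the spin rotation; the three branches correspond to the non-magnetic,
                 * collinear and non-collinear cases */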
                if (ctx.num_mag_dims() == 0) {
                    for (int m1 = 0; m1 < lmmax_at; m1++) {
                        for (int m2 = 0; m2 < lmmax_at; m2++) {
                            om__.local(at_lvl)(m1, m2, 0) += dm_ia(m1, m2, 0);
                        }
                    }
                }

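                /* collinear case: rotate the diagonal (up, up) and (down, down) spin blocks */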
                if (ctx.num_mag_dims() == 1) {
                    int const map_s[3][2] = {{0, 0}, {1, 1}, {0, 1}};
                    for (int j = 0; j < 2; j++) {
                        int s1 = map_s[j][0];
                        int s2 = map_s[j][1];

                        for (int m1 = 0; m1 < lmmax_at; m1++) {
                            for (int m2 = 0; m2 < lmmax_at; m2++) {
                                std::complex<double> dm[2][2] = {{dm_ia(m1, m2, 0), 0}, {0, dm_ia(m1, m2, 1)}};

                                for (int s1p = 0; s1p < 2; s1p++) {
                                    for (int s2p = 0; s2p < 2; s2p++) {
                                        om__.local(at_lvl)(m1, m2, j) +=
                                            dm[s1p][s2p] * spin_rot_su2(s1, s1p) * std::conj(spin_rot_su2(s2, s2p));
                                    }
                                }
                            }
                        }
                    }
                }

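                /* non-collinear case: s_idx maps the 2x2 spin block (s1, s2) to the linear storage
                 * index: (0,0) -> 0, (1,1) -> 1, (1,0) -> 2, (0,1) -> 3 */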
                if (ctx.num_mag_dims() == 3) {
                    int s_idx[2][2] = {{0, 3}, {2, 1}};
                    for (int m1 = 0; m1 < lmmax_at; m1++) {
                        for (int m2 = 0; m2 < lmmax_at; m2++) {

                            std::complex<double> dm[2][2];
                            std::complex<double> dm1[2][2] = {{0.0, 0.0}, {0.0, 0.0}};
                            for (int s1 = 0; s1 < ctx.num_spins(); s1++) {
                                for (int s2 = 0; s2 < ctx.num_spins(); s2++) {
                                    dm[s1][s2] = dm_ia(m1, m2, s_idx[s1][s2]);
                                }
                            }

                            for (int i = 0; i < 2; i++) {
                                for (int j = 0; j < 2; j++) {
                                    for (int s1p = 0; s1p < 2; s1p++) {
                                        for (int s2p = 0; s2p < 2; s2p++) {
                                            dm1[i][j] +=
                                                dm[s1p][s2p] * spin_rot_su2(i, s1p) * std::conj(spin_rot_su2(j, s2p));
                                        }
                                    }
                                }
                            }

                            for (int s1 = 0; s1 < ctx.num_spins(); s1++) {
                                for (int s2 = 0; s2 < ctx.num_spins(); s2++) {
                                    om__.local(at_lvl)(m1, m2, s_idx[s1][s2]) += dm1[s1][s2];
                                }
                            }
                        }
                    }
                }
            }
        }
    }
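    /* symmetrize the nonlocal (inter-site) occupancy matrices */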
    if (ctx.cfg().hubbard().nonlocal().size() && ctx.num_mag_dims() == 3) {
        RTE_THROW("non-collinear nonlocal occupancy symmetrization is not implemented");
    }

    /* a pair of "total number, offsets" for the Hubbard orbitals indexing */
    auto r = uc.num_hubbard_wf();

    for (int i = 0; i < static_cast<int>(ctx.cfg().hubbard().nonlocal().size()); i++) {
        auto nl = ctx.cfg().hubbard().nonlocal(i);
        int ia = nl.atom_pair()[0];
        int ja = nl.atom_pair()[1];
        int il = nl.l()[0];
        int jl = nl.l()[1];
        int n1 = nl.n()[0];
        int n2 = nl.n()[1];
        int ib = 2 * il + 1;
        int jb = 2 * jl + 1;
        auto T = nl.T();
        om__.nonlocal(i).zero();
        for (int isym = 0; isym < sym.size(); isym++) {
            int pr = sym[isym].spg_op.proper;
            auto eang = sym[isym].spg_op.euler_angles;
            auto rotm = sht::rotation_matrix<double>(4, eang, pr);
            auto spin_rot_su2 = rotation_matrix_su2(sym[isym].spin_rotation);

            int iap = sym[isym].spg_op.inv_sym_atom[ia];
            int jap = sym[isym].spg_op.inv_sym_atom[ja];

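            /* lattice translation connecting the symmetry-equivalent atom pair with the original pair */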
            auto Ttot = sym[isym].spg_op.inv_sym_atom_T[ja] - sym[isym].spg_op.inv_sym_atom_T[ia] +
                        dot(sym[isym].spg_op.invR, r3::vector<int>(T));

            /* We must search for the correct Hubbard subspace since multiple orbitals
             * may be involved in the Hubbard correction. */

            /* NOTE: the atom order is important here. */
            int at1_lvl = om__.find_orbital_index(iap, n1, il);
            int at2_lvl = om__.find_orbital_index(jap, n2, jl);
            auto const& occ_mtrx = om__.occ_mtrx_T(Ttot);

            sddk::mdarray<std::complex<double>, 3> dm_ia_ja(2 * il + 1, 2 * jl + 1, ctx.num_spins());
            dm_ia_ja.zero();
            /* apply spatial rotation */
            for (int ispn = 0; ispn < ctx.num_spins(); ispn++) {
                for (int m1 = 0; m1 < ib; m1++) {
                    for (int m2 = 0; m2 < jb; m2++) {
                        for (int m1p = 0; m1p < ib; m1p++) {
                            for (int m2p = 0; m2p < jb; m2p++) {
                                dm_ia_ja(m1, m2, ispn) +=
                                    rotm[il](m1, m1p) * rotm[jl](m2, m2p) *
                                    occ_mtrx(om__.offset(at1_lvl) + m1p, om__.offset(at2_lvl) + m2p, ispn) * f;
                            }
                        }
                    }
                }
            }

            if (ctx.num_mag_dims() == 0) {
                for (int m1 = 0; m1 < ib; m1++) {
                    for (int m2 = 0; m2 < jb; m2++) {
                        om__.nonlocal(i)(m1, m2, 0) += dm_ia_ja(m1, m2, 0);
                    }
                }
            }
            if (ctx.num_mag_dims() == 1) {
                int const map_s[3][2] = {{0, 0}, {1, 1}, {0, 1}};
                for (int j = 0; j < 2; j++) {
                    int s1 = map_s[j][0];
                    int s2 = map_s[j][1];

                    for (int m1 = 0; m1 < ib; m1++) {
                        for (int m2 = 0; m2 < jb; m2++) {
                            std::complex<double> dm[2][2] = {{dm_ia_ja(m1, m2, 0), 0}, {0, dm_ia_ja(m1, m2, 1)}};

                            for (int s1p = 0; s1p < 2; s1p++) {
                                for (int s2p = 0; s2p < 2; s2p++) {
                                    om__.nonlocal(i)(m1, m2, j) +=
                                        dm[s1p][s2p] * spin_rot_su2(s1, s1p) * std::conj(spin_rot_su2(s2, s2p));
                                }
                            }
                        }
                    }
                }
            }
        }
    }

}

} // namespace sirius

#endif