SIRIUS 7.5.0
Electronic structure library and applications
hubbard_matrix.cpp
// Copyright (c) 2013-2021 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

/** \file hubbard_matrix.cpp
 *
 * \brief Base class for Hubbard occupancy and potential matrices.
 */

#include <iomanip>
#include "hubbard_matrix.hpp"

namespace sirius {

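/* Summary of the storage set up by the constructor below: local_ holds one
 * (2l + 1) x (2l + 1) x 4 on-site block per correlated atomic level, and nonlocal_ holds one
 * (2l_i + 1) x (2l_j + 1) x num_spins inter-site block per atom pair listed in the
 * hubbard/nonlocal section of the input. */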
Hubbard_matrix::Hubbard_matrix(Simulation_context& ctx__)
    : ctx_(ctx__)
{
    if (!ctx_.full_potential() && ctx_.hubbard_correction()) {

        /* first compute the number of atomic levels involved in the hubbard correction */
        int num_atomic_level{0};
        atomic_orbitals_.clear();
        for (int ia = 0; ia < ctx_.unit_cell().num_atoms(); ia++) {
            auto& atom_type = ctx_.unit_cell().atom(ia).type();
            if (atom_type.hubbard_correction()) {
                num_atomic_level += atom_type.lo_descriptor_hub().size();

                for (int lo = 0; lo < static_cast<int>(atom_type.lo_descriptor_hub().size()); lo++) {
                    atomic_orbitals_.push_back(std::make_pair(ia, lo));
                }
            }
        }

        local_ = std::vector<sddk::mdarray<std::complex<double>, 3>>(num_atomic_level);

        /* the offsets here match the offsets of the hubbard wave functions but
         * are more fine grained: the hubbard wave functions are offset per atom,
         * while here the offsets are per atomic level. Since all atomic levels
         * of a given atom are stored next to each other, the offset of the first
         * atomic level of an atom is equal to the offset of the first hubbard
         * wave function of that atom. */
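        /* illustration with made-up numbers: for a single hubbard atom carrying two correlated
         * levels with l = 2 and l = 1, the loop below produces offset_ = {0, 5}; the d level
         * occupies rows/columns 0..4 (mmax = 5) and the p level starts right after it at 5
         * (mmax = 3), so the total size is 8 */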
        offset_ = std::vector<int>(num_atomic_level, 0);

        int size{0};
        for (int at_lvl = 0; at_lvl < static_cast<int>(local_.size()); at_lvl++) {
            offset_[at_lvl] = size;
            int ia = atomic_orbitals_[at_lvl].first;
            auto& atom_type = ctx_.unit_cell().atom(ia).type();
            int lo_ind = atomic_orbitals_[at_lvl].second;
            int l = atom_type.lo_descriptor_hub(lo_ind).l();
            int mmax = 2 * l + 1;

            local_[at_lvl] =
                sddk::mdarray<std::complex<double>, 3>(mmax, mmax, 4, sddk::memory_t::host, "local_hubbard");
            local_[at_lvl].zero();
            size += mmax;
        }

        nonlocal_.clear();
        nonlocal_ = std::vector<sddk::mdarray<std::complex<double>, 3>>(ctx_.cfg().hubbard().nonlocal().size());
        for (int i = 0; i < static_cast<int>(ctx_.cfg().hubbard().nonlocal().size()); i++) {
            auto nl = ctx_.cfg().hubbard().nonlocal(i);
            int il = nl.l()[0];
            int jl = nl.l()[1];
            nonlocal_[i] = sddk::mdarray<std::complex<double>, 3>(2 * il + 1, 2 * jl + 1, ctx_.num_spins(),
                                                                  sddk::memory_t::host, "nonlocal_hubbard");
            nonlocal_[i].zero();
        }
    }
}

void
Hubbard_matrix::access(std::string const& what__, std::complex<double>* occ__, int ld__)
{
    if (!(what__ == "get" || what__ == "set")) {
        std::stringstream s;
        s << "wrong access label: " << what__;
        RTE_THROW(s);
    }

    sddk::mdarray<std::complex<double>, 4> occ_mtrx;
    /* in non-collinear case the occupancy matrix is complex */
    if (ctx_.num_mag_dims() == 3) {
        occ_mtrx = sddk::mdarray<std::complex<double>, 4>(occ__, ld__, ld__, 4, ctx_.unit_cell().num_atoms());
    } else {
        occ_mtrx = sddk::mdarray<std::complex<double>, 4>(occ__, ld__, ld__, ctx_.num_spins(),
                                                          ctx_.unit_cell().num_atoms());
    }
    if (what__ == "get") {
        occ_mtrx.zero();
    }

    for (int at_lvl = 0; at_lvl < static_cast<int>(local().size()); at_lvl++) {
        const int ia1 = atomic_orbitals(at_lvl).first;
        const auto& atom = ctx_.unit_cell().atom(ia1);
        const int lo = atomic_orbitals(at_lvl).second;
        if (atom.type().lo_descriptor_hub(lo).use_for_calculation()) {
            const int l = atom.type().lo_descriptor_hub(lo).l();
            const int offset = offset_[at_lvl];
            for (int m1 = -l; m1 <= l; m1++) {
                for (int m2 = -l; m2 <= l; m2++) {
                    if (what__ == "get") {
                        for (int j = 0; j < ((ctx_.num_mag_dims() == 3) ? 4 : ctx_.num_spins()); j++) {
                            occ_mtrx(offset + l + m1, offset + l + m2, j, ia1) =
                                this->local(at_lvl)(l + m1, l + m2, j);
                        }
                    } else {
                        for (int j = 0; j < ((ctx_.num_mag_dims() == 3) ? 4 : ctx_.num_spins()); j++) {
                            this->local(at_lvl)(l + m1, l + m2, j) =
                                occ_mtrx(offset + l + m1, offset + l + m2, j, ia1);
                        }
                    }
                }
            }
        }
    }
}
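
/* Usage sketch (illustrative only; the buffer sizing assumes the non-collinear case with four
 * spin blocks, and ctx / hubbard_matrix name the caller's Simulation_context and Hubbard_matrix
 * objects): a caller could copy all local occupancy blocks into an externally owned buffer with
 *
 *     std::vector<std::complex<double>> occ(ld * ld * 4 * ctx.unit_cell().num_atoms());
 *     hubbard_matrix.access("get", occ.data(), ld);
 *
 * and push modified values back with access("set", occ.data(), ld). The occ_mtrx created above
 * is a non-owning view of the caller's buffer: "get" writes into it and "set" reads from it. */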

void
Hubbard_matrix::print_local(int at_lvl__, std::ostream& out__) const
{
    int const prec{5};
    int const width{10};

    auto print_number = [&](double x) { out__ << std::setw(width) << std::setprecision(prec) << std::fixed << x; };
    auto const& atom = ctx_.unit_cell().atom(atomic_orbitals_[at_lvl__].first);

    out__ << "level : " << atom.type().lo_descriptor_hub(atomic_orbitals_[at_lvl__].second).n();
    out__ << " l: " << atom.type().lo_descriptor_hub(atomic_orbitals_[at_lvl__].second).l() << std::endl;
    const int l = atom.type().lo_descriptor_hub(atomic_orbitals_[at_lvl__].second).l();
    if (ctx_.num_mag_dims() != 3) {
        int mmax = 2 * l + 1;
        for (int is = 0; is < ctx_.num_spins(); is++) {
            out__ << hbar(width * mmax, '-') << std::endl;
            bool has_imag{false};
            for (int m = 0; m < mmax; m++) {
                for (int mp = 0; mp < mmax; mp++) {
                    if (std::abs(std::imag(this->local(at_lvl__)(m, mp, is))) > 1e-12) {
                        has_imag = true;
                    }
                    print_number(std::real(this->local(at_lvl__)(m, mp, is)));
                }
                out__ << std::endl;
            }
            if (has_imag) {
                out__ << "imaginary part:" << std::endl;
                for (int m = 0; m < mmax; m++) {
                    for (int mp = 0; mp < mmax; mp++) {
                        print_number(std::imag(this->local(at_lvl__)(m, mp, is)));
                    }
                    out__ << std::endl;
                }
            }
        }
        out__ << hbar(width * mmax, '-') << std::endl;
    } else {
        int mmax = 2 * l + 1;
        out__ << hbar(2 * width * mmax + 3, '-') << std::endl;
        for (int m = 0; m < mmax; m++) {
            for (int mp = 0; mp < mmax; mp++) {
                print_number(std::real(this->local(at_lvl__)(m, mp, 0)));
            }
            out__ << " | ";
            for (int mp = 0; mp < mmax; mp++) {
                print_number(std::real(this->local(at_lvl__)(m, mp, 2)));
            }
            out__ << std::endl;
        }
        out__ << hbar(2 * width * mmax + 3, '-') << std::endl;
        for (int m = 0; m < mmax; m++) {
            for (int mp = 0; mp < mmax; mp++) {
                print_number(std::real(this->local(at_lvl__)(m, mp, 3)));
            }
            out__ << " | ";
            for (int mp = 0; mp < mmax; mp++) {
                print_number(std::real(this->local(at_lvl__)(m, mp, 1)));
            }
            out__ << std::endl;
        }
        out__ << hbar(2 * width * mmax + 3, '-') << std::endl;
    }
}

void
Hubbard_matrix::print_nonlocal(int idx__, std::ostream& out__) const
{
    auto nl = ctx_.cfg().hubbard().nonlocal(idx__);
    int ia = nl.atom_pair()[0];
    int ja = nl.atom_pair()[1];
    int il = nl.l()[0];
    int jl = nl.l()[1];
    const int jb = 2 * jl + 1;
    const int ib = 2 * il + 1;
    r3::vector<int> T(nl.T());

    r3::vector<double> r = ctx_.unit_cell().atom(ja).position() + T - ctx_.unit_cell().atom(ia).position();
    /* convert to Cartesian coordinates */
    auto rc = dot(ctx_.unit_cell().lattice_vectors(), r);

    out__ << "atom: " << ia << ", l: " << il << " -> atom: " << ja << ", l: " << jl << ", T: " << T
          << ", r: " << rc << std::endl;

    int const prec{5};
    int const width{10};
    auto print_number = [&](double x) { out__ << std::setw(width) << std::setprecision(prec) << std::fixed << x; };

    if (ctx_.num_mag_dims() != 3) {
        for (int is = 0; is < ctx_.num_spins(); is++) {
            out__ << hbar(width * jb, '-') << std::endl;
            bool has_imag{false};
            for (int m = 0; m < ib; m++) {
                for (int mp = 0; mp < jb; mp++) {
                    if (std::abs(std::imag(this->nonlocal(idx__)(m, mp, is))) > 1e-12) {
                        has_imag = true;
                    }
                    print_number(std::real(this->nonlocal(idx__)(m, mp, is)));
                }
                out__ << std::endl;
            }
            if (has_imag) {
                out__ << "imaginary part:" << std::endl;
                for (int m = 0; m < ib; m++) {
                    for (int mp = 0; mp < jb; mp++) {
                        print_number(std::imag(this->nonlocal(idx__)(m, mp, is)));
                    }
                    out__ << std::endl;
                }
            }
        }
        out__ << hbar(width * jb, '-') << std::endl;
    }
    //} else {
    //    int l = atom.type().indexr_hub().am(0).l();
    //    int mmax = 2 * l + 1;
    //    draw_bar(2 * width * mmax + 3);
    //    for (int m = 0; m < mmax; m++) {
    //        for (int mp = 0; mp < mmax; mp++) {
    //            print_number(std::real(this->local(ia__)(m, mp, 0)));
    //        }
    //        out__ << " | ";
    //        for (int mp = 0; mp < mmax; mp++) {
    //            print_number(std::real(this->local(ia__)(m, mp, 2)));
    //        }
    //        out__ << std::endl;
    //    }
    //    draw_bar(2 * width * mmax + 3);
    //    for (int m = 0; m < mmax; m++) {
    //        for (int mp = 0; mp < mmax; mp++) {
    //            print_number(std::real(this->local(ia__)(m, mp, 3)));
    //        }
    //        out__ << " | ";
    //        for (int mp = 0; mp < mmax; mp++) {
    //            print_number(std::real(this->local(ia__)(m, mp, 1)));
    //        }
    //        out__ << std::endl;
    //    }
    //    draw_bar(2 * width * mmax + 3);
    //}
}

void
Hubbard_matrix::zero()
{
    for (int ia = 0; ia < static_cast<int>(local_.size()); ia++) {
        local_[ia].zero();
    }

    for (int i = 0; i < static_cast<int>(ctx_.cfg().hubbard().nonlocal().size()); i++) {
        nonlocal_[i].zero();
    }
}

} // namespace sirius