From 616ca5abfc19a614c086e5bf9360b348d144f5d4 Mon Sep 17 00:00:00 2001 From: Prefetch Date: Sun, 23 Jul 2023 14:48:58 +0200 Subject: Initial commit for publication --- .gitignore | 2 + atom.py | 158 +++++++++++++++++++++++++++++++++++++++++++++++ crystal.py | 138 +++++++++++++++++++++++++++++++++++++++++ density.py | 198 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ eigen.py | 187 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ element.py | 145 +++++++++++++++++++++++++++++++++++++++++++ main.py | 127 ++++++++++++++++++++++++++++++++++++++ visualize.py | 76 +++++++++++++++++++++++ xc.py | 196 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 9 files changed, 1227 insertions(+) create mode 100644 .gitignore create mode 100644 atom.py create mode 100644 crystal.py create mode 100644 density.py create mode 100644 eigen.py create mode 100644 element.py create mode 100644 main.py create mode 100644 visualize.py create mode 100644 xc.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b0204ef --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +/__pycache__/* +/result.csv diff --git a/atom.py b/atom.py new file mode 100644 index 0000000..c962a05 --- /dev/null +++ b/atom.py @@ -0,0 +1,158 @@ +from math import * +from scipy.special import erf, sph_harm, eval_genlaguerre + +import numpy as np + +import element + + +# Atomic Coulomb potential, modelled using HGH pseudopotentials. +# See https://arxiv.org/abs/cond-mat/9803286 for details. +class Atom: + def __init__(self, s, symbol, pos): + self.symbol = symbol + self.params = element.Element(symbol) # get element data + self.position = pos + + # Set up local grid in spherical coordinates + self.r = np.zeros((s.Nres, s.Nres, s.Nres)) + self.theta = np.zeros((s.Nres, s.Nres, s.Nres)) + self.phi = np.zeros((s.Nres, s.Nres, s.Nres)) + for i in range(s.Nres): + for j in range(s.Nres): + for k in range(s.Nres): + x = s.rx[i, j, k] + y = s.ry[i, j, k] + z = s.rz[i, j, k] + + r = sqrt((x - pos[0])**2 + (y - pos[1])**2 + (z - pos[2])**2) + if r == 0: + theta = 0 + phi = 0 + else: + theta = acos((z - pos[2]) / r) + phi = atan2((y - pos[1]), (x - pos[0])) + + self.r[i, j, k] = r + self.theta[i, j, k] = theta + self.phi[i, j, k] = phi + + # Initialize and cache orbitals for nonlocal potential contribution + self.orbitals = {} + for l in range(0, 3): + self.orbitals[l] = {} + for i in range(1, 4): + p = self.get_projector(l, i) + self.orbitals[l][i] = {} + for m in range(-l, l + 1): + Y = self.cubic_harmonic(m, l) + self.orbitals[l][i][m] = p * Y + + + # Local potential representing short-range interactions. + # This is Eq.(1), but without the long-range erf-term: + def get_local_potential(self): + V = np.zeros_like(self.r) + + rr = self.r / self.params.rloc # relative r + for i in range(4): + V += self.params.Cloc[i] * rr**(2 * i) * np.exp(-0.5 * rr**2) + + return V + + + # The local erf-term decays as 1/r, which is problematic for periodicity. + # Instead, we replace it with a Hartree potential from a Gaussian charge. + # This is exact: multiply first term of Eq.(5) by g^2 (from Poisson). 
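+    # Sketch of that reasoning, for reference: the omitted erf-term is
+    #     V(r) = -(Zion / r) * erf(r / (sqrt(2) * rloc)),
+    # which is exactly the Coulomb potential of the normalized Gaussian charge
+    #     rho(r) = Zion * (alpha / pi)**(3/2) * exp(-alpha * r**2),
+    # with alpha = 1 / (2 * rloc**2). Passing -rho(r) through the periodic
+    # Poisson solver in density.py (V_q = 4 pi rho_q / q^2) recovers this term,
+    # minus its divergent q=0 component, which is exactly what we want here.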
+ def get_local_density(self): + alpha = 0.5 / self.params.rloc**2 + rho = 4 * alpha**(3/2) * np.exp(-alpha * self.r**2) / sqrt(pi) + + # Normalize in spherical coordinates: 4pi int rho(r) r^2 dr = Zion + rho *= self.params.Zion / (4 * pi) + return rho + + + # Given a psi in real space, apply nonlocal potential by integration + # Vpsi = sum_{lijm} |p_i> h_{ij} + def apply_nonlocal_potential(self, psi): + Vpsi = np.zeros_like(psi) + + # Note that these if-statements provide a significant speedup + for l in range(0, 3): + if self.params.rl[l] != 0: + for i in range(1, 4): + for j in range(1, 4): + if self.params.h[l][i][j] != 0: + for m in range(-l, l + 1): + # Inner product normalization is handled by caller + ip = np.vdot(self.orbitals[l][j][m], psi) + Vpsi += self.orbitals[l][i][m] * self.params.h[l][i][j] * ip + + return Vpsi + + + # Calculate projectors p_i^l(r) for nonlocal potential, see Eq.(3) + def get_projector(self, l, i): + p = np.zeros_like(self.r) + + if self.params.rl[l] != 0: + p = sqrt(2) * self.r**(l + 2 * (i - 1)) + p *= np.exp(-0.5 * (self.r / self.params.rl[l])**2) + p /= self.params.rl[l]**(l + 0.5 * (4 * i - 1)) + p /= sqrt(gamma(l + 0.5 * (4 * i - 1))) + + return p + + + # Guess this atom's electron density (to initialize main DFT loop) + def guess_density(self): + # We choose a spherical gaussian with standard deviation sigma + sigma = 1 + rho = (sqrt(2 / pi) / sigma**3) * np.exp(- 0.5 * self.r**2 / sigma**2) + + # Normalize in spherical coordinates: 4pi int rho(r) r^2 dr = Zion + rho /= self.params.Zion / (4 * pi) + return rho + + + # Guess this atom's eigenstates (to initialize eigensolver). + # We simply choose hydrogen orbitals, without any modifications: + def guess_orbitals(self, Norb): + psis = [] + for n in range(1, 4): + rho = 2 * self.r / n + for l in range(0, n): + for m in range(-l, l + 1): + psi = self.cubic_harmonic(m, l) + psi *= np.exp(-0.5 * rho) * rho**l + psi *= eval_genlaguerre(n - l - 1, 2 * l + 1, rho) + psis.append(psi) + + return psis[0 : Norb] + + + # Use cubic harmonics instead of spherical harmonics. This makes no + # physical difference, but it avoids confusion when visualizing. 
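+    # For example, for l = 1 the cubic (real) harmonics are the familiar
+    # p_x, p_y, p_z lobes, i.e. real combinations of Y_1^(-1) and Y_1^(+1)
+    # proportional to x/r, y/r and z/r, whereas the complex Y_1^(+-1) carry
+    # e^(+-i phi) phases and only their moduli are easy to plot directly.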
+ def cubic_harmonic(self, m, l): + # Note that phi and theta are swapped according to SciPy + Yp = sph_harm( abs(m), l, self.phi, self.theta) + Yn = sph_harm(-abs(m), l, self.phi, self.theta) + + f = np.zeros_like(self.r) + if m == -3: + f = (1 / sqrt(2)) * (Yn - Yp) + if m == -2: + f = (1j / sqrt(2)) * (Yn - Yp) + if m == -1: + f = (1 / sqrt(2)) * (Yn - Yp) + if m == 0: + f = Yp + if m == 1: + f = (1j / sqrt(2)) * (Yn + Yp) + if m == 2: + f = (1 / sqrt(2)) * (Yn + Yp) + if m == 3: + f = (1j / sqrt(2)) * (Yn + Yp) + + return np.real(f) diff --git a/crystal.py b/crystal.py new file mode 100644 index 0000000..58a1cee --- /dev/null +++ b/crystal.py @@ -0,0 +1,138 @@ +from math import * + +import numpy as np + + +# Grid generator for direct and reciprocal space +class UnitCell: + def __init__(self, N, basis): + self.N = N + self.d = basis / N # displacement basis of grid points + + + # Generate NxNxN grid to represent functions inside the cell + def grid_points(self): + gx = np.zeros((self.N, self.N, self.N)) + gy = np.zeros_like(gx) + gz = np.zeros_like(gx) + + if self.N % 2 == 0: + ns = range(-self.N // 2, self.N // 2) + else: + ns = range(-(self.N - 1) // 2, (self.N - 1) // 2 + 1) + + for i in range(self.N): + for j in range(self.N): + for k in range(self.N): + v = ns[i] * self.d[0] + ns[j] * self.d[1] + ns[k] * self.d[2] + gx[i, j, k] = v[0] + gy[i, j, k] = v[1] + gz[i, j, k] = v[2] + + return gx, gy, gz + + + # Volume of smallest parallelepiped representable on this grid + def differential_volume(self): + dV = np.dot(self.d[0], np.cross(self.d[1], self.d[2])) + return abs(dV) + + + +# Base class for periodic structures, don't use it directly +class Crystal: + def __init__(self, N, basis): + self.a = basis + self.b = self.get_reciprocal_basis() + self.cell_direct = UnitCell(N, self.a) + self.cell_fourier = UnitCell(N, self.b * N) + + + def get_reciprocal_basis(self): + V = np.dot(self.a[0], np.cross(self.a[1], self.a[2])) + b1 = (2 * pi / V) * np.cross(self.a[1], self.a[2]) + b2 = (2 * pi / V) * np.cross(self.a[2], self.a[0]) + b3 = (2 * pi / V) * np.cross(self.a[0], self.a[1]) + return np.array([b1, b2, b3]) + + + def get_grid_direct(self): + rx, ry, rz = self.cell_direct.grid_points() + dr = self.cell_direct.differential_volume() + return rx, ry, rz, dr + + + def get_grid_fourier(self): + qx, qy, qz = self.cell_fourier.grid_points() + dq = self.cell_fourier.differential_volume() + return qx, qy, qz, dq + + + # Convert sequence of letters to coordinates for band structure plots. + # The letters' meanings are stored in a dictionary self.bz_points. 
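+    # Rough usage sketch (classes and points defined further down this file):
+    #     fcc = CubicFaceCentered(16, 10.261)
+    #     kL, kG, kX = fcc.get_brillouin_zone_path("LGX")
+    # returns the Cartesian k-vectors of L, Gamma and X, assembled from the
+    # fractional coordinates in bz_points and the reciprocal basis self.b.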
+ def get_brillouin_zone_path(self, chars): + path = [] + for c in chars: + k = self.bz_points[c] # should probably catch KeyError here + p = k[0] * self.b[0] + k[1] * self.b[1] + k[2] * self.b[2] + path.append(p) + + return path + + + +# Derived class for simple cubic (sc) crystals +class CubicSimple(Crystal): + def __init__(self, N, a): + a1 = np.array((a, 0, 0)) + a2 = np.array((0, a, 0)) + a3 = np.array((0, 0, a)) + super().__init__(N, np.array([a1, a2, a3])) + + # Taken from http://lampx.tugraz.at/~hadley/ss1/bzones/sc.php + bz_points = { + "G" : (0, 0, 0), + "R" : (0.5, 0.5, 0.5), + "X" : (0, 0.5, 0), + "M" : (0.5, 0.5, 0) + } + + + +# Derived class for body-centered cubic (bcc) crystals +class CubicBodyCentered(Crystal): + def __init__(self, N, a): + a1 = np.array((-a/2, a/2, a/2)) + a2 = np.array(( a/2, -a/2, a/2)) + a3 = np.array(( a/2, a/2, -a/2)) + super().__init__(N, np.array([a1, a2, a3])) + + # Taken from http://lampx.tugraz.at/~hadley/ss1/bzones/bcc.php + bz_points = { + "G" : ( 0, 0, 0), + "H" : (-0.5, 0.5, 0.5), + "P" : ( 0.25, 0.25, 0.25), + "N" : ( 0, 0.5, 0) + } + + + +# Derived class for face-centered cubic (fcc) crystals +class CubicFaceCentered(Crystal): + def __init__(self, N, a): + a1 = np.array((0, a/2, a/2)) + a2 = np.array((a/2, 0, a/2)) + a3 = np.array((a/2, a/2, 0)) + super().__init__(N, np.array([a1, a2, a3])) + + # Taken from http://lampx.tugraz.at/~hadley/ss1/bzones/fcc.php + bz_points = { + "G" : (0, 0, 0), + "X" : (0, 0.5, 0.5), + "L" : (0.5, 0.5, 0.5), + "W" : (0.25, 0.75, 0.5), + "U" : (0.25, 0.625, 0.625), + "K" : (0.375, 0.75, 0.375) + } + + diff --git a/density.py b/density.py new file mode 100644 index 0000000..0162baf --- /dev/null +++ b/density.py @@ -0,0 +1,198 @@ +from math import * +from itertools import permutations + +import numpy as np +import numpy.fft +import numpy.linalg + +import eigen +import xc + + +# Get electron density by filling orbitals with electrons +def get_density(psis, Nele): + rho = np.zeros_like(psis[0], dtype="float64") + for n in range(Nele): + rho += abs(psis[n // 2])**2 + return rho + + +# Given a density, calculate the Hartree (Coulomb) potential +def get_hartree_potential(s, rho_r): + rho_q = np.fft.fftn(rho_r) + + V_q = np.zeros_like(rho_q) + for i in range(s.Nres): + for j in range(s.Nres): + for k in range(s.Nres): + # Ignore the q=0 component, which gives a constant shift. + # This makes the cell neutral, eliminating long-range effects. + if s.q2[i, j, k] != 0: + V_q[i, j, k] = 4 * pi * rho_q[i, j, k] / s.q2[i, j, k] + V_r = np.fft.ifftn(V_q) + + return np.real(V_r) + + +# Pulay mixing: try this one weird trick to accelerate your convergence! +# Linearly combine several old densities to get a new density, +# which is hopefully closer to the self-consistent optimum. +def mix_densities(fhist, Rhist): + # Number of old functions to combine + Nmix = min(5, max(2, len(fhist))) + + # We need at least two old densities to work with. + # On the first SCF iteration, we return the raw output density. 
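+    # (For reference, the recipe applied below once a history exists: with
+    # residuals R_i = rho_out,i - rho_in,i, pick coefficients alpha_i that
+    # sum to 1 and minimize |sum_i alpha_i R_i|^2. Writing A_ij = <R_i, R_j>,
+    # the minimizer is alpha = A^-1 1 / (1^T A^-1 1), and the next input
+    # density is sum_i alpha_i (rho_in,i + beta * R_i).)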
+ if len(fhist) < Nmix: + return fhist[-1] + Rhist[-1] + + # Number of initial elements to ignore in the history + offset = len(fhist) - Nmix + + # Set up the system matrix + A = np.zeros((Nmix, Nmix), dtype=fhist[-1].dtype) + for r in range(Nmix): + for c in range(Nmix): + A[r, c] = np.vdot(Rhist[offset + r], Rhist[offset + c]) + + # Solve the system using matrix inversion, see for example + # Eq.(38) in https://doi.org/10.1103/PhysRevB.54.11169 + try: + Ainv = np.linalg.inv(A) + alphas = Ainv.sum(axis=1) + alphas /= alphas.sum() + except np.linalg.LinAlgError: # A is singular + alphas = np.zeros(Nmix) + alphas[-1] = 1.0 + + # Construct next function as a linear combination of history. + # Beta "nudges" result, to prevent getting stuck in a subspace. + beta = 0.1 + f = np.zeros_like(fhist[-1]) + for n in range(Nmix): + f += alphas[n] * (fhist[offset + n] + beta * Rhist[offset + n]) + + return f + + +# Generate list of k-points that nicely covers the first Brillouin zone +def sample_brillouin_zone(s): + N = s.bzmesh + # Generate N0xN1xN2 k-points using Monkhorst-Pack scheme + kpoints = [] + for n0 in range(1, N[0] + 1): + for n1 in range(1, N[1] + 1): + for n2 in range(1, N[2] + 1): + t0 = (2 * n0 - N[0] - 1) / (2 * N[0]) * s.crystal.b[0] + t1 = (2 * n1 - N[1] - 1) / (2 * N[1]) * s.crystal.b[1] + t2 = (2 * n2 - N[2] - 1) / (2 * N[2]) * s.crystal.b[2] + kpoints.append(t0 + t1 + t2) + + # Thanks to symmetry, most of these k-points are redundant. + # which we will exploit to greatly improve performance. + + # This can be done much better; these are my messy hacks. + + # Remember for each point, what it is equivalent to (maybe only itself) + equivto = list(range(len(kpoints))) + for i in range(len(kpoints)): + # Cubic crystal symmetry: + # Permute Cartesian components to find equivalences (this feels dirty) + perms = set(permutations(tuple(abs(kpoints[i])))) + for j in range(0, i): # only consider previous points + if tuple(abs(kpoints[j])) in perms: + equivto[i] = j + break + + # Each k gets a weight based on how many equivalent points it has + weights = [] + for i in range(len(kpoints)): + weights.append(equivto.count(i)) + # Delete redundant points, yielding a >75% reduction in computation + for i in reversed(range(len(kpoints))): + if weights[i] == 0: + del kpoints[i] + del weights[i] + + return kpoints, weights + + +# Static local potential contribution, from the atoms in the cell +def get_local_external_potential(s, Nele): + Vloc_r = np.zeros((s.Nres, s.Nres, s.Nres)) + rhon_r = np.zeros((s.Nres, s.Nres, s.Nres)) + for a in s.atoms: + Vloc_r += a.get_local_potential() + rhon_r += a.get_local_density() + + # Coulomb potential from ionic compensation charge + rhon_r /= np.sum(rhon_r) * s.dr / Nele # ensure normalization + Vion_r = get_hartree_potential(s, -rhon_r) + + return Vloc_r + Vion_r + + +# The whole point of DFT: iteratively find the self-consistent electron density +def get_selfconsistent_density(s, thresh=1e-8): + # Initial guess for mean electron density + rho = np.zeros((s.Nres, s.Nres, s.Nres)) + Nele = 0 # number of electrons + for a in s.atoms: + rho += a.guess_density() + Nele += a.params.Zion + rho /= np.sum(rho) * s.dr / Nele # ensure normalization + + # This is the "external potential" we use as a base (sans nonlocality) + Vext = get_local_external_potential(s, Nele) + + # Get weighted set of k-points in first Brillouin zone + ksamples, kweights = sample_brillouin_zone(s) + + # Each k-point gives a certain density contribution + rhos = [] + for k in ksamples: + 
rhos.append(rho.copy()) + + # Histories for Pulay density mixing + rho_hist = [] + residue_hist = [] + # You could do Pulay mixing separately for each k-point's density, + # but according to my tests, that makes no meaningful difference. + + cycles = 0 + improv = 1 + # Main self-consistency loop, where the magic happens + while improv > thresh: + cycles += 1 + print("Self-consistency cycle {} running...".format(cycles)) + + # Calculate additional potential from mean density + Vh = get_hartree_potential(s, rho) + #Vx = xc.get_x_potential_gga_lb(s, rho) + #Vc = xc.get_c_potential_gga_lyp(s, rho) + #Vxc = xc.get_xc_potential_lda_teter93(s, rho) + Vxc = xc.get_x_potential_lda_slater(s, rho) + Veff = Vext + Vh + Vxc + + for i in range(len(ksamples)): + prefix = " ({}/{}) ".format(i + 1, len(ksamples)) + energies, psis_r, psis_q = eigen.solve_kohnsham_equation(s, Veff, ksamples[i], prefix) + rhos[i] = get_density(psis_r, Nele) + + # Calculate new average density from orbitals + rho_new = np.zeros((s.Nres, s.Nres, s.Nres)) + for i in range(len(rhos)): + rho_new += kweights[i] * rhos[i] + rho_new /= sum(kweights) + + # Get next cycle's input density from Pulay mixing + rho_hist.append(rho) + residue_hist.append(rho_new - rho) + rho = mix_densities(rho_hist, residue_hist) + + # Use the norm of the residual as a metric for convergence + improv = np.sum(residue_hist[-1]**2) * s.dr / Nele**2 + print(" Cycle {} done, improvement {:.2e}".format(cycles, improv)) + + print("Found self-consistent electron density in {} cycles".format(cycles)) + return rho, Veff diff --git a/eigen.py b/eigen.py new file mode 100644 index 0000000..1dbb2a9 --- /dev/null +++ b/eigen.py @@ -0,0 +1,187 @@ +from math import * + +import numpy as np +import numpy.fft + +import visualize + + +# Normalize wavefunction with respect to volume element dV +def normalize(state, dV): + norm2 = np.vdot(state, state) * dV + return state / sqrt(abs(norm2)) + + +# Gram-Schmidt orthonormalization with respect to dV +def orthonormalize(basis, dV): + for n in range(len(basis)): + psi = basis[n] + + for i in range(n): + psi -= basis[i] * np.vdot(basis[i], psi) * dV + + basis[n] = normalize(psi, dV) + + return basis + + +# Zero all components with kinetic energy less than Ecut +def enforce_cutoff(s, psis_q, kq2): + for i in range(s.Nres): + for j in range(s.Nres): + for k in range(s.Nres): + if kq2[i, j, k] > 2 * s.Ecut: + for psi_q in psis_q: + psi_q[i, j, k] = 0 + return orthonormalize(psis_q, s.dq) + + +# Apply the Kohn-Sham Hamiltonian to a normalized state psi +def apply_hamiltonian(s, psi_q, Veff_r, kq2): + # Apply kinetic energy in q-domain + Tpsi_q = 0.5 * kq2 * psi_q + + # Apply local+nonlocal potential in r-domain + psi_r = np.fft.ifftn(psi_q) + # We don't normalize: the iFFT ensures a correctly normalized result + Vpsi_r = Veff_r * psi_r # local part + for a in s.atoms: # nonlocal part + Vnon_r = a.apply_nonlocal_potential(psi_r) * s.dr + Vpsi_r += Vnon_r + Vpsi_q = np.fft.fftn(Vpsi_r) + + return Tpsi_q + Vpsi_q + + +# Resisual vector R, which the eigensolver tries to minimize +def get_residual(s, psi_q, Hpsi_q, kq2): + ene = np.vdot(psi_q, Hpsi_q) * s.dq # assuming normalization + R_q = Hpsi_q - ene * psi_q + + # Calculate preconditioned residual PR for faster convergence: + # The ideal PR satisfies PR = (H - ene)^-1 R, such that (H - ene) PR = R. + # This operator (H - ene) is hard to invert, so we approximate it using + # the kinetic energy only: -0.5 \nabla^2 PR = R (the Poisson equation). 
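+    # In Fourier space -0.5 \nabla^2 is just multiplication by 0.5 |k+q|^2,
+    # so inverting it amounts to a division: PR_q ~ 2 R_q / kq2. The +1 in the
+    # denominator below only keeps the small-|k+q| components finite; the
+    # overall scale is irrelevant, since the basis gets re-orthonormalized.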
+ PR_q = 2 * R_q / (kq2 + 1) + # This is an even rougher approximation, but it works well enough. + + return PR_q + + +# One iteration of the block Davidson eigensolver algorithm +def diagonalize_subspace(s, psis, V, kq2): + # Apply Hamiltonian to psis and cache results + Hbasis = [] + for n in range(len(psis)): + Hbasis.append(apply_hamiltonian(s, psis[n], V, kq2)) + + # Calculate corresponding residuals + resids = [] + for n in range(len(psis)): + R = get_residual(s, psis[n], Hbasis[n], kq2) + resids.append(R) + + # Augment the input basis (psis) with the residuals + basis = psis + resids + basis = enforce_cutoff(s, basis, kq2) + + Ndim = len(basis) + + # Apply Hamiltonian to orthonormalized residuals and cache results + for n in range(len(psis), Ndim): + Hbasis.append(apply_hamiltonian(s, basis[n], V, kq2)) + + # Subspace Hamiltonian matrix with elements + Hsub = np.zeros((Ndim, Ndim), dtype="complex128") + # Exploit Hermiticity: only calculate the diagonal and upper triangle, + # then copy the elements from the upper to the lower triangle. + for r in range(Ndim): + for c in range(r, Ndim): + Hsub[r, c] = np.vdot(basis[r], Hbasis[c]) * s.dq + for r in range(Ndim): + for c in range(0, r): + Hsub[r, c] = np.conj(Hsub[c, r]) + + # Find the eigenvectors and eigenvalues of Hsub + evals, evecs = np.linalg.eigh(Hsub) + + # Express the eigenvectors in the original basis (input psis) + result = [] + for n in range(Ndim // 2): + psi = np.zeros((s.Nres, s.Nres, s.Nres), dtype="complex128") + for b in range(Ndim): + psi += basis[b] * evecs[b, n] + result.append(psi) + + return evals, result + + +# Small optimization: cache |k+q|^2 ahead of time +def cache_kinetic_factor(s, kpoint): + kq2 = np.zeros((s.Nres, s.Nres, s.Nres)) + for i in range(s.Nres): + for j in range(s.Nres): + for k in range(s.Nres): + q = np.array((s.qx[i, j, k], s.qy[i, j, k], s.qz[i, j, k])) + kq = kpoint + q + kq2[i, j, k] = np.dot(kq, kq) + return kq2 + + +# Initial guess from atoms' individual (approximate) orbitals +def guess_orbitals(s): + psis_q = [] + for a in s.atoms: + psis_r = a.guess_orbitals(s.Norb) + + # Convert guesses to Fourier space + for psi_r in psis_r: + psi_q = np.fft.fftn(psi_r) + psis_q.append(psi_q) + + return orthonormalize(psis_q, s.dq) + + +# Get lowest eigenvalues and eigenstates of Kohn-Sham equation +def solve_kohnsham_equation(s, V, k, prefix="", thresh=1e-4): + kq2 = cache_kinetic_factor(s, k) + + # Initialize psis, which we will refine shortly + psis_q = guess_orbitals(s) + + print(prefix + "Eigensolver cycle 1 running...") + cycles = 0 + improv = 1 + energy_old = 0 + # Iterative eigensolver loop + while improv > thresh: + cycles += 1 + + # Eliminate high-frequency noise (little effect in practice) + psis_q = enforce_cutoff(s, psis_q, kq2) + + # Refine eigenstates using one step of the block Davidson method + energies, psis_q = diagonalize_subspace(s, psis_q, V, kq2) + + # To measure progress, we look at the change of (N+1)th eigenvalue, + # where N is the number of bands we are interested in converging. 
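+        # (np.linalg.eigh sorts eigenvalues in ascending order, so
+        # energies[s.Neig] is the (Neig+1)-th lowest one; once it has settled,
+        # the s.Neig bands below it should be converged at least as well.)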
+ energy_new = energies[s.Neig] + improv = abs(energy_new - energy_old) / abs(energy_new) + energy_old = energy_new + + # Clear line and (re)print progress information + print("\033[A\033[2K" + prefix, end="") + print("Eigensolver cycle {} done".format(cycles), end="") + if cycles > 1: + print(", improvement {:.2e}".format(improv), end="") + print("") + + # Convert psis to real space and normalize them there + psis_r = [] + for n in range(len(psis_q)): + psi_r = np.fft.ifftn(psis_q[n]) + psis_r.append(normalize(psi_r, s.dr)) + + # energies is a NumPy array right now + return energies.tolist(), psis_r, psis_q + diff --git a/element.py b/element.py new file mode 100644 index 0000000..a8111ad --- /dev/null +++ b/element.py @@ -0,0 +1,145 @@ +from math import * + + +# Interface to the HGH pseudopotential parameters. +# See https://arxiv.org/abs/cond-mat/9803286 +class Element: + def __init__(self, symbol): + # Get raw data from the table below + global elements + el = elements[symbol] + + self.Zion = el[0] + self.rloc = el[1] + self.Cloc = [el[2], el[3], el[4], el[5]] + self.rl = [el[6], el[10], el[14]] + + self.h = {} + for l in range(0, 3): + self.h[l] = {} + for i in range(1, 4): + self.h[l][i] = {} + # Only the diagonal h-values are explicitly given + self.h[l][i][i] = el[6 + 4 * l + i] + + # Calculate the off-diagonal h-values, see Eqs.(20)-(28) + self.h[0][1][2] = -(1/2) * sqrt( 3/ 5) * self.h[0][2][2] + self.h[0][1][3] = (1/2) * sqrt( 5/ 21) * self.h[0][3][3] + self.h[0][2][3] = -(1/2) * sqrt(100/ 63) * self.h[0][3][3] + self.h[1][1][2] = -(1/2) * sqrt( 5/ 7) * self.h[1][2][2] + self.h[1][1][3] = (1/6) * sqrt( 35/ 11) * self.h[1][3][3] + self.h[1][2][3] = -(1/6) * sqrt(196/ 11) * self.h[1][3][3] + self.h[2][1][2] = -(1/2) * sqrt( 7/ 9) * self.h[2][2][2] + self.h[2][1][3] = (1/2) * sqrt( 63/143) * self.h[2][3][3] + self.h[2][2][3] = -(1/2) * sqrt(324/143) * self.h[2][3][3] + + # Mirror around diagonal, see Eq.(29) + for l in range(0, 3): + for i in range(1, 4): + for j in range(1, i): + self.h[l][i][j] = self.h[l][j][i] + + +# Raw HGH pseudopotential parameters for semiconductor materials. 
+# See https://arxiv.org/abs/cond-mat/9803286 +# +# Indices: Meanings: +# 0 Zion +# 1 2 3 4 5 rloc C1 C2 C3 C4 +# 6 7 8 9 r0 h0_1 h0_2 h0_3 +# 10 11 12 13 r1 h1_1 h1_2 h1_3 +# 14 15 16 17 r2 h2_1 h2_2 h2_3 +elements = { + "B" : [3, + 0.433930, -5.578642, 0.804251, 0.000000, 0.000000, + 0.373843, 6.233928, 0.000000, 0.000000, + 0.360393, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000], + "C" : [4, + 0.348830, -8.513771, 1.228432, 0.000000, 0.000000, + 0.304553, 9.522842, 0.000000, 0.000000, + 0.232677, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000], + "N" : [5, + 0.289179, -12.234820, 1.766407, 0.000000, 0.000000, + 0.256605, 13.552243, 0.000000, 0.000000, + 0.270134, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000], + "O" : [6, + 0.247621, -16.580318, 2.395701, 0.000000, 0.000000, + 0.221786, 18.266917, 0.000000, 0.000000, + 0.256829, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000], + "Al" : [3, + 0.450000, -8.491351, 0.000000, 0.000000, 0.000000, + 0.460104, 5.088340, 2.679700, 0.000000, + 0.536744, 2.193438, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000], + "Si" : [4, + 0.440000, -7.336103, 0.000000, 0.000000, 0.000000, + 0.422738, 5.906928, 3.258196, 0.000000, + 0.484278, 2.727013, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000], + "P" : [5, + 0.430000, -6.654220, 0.000000, 0.000000, 0.000000, + 0.389803, 6.842136, 3.856693, 0.000000, + 0.440796, 3.282606, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000], + "S" : [6, + 0.420000, -6.554492, 0.000000, 0.000000, 0.000000, + 0.361757, 7.905303, 4.471698, 0.000000, + 0.405285, 3.866579, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000], + "Zn" : [2, + 0.570000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.640712, 2.088557, -0.218270, -0.941317, + 0.967605, 0.163546, -0.227086, 0.000000, + 1.330352, 0.010486, 0.000000, 0.000000], + "Ga" : [3, + 0.560000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.610791, 2.369325, -0.249015, -0.551796, + 0.704596, 0.746305, -0.513132, 0.000000, + 0.982580, 0.075437, 0.000000, 0.000000], + "Ge" : [4, + 0.540000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.493743, 3.826891, 1.100231, -1.344218, + 0.601064, 1.362518, -0.627370, 0.000000, + 0.788369, 0.191205, 0.000000, 0.000000], + "As" : [5, + 0.520000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.456400, 4.560761, 1.692389, -1.373804, + 0.550562, 1.812247, -0.646727, 0.000000, + 0.685283, 0.312373, 0.000000, 0.000000], + "Se" : [6, + 0.510000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.432531, 5.145131, 2.052009, -1.369203, + 0.472473, 2.858806, -0.590671, 0.000000, + 0.613420, 0.434829, 0.000000, 0.000000], + "Cd" : [2, + 0.625000, -1.796838, 0.000000, 0.000000, 0.000000, + 0.828465, 1.485292, -0.424753, -0.407986, + 0.972873, 0.469208, -0.448111, 0.000000, + 1.240949, 0.065412, 0.000000, 0.000000], + "In" : [3, + 0.610000, 2.865777, 0.000000, 0.000000, 0.000000, + 0.770602, 1.256194, -0.397255, -0.278329, + 0.858132, 0.494459, -0.380789, 0.000000, + 1.088691, 0.129208, 0.000000, 0.000000], + "Sn" : [4, + 0.605000, 4.610912, 0.000000, 0.000000, 0.000000, + 0.663544, 1.648791, -0.141974, -0.576546, + 0.745865, 0.769355, -0.445070, 0.000000, + 0.944459, 0.225115, 0.000000, 0.000000], + "Sb" : [5, + 0.590000, 6.680228, 0.000000, 0.000000, 0.000000, + 0.597684, 1.951477, 0.037537, -0.786631, + 0.672122, 0.970313, -0.466731, 0.000000, + 0.856557, 0.300103, 0.000000, 0.000000], + "Te" : [6, + 0.575000, 9.387085, 
0.000000, 0.000000, 0.000000, + 0.556456, 2.046890, -0.029333, -0.881119, + 0.615262, 1.033478, -0.481172, 0.000000, + 0.805101, 0.317411, 0.000000, 0.000000], +} + + diff --git a/main.py b/main.py new file mode 100644 index 0000000..23a21fc --- /dev/null +++ b/main.py @@ -0,0 +1,127 @@ +import csv +import math + +import numpy as np + +import atom +import crystal +import density +import eigen + + +class Settings: + pass + + +# Set up the object "s" that contains all settings for the run. +# After this function, "s" is not modified. Adjust settings here. +def initialize(): + s = Settings() + + # Resolution: store functions on an NxNxN grid + s.Nres = 16 + + # Lattice constant of conventional cell, in Bohr radii + #a = 6.7463 # C + a = 10.261 # Si + #a = 10.298 # GaP + #a = 10.683 # Ge/GaAs + #a = 11.449 # InAs + + # Crystal structure, choose Cubic{Simple,BodyCentered,FaceCentered} + s.crystal = crystal.CubicFaceCentered(s.Nres, a) + # Grid dimensions for k-point sampling of Brillouin zone + s.bzmesh = (3, 3, 3) + + # Sequence of Brillouin zone symmetry points for band structure plot + s.bzpath = "LGXWULWXUG" + # Number of substeps to take between two symmetry points in bzpath + s.Nbzsub = 10 + + # Number of orbitals to use per atom (but not necessarily converge) + s.Norb = 8 + # Number of total Kohn-Sham eigenstates to converge + s.Neig = 12 + + # Elements and positions of atoms in the unit cell. + # Positions are expressed in the cell basis (a1,a2,a3). + atoms = [ + ("Si", (-0.125, -0.125, -0.125)), + ("Si", ( 0.125, 0.125, 0.125)), + ] + + # Plane-wave cutoff energy in Hartree + s.Ecut = 50 + + # Finalize the "s" object. Avoid editing below this line. + + # Get the grid point coordinates in direct and Fourier space + s.rx, s.ry, s.rz, s.dr = s.crystal.get_grid_direct() + s.qx, s.qy, s.qz, s.dq = s.crystal.get_grid_fourier() + # Remember to call fftshift before plotting! + s.qx = np.fft.ifftshift(s.qx) + s.qy = np.fft.ifftshift(s.qy) + s.qz = np.fft.ifftshift(s.qz) + + # For convenience + s.r2 = s.rx**2 + s.ry**2 + s.rz**2 + s.q2 = s.qx**2 + s.qy**2 + s.qz**2 + + # Place the atoms on the grid as specified in "atoms" + s.atoms = [] + basis = s.crystal.a + for a in atoms: + pos = a[1][0] * basis[0] + a[1][1] * basis[1] + a[1][2] * basis[2] + s.atoms.append(atom.Atom(s, a[0], pos)) + + return s + + +def main(): + # Get settings object "s" + s = initialize() + + # It would be madness to try to converge the electron density + # for all band structure k-points at the same time, so instead + # we converge it for a small set and then assume it's fixed. 
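+    # Concretely: density.get_selfconsistent_density runs the SCF loop on the
+    # coarse Monkhorst-Pack mesh (s.bzmesh) and returns the converged rho
+    # together with the corresponding potential Veff = Vext + Vhartree + Vxc;
+    # we then re-solve the Kohn-Sham equation with this frozen Veff at every
+    # k-point on the band path, i.e. a standard non-self-consistent band run.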
+ rho, Veff = density.get_selfconsistent_density(s) + print("\nCalculating band structure with fixed electron density") + + # Get list of positions of high-symmetry k-points + ksteps = s.crystal.get_brillouin_zone_path(s.bzpath) + Nsteps = len(ksteps) - 1 + + # Total distance in k-space, used for first CSV column + kdistance = 0.0 + + # Open output CSV file for writing + file = open("result.csv", "w", newline="") + csvw = csv.writer(file, quoting=csv.QUOTE_MINIMAL) + + # Loop over symmetry points, except last one + for i in range(Nsteps): + print("Band calculation part {}/{} running...".format(i + 1, Nsteps)) + + # Subdivide path between this point and next point + if i < Nsteps - 1: + ksubs = np.linspace(ksteps[i], ksteps[i + 1], s.Nbzsub, endpoint=False) + else: # include endpoint in last iteration + ksubs = np.linspace(ksteps[i], ksteps[i + 1], s.Nbzsub + 1, endpoint=True) + + # Loop over points on subdivided path + for j, kp in enumerate(ksubs): + # Solve Kohn-Sham equation for this k-point, with fixed Veff + pf = " ({}/{}) ".format(j + 1, len(ksubs)) + energies, psis_r, psis_q = eigen.solve_kohnsham_equation(s, Veff, kp, pf) + + csvw.writerow([kdistance] + energies[0 : s.Neig]) + + # Keep track of how far we moved in k-space + kd = np.linalg.norm(ksteps[i + 1] - ksteps[i]) + kdistance += kd / s.Nbzsub + + file.close() + + +if __name__ == "__main__": + main() diff --git a/visualize.py b/visualize.py new file mode 100644 index 0000000..6746458 --- /dev/null +++ b/visualize.py @@ -0,0 +1,76 @@ +import numpy as np +import matplotlib.pyplot as plt + + +# Some messy visualization functions I hacked together + + +def cross_r(s, fs): + i = s.Nres // 2 + + # To visualize the diagonals + signs = np.array([-1] * i + [1] * i) + + xs = s.rx[:, i, i] + ys = s.ry[i, :, i] + zs = s.rz[i, i, :] + + for f in fs: + plt.plot(xs, f[:, i, i]) + plt.plot(ys, f[i, :, i]) + plt.plot(zs, f[i, i, :]) + plt.plot(signs * np.sqrt(ys**2 + zs**2), np.diagonal(f, axis1=1, axis2=2)[i]) + plt.plot(signs * np.sqrt(xs**2 + ys**2), np.diagonal(f, axis1=0, axis2=1)[i]) + plt.plot(signs * np.sqrt(xs**2 + zs**2), np.diagonal(f, axis1=0, axis2=2)[i]) + plt.show() + + +def height_r(s, f): + i = s.Nres // 2 + + fig = plt.figure() + ax = plt.axes(projection="3d") + Xxy, Yxy = np.meshgrid(s.rx[:, i, i], s.ry[i, :, i]) + Yyz, Zyz = np.meshgrid(s.ry[i, :, i], s.rz[i, i, :]) + Xxz, Zxz = np.meshgrid(s.rx[:, i, i], s.rz[i, i, :]) + ax.set_xlabel("x") + ax.set_ylabel("y") + #ax.plot_surface(Xxy, Yxy, f[:, :, i], cmap="viridis") + #ax.plot_surface(Yyz, Zyz, f[i, :, :], cmap="viridis") + ax.plot_surface(Xxz, Zxz, f[:, i, :], cmap="viridis") + plt.show() + + +def shape_r(s, f, c): + ppts_x, ppts_y, ppts_z = [], [], [] + npts_x, npts_y, npts_z = [], [], [] + pvals = [] + nvals = [] + + for ix in range(s.Nres): + for iy in range(s.Nres): + for iz in range(s.Nres): + if f[ix, iy, iz] >= c: + ppts_x.append(s.rx[ix, iy, iz]) + ppts_y.append(s.ry[ix, iy, iz]) + ppts_z.append(s.rz[ix, iy, iz]) + pvals.append(f[ix, iy, iz]) + if f[ix, iy, iz] <= -c: + npts_x.append(s.rx[ix, iy, iz]) + npts_y.append(s.ry[ix, iy, iz]) + npts_z.append(s.rz[ix, iy, iz]) + nvals.append(f[ix, iy, iz]) + + fig = plt.figure() + ax = plt.axes(projection="3d") + ax.set_xlim(np.amin(s.rx), np.amax(s.rx)) + ax.set_ylim(np.amin(s.ry), np.amax(s.ry)) + ax.set_zlim(np.amin(s.rz), np.amax(s.rz)) + ax.set_xlabel("x") + ax.set_ylabel("y") + ax.set_zlabel("z") + + ax.scatter(ppts_x, ppts_y, ppts_z, c=pvals, s=10, cmap="Reds_r") + ax.scatter(npts_x, npts_y, npts_z, c=nvals, s=10, 
cmap="Blues") + plt.show() + diff --git a/xc.py b/xc.py new file mode 100644 index 0000000..51fe381 --- /dev/null +++ b/xc.py @@ -0,0 +1,196 @@ +from math import * + +import numpy as np +import numpy.fft +import numpy.linalg + + +############################# +##### LDA XC potentials ##### +############################# + +# Standard X potential of homogeneous electron gas (C is neglected) +def get_x_potential_lda_slater(s, rho): + return -np.cbrt((3 / pi) * rho) + + +# XC potential used for the GTH and HGH pseudopotentials. +# See https://arxiv.org/abs/mtrl-th/9512004 +def get_xc_potential_lda_teter93(s, rho): + a0 = 0.4581652932831429 + a1 = 2.217058676663745 + a2 = 0.7405551735357053 + a3 = 0.01968227878617998 + b1 = 1.0 + b2 = 4.504130959426697 + b3 = 1.110667363742916 + b4 = 0.02359291751427506 + + rs = np.cbrt(3 / (4 * pi * rho)) + + # Energy density (epsilon) as a function of rs + eps_num = a0 + rs * (a1 + rs * (a2 + a3 * rs)) + eps_den = rs * (b1 + rs * (b2 + rs * (b3 + rs * b4))) + eps = -eps_num / eps_den + + # Derivative of energy density with respect to rs + deps_num = a0 * (b1 + rs * (2*b2 + 3*b3*rs + 4*b4*rs**2)) + deps_num += rs**3 * (2*a1*b3 + 3*a1*b4*rs - 2*a3*b1 - a3*b2*rs + a3*b4*rs**3) + deps_num += rs**2 * (a1*b2 - a2*b1 + a2*rs**2 * (b3 + 2*b4*rs)) + deps = deps_num / eps_den**2 + + # Chain rule: derivative of rs with respect to rho + chain = - 1 / np.cbrt(36 * sqrt(pi) * rho**4) + + return eps + rho * deps * chain + + +############################# +##### GGA XC potentials ##### +############################# + +# Because our grid usually has a non-orthogonal rectilinear basis, +# we first need some helper functions to calculate derivatives. + + +# Concept from tensor calculus, we'll need this later +def inverse_metric_tensor(s): + basis = s.crystal.a / s.Nres + + g = np.zeros((3, 3)) + for i in range(3): + for j in range(3): + g[i, j] = np.dot(basis[i], basis[j]) + + # Never fails if basis is linearly independent? + ginv = np.linalg.inv(g) + + return ginv + + +# Compute Laplacian of f on a grid with non-orthogonal basis vectors. 
+# Shamelessly taken from https://math.stackexchange.com/questions/2632598/ +def laplacian(s, f): + g = inverse_metric_tensor(s) + + lf = np.zeros_like(f) + for i in range(s.Nres): + for j in range(s.Nres): + for k in range(s.Nres): + # Wrap indices at the boundaries + inext = (i + 1) % s.Nres + jnext = (j + 1) % s.Nres + knext = (k + 1) % s.Nres + # Calculate a lot of finite differences + lf[i, j, k] += -2 * (g[0, 0] + g[1, 1] + g[2, 2]) * f[i, j, k] + lf[i, j, k] += g[0, 0] * (f[i - 1, j, k] + f[inext, j, k]) + lf[i, j, k] += g[1, 1] * (f[i, j - 1, k] + f[i, jnext, k]) + lf[i, j, k] += g[2, 2] * (f[i, j, k - 1] + f[i, j, knext]) + lf[i, j, k] += 0.5 * g[0, 1] * (f[i - 1, j - 1, k] - f[i - 1, jnext, k]) + lf[i, j, k] += 0.5 * g[0, 1] * (f[inext, jnext, k] - f[inext, j - 1, k]) + lf[i, j, k] += 0.5 * g[0, 2] * (f[i - 1, j, k - 1] - f[i - 1, j, knext]) + lf[i, j, k] += 0.5 * g[0, 2] * (f[inext, j, knext] - f[inext, j, k - 1]) + lf[i, j, k] += 0.5 * g[1, 2] * (f[i, j - 1, k - 1] - f[i, j - 1, knext]) + lf[i, j, k] += 0.5 * g[1, 2] * (f[i, jnext, knext] - f[i, jnext, k - 1]) + + return lf + + +# Compute gradient of f on a grid with non-orthogonal basis vectors +def gradient(s, f): + basis = s.crystal.a / s.Nres + g = inverse_metric_tensor(s) + + dfdx = np.zeros_like(f) + dfdy = np.zeros_like(f) + dfdz = np.zeros_like(f) + for i in range(s.Nres): + for j in range(s.Nres): + for k in range(s.Nres): + # Technically, we have periodic boundary conditions, but + # in practice we should avoid crossing the boundaries, + # hence the following verbose derivative calculations. + + # Derivative along direction indexed by i + if i == 0: + df0 = 1.0 * (f[i + 1, j, k] - f[i, j, k]) + elif i == s.Nres - 1: + df0 = 1.0 * (f[i, j, k] - f[i - 1, j, k]) + else: + df0 = 0.5 * (f[i + 1, j, k] - f[i - 1, j, k]) + # Derivative along direction indexed by j + if j == 0: + df1 = 1.0 * (f[i, j + 1, k] - f[i, j, k]) + elif j == s.Nres - 1: + df1 = 1.0 * (f[i, j, k] - f[i, j - 1, k]) + else: + df1 = 0.5 * (f[i, j + 1, k] - f[i, j - 1, k]) + # Derivative along direction indexed by k + if k == 0: + df2 = 1.0 * (f[i, j, k + 1] - f[i, j, k]) + elif k == s.Nres - 1: + df2 = 1.0 * (f[i, j, k] - f[i, j, k - 1]) + else: + df2 = 0.5 * (f[i, j, k + 1] - f[i, j, k - 1]) + + # Get the local gradient at [i, j, k] + df = [df0, df1, df2] + gradf = np.zeros(3, dtype=f.dtype) + for l in range(3): + for m in range(3): + gradf += g[l, m] * df[l] * basis[m] + + dfdx[i, j, k] = gradf[0] + dfdy[i, j, k] = gradf[1] + dfdz[i, j, k] = gradf[2] + + return dfdx, dfdy, dfdz + + +# C potential by Lee, Yang and Parr, very popular for DFT. 
+# See Eqs.(23)&(24) in https://doi.org/10.1103/PhysRevB.37.785 +def get_c_potential_gga_lyp(s, rho): + lr = laplacian(s, rho) + drdx, drdy, drdz = gradient(s, rho) + gr2 = drdx**2 + drdy**2 + drdz**2 + + a = 0.04918 + b = 0.132 + c = 0.2533 + d = 0.349 + CF = 0.3 * (3 * pi**2)**(2/3) + + # Abbreviations + r = rho + rr = r**(1/3) + er = np.exp(-c / rr) + + F1 = 1 / (d/rr + 1) + dF1 = d / (3*rr**2 * (d + rr)**2) + ddF1 = 2*d**2 / (9*rr**8 * (d/rr + 1)**3) - 4*d / (9*rr**7 * (d/rr + 1)**2) + + G1 = F1 * er / rr**5 + dG1 = (F1 * (c - 5*rr) + 3*dF1*rr**4) * er / (3*r**3) + ddG1 = (F1 * (c - 10*rr) * (c - 4*rr) + 6*dF1*rr**4 * (c - 5*rr) + 9*ddF1*rr**8) * er / (9*rr**13) + + V = -a * (dF1*r + F1) + V += -a*b*CF * rr**5 * (dG1*r + (8/3)*G1) + V += -(1/ 4)*a*b * ( ddG1*r*gr2 + dG1 * (3*gr2 + 2*r*lr) + 4*G1*lr) + V += -(1/72)*a*b * (3*ddG1*r*gr2 + dG1 * (5*gr2 + 6*r*lr) + 4*G1*lr) + + return V + + +# X potential by Van Leeuwen and Baerends, inspired by Becke88. +# See Eqs.(50)&(54) in https://doi.org/10.1103/PhysRevA.49.2421 +def get_x_potential_gga_lb(s, rho): + drdx, drdy, drdz = gradient(s, rho) + x = np.sqrt(drdx**2 + drdy**2 + drdz**2) / rho**(4/3) + + beta = 0.05 + fx = - beta * x**2 / (1 + 3 * beta * x * np.arcsinh(x)) + + V = get_x_potential_lda_slater(s, rho) + return V + fx * rho**(1/3) + + -- cgit v1.2.3