Diffstat (limited to 'density.py')
-rw-r--r-- | density.py | 198 |
1 file changed, 198 insertions, 0 deletions
diff --git a/density.py b/density.py
new file mode 100644
index 0000000..0162baf
--- /dev/null
+++ b/density.py
@@ -0,0 +1,198 @@
+from math import pi
+from itertools import permutations
+
+import numpy as np
+import numpy.fft
+import numpy.linalg
+
+import eigen
+import xc
+
+
+# Get electron density by filling orbitals with electrons
+def get_density(psis, Nele):
+    rho = np.zeros_like(psis[0], dtype="float64")
+    for n in range(Nele):
+        rho += abs(psis[n // 2])**2  # two electrons per orbital
+    return rho
+
+
+# Given a density, calculate the Hartree (Coulomb) potential
+def get_hartree_potential(s, rho_r):
+    rho_q = np.fft.fftn(rho_r)
+
+    V_q = np.zeros_like(rho_q)
+    for i in range(s.Nres):
+        for j in range(s.Nres):
+            for k in range(s.Nres):
+                # Ignore the q=0 component, which gives a constant shift.
+                # This makes the cell neutral, eliminating long-range effects.
+                if s.q2[i, j, k] != 0:
+                    V_q[i, j, k] = 4 * pi * rho_q[i, j, k] / s.q2[i, j, k]
+    V_r = np.fft.ifftn(V_q)
+
+    return np.real(V_r)
+
+
+# Pulay mixing: try this one weird trick to accelerate your convergence!
+# Linearly combine several old densities to get a new density,
+# which is hopefully closer to the self-consistent optimum.
+def mix_densities(fhist, Rhist):
+    # Number of old functions to combine
+    Nmix = min(5, max(2, len(fhist)))
+
+    # We need at least two old densities to work with.
+    # On the first SCF iteration, we return the raw output density.
+    if len(fhist) < Nmix:
+        return fhist[-1] + Rhist[-1]
+
+    # Number of initial elements to ignore in the history
+    offset = len(fhist) - Nmix
+
+    # Set up the system matrix
+    A = np.zeros((Nmix, Nmix), dtype=fhist[-1].dtype)
+    for r in range(Nmix):
+        for c in range(Nmix):
+            A[r, c] = np.vdot(Rhist[offset + r], Rhist[offset + c])
+
+    # Solve the system using matrix inversion, see for example
+    # Eq.(38) in https://doi.org/10.1103/PhysRevB.54.11169
+    try:
+        Ainv = np.linalg.inv(A)
+        alphas = Ainv.sum(axis=1)
+        alphas /= alphas.sum()
+    except np.linalg.LinAlgError:  # A is singular
+        alphas = np.zeros(Nmix)
+        alphas[-1] = 1.0
+
+    # Construct next function as a linear combination of history.
+    # Beta "nudges" the result, to prevent getting stuck in a subspace.
+    beta = 0.1
+    f = np.zeros_like(fhist[-1])
+    for n in range(Nmix):
+        f += alphas[n] * (fhist[offset + n] + beta * Rhist[offset + n])
+
+    return f
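+
+# For reference, a sketch of the algebra implemented above: minimizing
+# |sum_n alpha_n R_n|^2 subject to sum_n alpha_n = 1 gives
+#
+#     alpha = A^(-1) 1 / (1^T A^(-1) 1),    where A_rc = <R_r, R_c>,
+#
+# which is exactly Ainv.sum(axis=1) followed by alphas /= alphas.sum().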
+
+
+# Generate list of k-points that nicely covers the first Brillouin zone
+def sample_brillouin_zone(s):
+    N = s.bzmesh
+    # Generate N0 x N1 x N2 k-points using the Monkhorst-Pack scheme
+    kpoints = []
+    for n0 in range(1, N[0] + 1):
+        for n1 in range(1, N[1] + 1):
+            for n2 in range(1, N[2] + 1):
+                t0 = (2 * n0 - N[0] - 1) / (2 * N[0]) * s.crystal.b[0]
+                t1 = (2 * n1 - N[1] - 1) / (2 * N[1]) * s.crystal.b[1]
+                t2 = (2 * n2 - N[2] - 1) / (2 * N[2]) * s.crystal.b[2]
+                kpoints.append(t0 + t1 + t2)
+
+    # Thanks to symmetry, most of these k-points are redundant,
+    # which we will exploit to greatly improve performance.
+
+    # This can be done much better; these are my messy hacks.
+
+    # Remember for each point, what it is equivalent to (maybe only itself)
+    equivto = list(range(len(kpoints)))
+    for i in range(len(kpoints)):
+        # Cubic crystal symmetry:
+        # Permute Cartesian components to find equivalences (this feels dirty)
+        perms = set(permutations(tuple(abs(kpoints[i]))))
+        for j in range(0, i):  # only consider previous points
+            if tuple(abs(kpoints[j])) in perms:
+                equivto[i] = j
+                break
+
+    # Each k gets a weight based on how many equivalent points it has
+    weights = []
+    for i in range(len(kpoints)):
+        weights.append(equivto.count(i))
+    # Delete redundant points, yielding a >75% reduction in computation
+    for i in reversed(range(len(kpoints))):
+        if weights[i] == 0:
+            del kpoints[i]
+            del weights[i]
+
+    return kpoints, weights
+
+
+# Static local potential contribution, from the atoms in the cell
+def get_local_external_potential(s, Nele):
+    Vloc_r = np.zeros((s.Nres, s.Nres, s.Nres))
+    rhon_r = np.zeros((s.Nres, s.Nres, s.Nres))
+    for a in s.atoms:
+        Vloc_r += a.get_local_potential()
+        rhon_r += a.get_local_density()
+
+    # Coulomb potential from ionic compensation charge
+    rhon_r /= np.sum(rhon_r) * s.dr / Nele  # ensure normalization
+    Vion_r = get_hartree_potential(s, -rhon_r)
+
+    return Vloc_r + Vion_r
+
+
+# The whole point of DFT: iteratively find the self-consistent electron density
+def get_selfconsistent_density(s, thresh=1e-8):
+    # Initial guess for mean electron density
+    rho = np.zeros((s.Nres, s.Nres, s.Nres))
+    Nele = 0  # number of electrons
+    for a in s.atoms:
+        rho += a.guess_density()
+        Nele += a.params.Zion
+    rho /= np.sum(rho) * s.dr / Nele  # ensure normalization
+
+    # This is the "external potential" we use as a base (sans nonlocality)
+    Vext = get_local_external_potential(s, Nele)
+
+    # Get weighted set of k-points in the first Brillouin zone
+    ksamples, kweights = sample_brillouin_zone(s)
+
+    # Each k-point gives a certain density contribution
+    rhos = []
+    for k in ksamples:
+        rhos.append(rho.copy())
+
+    # Histories for Pulay density mixing
+    rho_hist = []
+    residue_hist = []
+    # You could do Pulay mixing separately for each k-point's density,
+    # but according to my tests, that makes no meaningful difference.
+
+    cycles = 0
+    improv = 1
+    # Main self-consistency loop, where the magic happens
+    while improv > thresh:
+        cycles += 1
+        print("Self-consistency cycle {} running...".format(cycles))
+
+        # Calculate additional potential from mean density
+        Vh = get_hartree_potential(s, rho)
+        #Vx = xc.get_x_potential_gga_lb(s, rho)
+        #Vc = xc.get_c_potential_gga_lyp(s, rho)
+        #Vxc = xc.get_xc_potential_lda_teter93(s, rho)
+        Vxc = xc.get_x_potential_lda_slater(s, rho)
+        Veff = Vext + Vh + Vxc
+
+        # Solve the Kohn-Sham equation at each k-point in the current Veff
+        for i in range(len(ksamples)):
+            prefix = "  ({}/{}) ".format(i + 1, len(ksamples))
+            energies, psis_r, psis_q = eigen.solve_kohnsham_equation(s, Veff, ksamples[i], prefix)
+            rhos[i] = get_density(psis_r, Nele)
+
+        # Calculate new average density from orbitals
+        rho_new = np.zeros((s.Nres, s.Nres, s.Nres))
+        for i in range(len(rhos)):
+            rho_new += kweights[i] * rhos[i]
+        rho_new /= sum(kweights)
+
+        # Get next cycle's input density from Pulay mixing
+        rho_hist.append(rho)
+        residue_hist.append(rho_new - rho)
+        rho = mix_densities(rho_hist, residue_hist)
+
+        # Use the norm of the residual as a metric for convergence
+        improv = np.sum(residue_hist[-1]**2) * s.dr / Nele**2
+        print("  Cycle {} done, improvement {:.2e}".format(cycles, improv))
+
+    print("Found self-consistent electron density in {} cycles".format(cycles))
+    return rho, Veff
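+
+
+# Minimal usage sketch, for illustration only. The settings object `s` is
+# assumed to provide the attributes used above (Nres, dr, q2, bzmesh,
+# crystal.b, atoms); its construction lives elsewhere in this project, so
+# the helper below is hypothetical:
+#
+#     s = make_settings("diamond.json")   # hypothetical setup helper
+#     rho, Veff = get_selfconsistent_density(s, thresh=1e-8)
+#     np.save("density.npy", rho)         # rho is a Nres x Nres x Nres array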