"""General tools.
Copyright © 2019-2023 HQS Quantum Simulations GmbH. All Rights Reserved.
"""
from typing import Callable, Union, Optional, TypeVar, Type
import numpy as np
from numpy.typing import ArrayLike
from scipy.sparse import csr_array, hstack, vstack
from scipy.special import eval_chebyt
from lattice_solver.chebyshev import (
chebyshev_scaling,
calc_prefactors,
apply_jackson,
resolvent_greens_function_from_moments,
)
from .qolossal_io import (
print_progress,
)
csr_or_ndarr = TypeVar("csr_or_ndarr", csr_array, np.ndarray)
tup_or_mat = TypeVar("tup_or_mat", tuple, csr_array, np.ndarray)
fl_cmplx_or_ndarr = TypeVar("fl_cmplx_or_ndarr", float, complex, np.ndarray)
fl_or_ndarr = TypeVar("fl_or_ndarr", float, np.ndarray)
_KB_in_EV_over_K = 8.617333262145e-5 # Boltzmann constant in eV K^-1
_EH_in_EV = 27.211386245988 # Hartree energy in eV
_HBAR_in_EV_S = 6.582119569e-16 # hbar in eV s
_C_in_M_over_S = 299792458 # c in m/s
_A0_in_AA = 5.29177210903e-1 # Bohr radius in angstroms
"""Vacuum permittivity times the bohr radius
This factor is obtained from the definition of eps_0 = e^2/a_0 E_h where a_0 is the Bohr radius and
E_h is the Hartree energy. This has this value because we are in rationalized units, i.e. a system
in which Coulomb's law has a prefactor of 1/(4 pi eps_0) and Maxwell's equations have NO 4 pi."""
_EPSILON_0 = 1 / (4 * np.pi * _EH_in_EV * _A0_in_AA)
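# numerically _EPSILON_0 is about 5.526e-3, i.e. e^2/(4 pi eps_0) = E_h a_0 is about 14.3996 eV AA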
def matrix_to_linop(M: csr_array) -> Callable:
"""Method generating a linear operator given its matrix representation.
Args:
M (csr_array): Matrix representation of the linear operator.
Returns:
Callable: Linear operator
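    Example:
        A minimal sketch wrapping a small sparse matrix; the returned callable writes the
        product into the preallocated ``out`` buffer::

            M = csr_array(np.eye(3))
            linop = matrix_to_linop(M)
            out = np.empty(3)
            linop(np.array([1.0, 2.0, 3.0]), out)  # out now holds M @ v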
"""
def _linop(v: np.ndarray, out: np.ndarray) -> np.ndarray:
out[:] = M @ v
return out
return _linop
def calcLanczos(dotProd: Callable, x0: np.ndarray, Order: int, tolerance: float = 1e-11) -> tuple:
"""Calculate Lanczos expansion of a matrix with dot product dotProd on the vector x0.
Args:
dotProd (Callable): dot product function.
x0 (np.ndarray): vector on which to build the Krylov space
Order (int): order of the expansion
tolerance (float): Tolerance of non-orthogonality. Defaults to 1e-11.
Returns:
tuple: alphas and betas corresponding to the expansion.
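    Example:
        A minimal sketch, assuming a small Hermitian matrix wrapped with ``matrix_to_linop``;
        alphas and betas are the diagonal and off-diagonal of the tridiagonal representation
        of the matrix in the Krylov basis of x0::

            H = csr_array(np.diag([0.0, 1.0, 2.0]))
            alphas, betas = calcLanczos(matrix_to_linop(H), np.ones(3), Order=3)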
"""
# initialize coefficients and first vectors
alphas = np.zeros(Order)
betas = np.zeros(Order - 1)
norm = np.linalg.norm(x0)
dim = x0.flatten().shape[0]
q = x0.reshape(dim) / norm
r = np.zeros_like(q)
r = dotProd(q, out=r)
# analytically a is real even if the matrix corresponding to dotProd is complex, but
# numerically it will always have a negligible imaginary part, so we have to explicitly take
# the real part
a = (np.conj(r).dot(q)).real
alphas[0] = a
r -= a * q
beta = np.linalg.norm(r)
betas[0] = beta
# this will only be needed for a swap
q0 = np.zeros(0)
    # start the recursive procedure to find the coefficients
for j in range(1, Order):
# stop the recursion if the last vector found is parallel to the second last, i.e. after
# the orthogonalization with the second last, the vector has 0 norm.
if np.abs(beta) < tolerance:
alphas = np.resize(alphas, j)
betas = np.resize(betas, j - 1)
return alphas, betas
# save the beta calculated last
betas[j - 1] = beta
# calculate the new vectors
q, q0 = q0, q
q = r / beta
r = dotProd(q, out=r)
# their overlaps
a = (np.conj(r).dot(q)).real
alphas[j] = a
# orthogonalize them
r -= a * q + beta * q0
beta = np.linalg.norm(r)
return alphas, betas
def jacksonKernelCoeff(Order: int) -> np.ndarray:
"""Get Jackson kernel polynomial coefficients.
Args:
Order (int): Number of coefficients to return.
Returns:
np.ndarray: Jackson coefficients.
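    Example:
        Damping a truncated Chebyshev expansion to suppress Gibbs oscillations; the moments
        here are just placeholders::

            moments = np.ones(33)  # expansion of order 32
            damped = jacksonKernelCoeff(32) * moments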
"""
# Jackson kernel for KPM
N = Order + 1
nn = np.arange(N)
gn = 1 / N * ((N - nn) * np.cos(np.pi * nn / N) + np.sin(np.pi * nn / N) / np.tan(np.pi / N))
return gn
def ChebyMoments(
dotProd: Callable, vL: np.ndarray, vR: np.ndarray, Order: int, dtype: Type = float
) -> np.ndarray:
r"""Calculate Chebyshev moments corresponding to a matrix with dot product dotProd.
Given a Hamiltonian and two vectors :math:`v_L` and :math:`v_R`, the corresponding Chebyshev
moments will be
.. math::
\mu_n = \bra{v_L} T_n(H) \ket{v_R} = \bra{v_L} \ket{v_n}
with
.. math::
&v_0 = \ket{v_R}\\
&v_1 = H\ket{v_R} \\
&v_{n+1} = 2 H\ket{v_n} - \ket{v_{n-1}}
Args:
dotProd (Callable): dot product function.
vL (np.ndarray): vector multiplied on the left to get the moments
vR (np.ndarray): vector on which to build the Krylov space
Order (int): order of the expansion
dtype (Type): specifies the dtype of the problem.
Returns:
np.ndarray: Array containing the Chebyshev moments in ascending order.
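    Example:
        A minimal sketch with a diagonal matrix already scaled into (-1, 1); for this choice
        of basis vectors the moments reduce to Chebyshev polynomials evaluated at the
        selected diagonal entry::

            H = csr_array(0.5 * np.eye(4))
            v = np.zeros(4)
            v[0] = 1.0
            mu = ChebyMoments(matrix_to_linop(H), v, v, Order=8)
            # mu[n] == T_n(0.5)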
"""
    # minimum order is 2
if Order < 2:
Order = 2
# initialize the moments
moments: np.ndarray = np.zeros(Order + 1, dtype=dtype)
# the vi's are calculated from vR, while vL is only used to calculate the overlap.
# the first two moments have to be computed before starting the recursive iteration
moments[0] = np.vdot(vL, vR)
v0 = np.zeros(vR.shape, dtype=dtype)
v1 = np.zeros(vR.shape, dtype=dtype)
v2 = np.zeros(vR.shape, dtype=dtype)
v0[:] = vR
v1 = dotProd(vR, out=v1)
moments[1] = np.vdot(vL, v1)
for n in range(2, Order + 1):
# calculate the n-th vector
v2 = 2 * dotProd(v1, out=v2) - v0
# its scalar product is the corresponding moment
moments[n] = np.vdot(vL, v2)
# v0 is v_n-2, v1=v_n-1 and v2=vn. At the next iteration I will use v1 and v2 only as
# v0 and v1, respectively.
v0, v1, v2 = v1, v2, v0
return moments
def ChebyDoubleMoments(
dotProd: Callable, vL: np.ndarray, vR: np.ndarray, P: Callable, Order: int, dtype: Type = float
) -> np.ndarray:
r"""Calculate Chebyshev moments of a double expansion.
Given a Hamiltonian, an operator and two vectors :math:`v_L` and :math:`v_R`, the corresponding
moments of the double Chebyshev expansion will be
.. math::
\mu_{mn} = \bra{v_L} T_m(\hat{H}) \hat{P} T_n(\hat{H}) \ket{v_R}
vL has to be in the ket form since the Hamiltonian will be applied from the left.
NOTE: dtype has been separated from the dtype of the vectors, since the final dtype will depend
on the operator P as well.
Args:
dotProd (Callable): dot product function.
vL (np.ndarray): vector (ket) multiplied on the left to get the moments.
vR (np.ndarray): vector (ket) on which to build the Krylov space.
P (Callable): linear operator for the expansion.
Order (int): order of the expansion.
dtype (Type): specifies the dtype of the problem.
Raises:
ValueError: if right and left vectors have incompatible shapes.
Returns:
np.ndarray: Array containing the Chebyshev moments in ascending order.
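    Example:
        A minimal sketch sandwiching the identity, in which case the moments reduce to
        :math:`\bra{v_L} T_m(H) T_n(H) \ket{v_R}`::

            H = csr_array(0.5 * np.eye(4))
            identity = matrix_to_linop(csr_array(np.eye(4)))
            v = np.full(4, 0.5)
            mu_mn = ChebyDoubleMoments(matrix_to_linop(H), v, v, identity, Order=4)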
"""
if vL.shape != vR.shape:
raise ValueError("Right and left vectors have incompatible shapes.")
    # minimum order is 2
if Order < 2:
Order = 2
# I construct |phi^R_n> by first building Tn(H)|v0> and then multiplying by P.
# <\phi^L_n| is just the corresponding polynomial applied to the initial left vector.
phiR = np.zeros((Order + 1, len(vR)), dtype=dtype)
phiR[0] = vR.copy()
phiR[1] = dotProd(phiR[0], out=phiR[1])
phiL = np.zeros((Order + 1, len(vL)), dtype=dtype)
phiL[0] = vL.copy()
phiL[1] = dotProd(phiL[0], out=phiL[1])
# Usual chebyshev recursion for both vectors
for n in range(2, Order + 1):
phiR[n] = 2 * dotProd(phiR[n - 1], out=phiR[n]) - phiR[n - 2]
phiL[n] = 2 * dotProd(phiL[n - 1], out=phiL[n]) - phiL[n - 2]
# multiply |\phi_R> by P from the left to obtain the final vector
for n in range(Order + 1):
phiR[n] = P(phiR[n], out=phiR[n])
# the moments C_mn are then simply the overlaps of the vectors constructed
    # While we should perform phiL^dag \cdot phiR, the transpose is applied to phiR because
    # of the shape of the vectors.
return np.conj(phiL) @ phiR.T
def ChebyDoubleMomentsAccumulate(
dotProd: Callable,
gn: np.ndarray,
vL: np.ndarray,
vR: np.ndarray,
P: Callable,
Order: int,
dtype: Type = float,
) -> np.ndarray:
r"""Calculate Chebyshev moments of a double expansion.
Given a Hamiltonian, an operator and two vectors :math:`v_L` and :math:`v_R`, the corresponding
moments of the double Chebyshev expansion will be
.. math::
\mu_{mn} = \bra{v_L} T_m(\hat{H}) \hat{P} T_n(\hat{H}) \ket{v_R}
Args:
dotProd (Callable): dot product function.
gn (np.ndarray): expansion coefficients. Should have shape (Order+1, nFreq)
vL (np.ndarray): vector multiplied on the left to get the moments
vR (np.ndarray): vector on which to build the Krylov space
P (Callable): operator for the expansion
Order (int): order of the expansion
dtype (Type): specifies the dtype of the problem.
Raises:
ValueError: if right and left vectors have incompatible shapes.
Returns:
np.ndarray: frequency dependent contraction.
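    Example:
        A minimal sketch reusing ``get_GF_coeff`` (defined below in this module) for the
        frequency-dependent coefficients, with the identity as the sandwiched operator::

            H = csr_array(0.5 * np.eye(4))
            identity = matrix_to_linop(csr_array(np.eye(4)))
            v = np.full(4, 0.5 + 0.0j)
            gn = get_GF_coeff(np.array([0.1 + 0.01j]), Order=8)
            res = ChebyDoubleMomentsAccumulate(
                matrix_to_linop(H), gn, v, v, identity, Order=8, dtype=complex
            )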
"""
if vL.shape != vR.shape:
raise ValueError("Right and left vectors have incompatible shapes.")
    # minimum order is 2
if Order < 2:
Order = 2
# these matrices will have shape (number_of_freqs, linear_size_of_system).
phiR = np.zeros((gn.shape[1], len(vR)), dtype=dtype)
v0R = np.zeros(vR.shape, dtype=dtype)
tmp = np.zeros_like(v0R)
v1R = np.zeros(vR.shape, dtype=dtype)
v2R = np.zeros(vR.shape, dtype=dtype)
v0R[:] = vR
v1R = dotProd(v0R, out=v1R)
# start accumulating the sum by explicitly calculating gm_i * T_i(H) |r> for i=0, 1
phiR += np.expand_dims(gn[0], axis=1) * P(v0R, tmp)
phiR += np.expand_dims(gn[1], axis=1) * P(v1R, tmp)
phiL = np.zeros((gn.shape[1], len(vR)), dtype=dtype)
v0L = np.zeros(vR.shape, dtype=dtype)
v1L = np.zeros(vR.shape, dtype=dtype)
v2L = np.zeros(vR.shape, dtype=dtype)
v0L[:] = vL
v1L = dotProd(v0L, out=v1L)
phiL += np.expand_dims(gn[0], axis=1) * v0L
phiL += np.expand_dims(gn[1], axis=1) * v1L
    for nn in range(2, Order + 1):
# calculate the nn-th vectors with nn>=2 via chebyshev recursion and continue
# accumulating the sums in the corresponding variables
v2R = 2 * dotProd(v1R, out=v2R) - v0R
phiR += np.expand_dims(gn[nn], axis=1) * P(v2R, tmp)
v0R, v1R, v2R = v1R, v2R, v0R
# same for left vectors
v2L = 2 * dotProd(v1L, out=v2L) - v0L
phiL += np.expand_dims(gn[nn], axis=1) * v2L
v0L, v1L, v2L = v1L, v2L, v0L
# perform contraction according to the shape of the matrices. I want to keep the frequency
# dependence
return np.einsum("fx, fx -> f", np.conj(phiL), phiR)
def get_tr_resolvent(
moments: np.ndarray,
use_KPM: bool = True,
) -> Callable:
r"""Returns function evaluating resolvent based on Chebyshev moments.
This function generator returns a function evaluating the trace of the resolvent
.. math::
\mathrm{Tr} \big(z - \hat{H} \big)^{-1}
for a given 'frequency' :math:`z` (can be a numpy array).
NOTE: z is expected to be rescaled to the interval (-1, 1) (real part)
NOTE: The trace is estimated as the *average* over all (normalized) stochastic
samples. Hence, multiply the result by the *linear* size of the Hamiltonian to get
the "true" estimate for the trace.
Args:
moments (np.ndarray): Array containing Chebyshev moments
use_KPM (bool): Flag to control usage of the Jackson kernel. Defaults to True.
Returns:
Callable: Function evaluating the trace of the resolvent for a given frequency.
"""
# list of indices ("powers") for expansion
Order = moments.size - 1
# set constants
C = np.ones(Order + 1, dtype=complex)
# "re-sum" Chebyshev expansion using Jackson kernel
if use_KPM:
C *= jacksonKernelCoeff(Order)
_scaling = chebyshev_scaling()
# define function evaluating the resolvent at a given "frequency"
def cheby_resolvent(z: fl_cmplx_or_ndarr) -> fl_cmplx_or_ndarr:
# ensure that z is numpy array
_z: np.ndarray = np.atleast_1d(z)
coefficients = C * calc_prefactors(_scaling, _z, Order + 1)
return coefficients @ moments
# return function computing resolvent based on Chebyshev expansion
return cheby_resolvent
def get_GF_coeff(w: Union[np.ndarray, float], Order: int) -> np.ndarray:
r"""Get the coefficients for the Chebyshev expansion of the Green's function.
These are defined as
.. math::
g_n(z) = \mp i (2 - \delta_{n0}) \frac{e^{\mp i n \arccos{z}}}{\sqrt{1 - z^2}}
where the sign corresponds to retarded/advanced components.
Args:
        w (Union[np.ndarray, float]): Frequency(ies) at which to compute the coefficients.
Order (int): Order of the expansion.
Returns:
        np.ndarray: Coefficients gm at frequency(ies) w for expansion order Order.
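    Example:
        Retarded coefficients slightly above the real axis::

            gm = get_GF_coeff(np.linspace(-0.9, 0.9, 5) + 1e-3j, Order=16)
            # gm.shape == (17, 5): one row per Chebyshev index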
"""
# sign + for w.imag = 0. This can be done since the
# expression below is stable for real values as well
# TODO: Address whether "ones" is truly needed; perhaps use atleast_1d on sign itself?
sign = (2 * (w.imag >= 0) - 1) * np.ones(len(np.atleast_1d(w)))
m = np.arange(Order + 1)
arccos_w = np.emath.arccos(w) # using emath allows for continuation into complex plane
tmp = -sign * 1j * np.expand_dims(m, axis=1) * arccos_w
# using sin(arccos_w) instead of sqrt(1 - w * w) causes the denominator to follow
# the same convention that exp(i arccos_w) uses, no spurious sign cancellation.
return -np.expand_dims(2 - (m == 0), axis=1) * 1j * sign * np.exp(tmp) / np.sin(arccos_w)
def idxListFromSelector(selector: Union[str, ArrayLike], dim: Optional[int] = None) -> np.ndarray:
"""Generates list of indices based on the selector.
Args:
selector (Union[str, ArrayLike]): Selector
dim (Optional[int]): Dimension of the matrix to generate the selectors of.
Returns:
np.ndarray: Indices corresponding to the selector.
Raises:
ValueError: if the selector does not have a valid value or type.
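    Example:
        The string selectors require ``dim``; explicit index pairs are passed through
        unchanged::

            idxListFromSelector("diag", dim=3)  # [[0, 0], [1, 1], [2, 2]]
            idxListFromSelector([[0, 1], [2, 0]])  # array of the same pairs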
"""
if isinstance(selector, str):
if dim is None:
raise ValueError("Dimension of the GF not specified.")
if selector == "full":
# build the full list of indices with the first column counting faster than the second.
# this is to ensure I am returning the indices of the upper triangular part. Although
# not explicitly used, it is important to specify for future development
row = np.tile(np.arange(dim), dim)
col = np.repeat(np.arange(dim), dim)
idxArray = np.vstack((row, col)).T
elif selector == "diag":
tmp = np.arange(dim)
idxArray = np.vstack((tmp, tmp)).T
else:
raise ValueError("Selector value not valid.")
elif isinstance(selector, np.ndarray) or isinstance(selector, list):
idxArray = np.array(selector)
else:
raise ValueError("Selector type not valid.")
return idxArray
def generateRandomVector(
selector: np.ndarray,
out: np.ndarray,
dtype: Union[Type[complex], Type[float]] = float,
) -> np.ndarray:
"""Generate random normalized vector given a selector.
Args:
selector (np.ndarray): array selecting which subspace to consider.
out (np.ndarray): preallocated output array.
dtype (Union[Type[complex], Type[float]]): Data type of the output array. Defaults to
float.
Returns:
np.ndarray: normalized random vector according to selector.
Raises:
ValueError: if dtype is invalid or if the selector is not made of just ones and zeros.
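    Example:
        A random-phase vector restricted to the first two of four sites::

            out = np.empty(4, dtype=complex)
            v = generateRandomVector(np.array([1, 1, 0, 0]), out, dtype=complex)
            # v has unit norm and vanishes where the selector is zero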
"""
dimension = len(selector)
if not np.all((selector == 1) + (selector == 0)):
raise ValueError("The selector is not made of only ones and zeros.")
if dtype is float:
# random value in (-0.5, 0.5)
out[:] = np.random.rand(dimension) - 0.5
elif dtype is complex:
# random phase in (0, 2pi)
out[:] = 2.0j * np.pi * np.random.rand(dimension)
# associated complex number
out[:] = np.exp(out)
else:
raise ValueError(f"Invalid dtype {dtype} for random vector generation.")
# select accordingly
out *= selector
# normalize to 1 after selection
out /= np.linalg.norm(out)
return out
def blocks2full(blocks: tuple[csr_or_ndarr, csr_or_ndarr, csr_or_ndarr]) -> csr_or_ndarr:
"""Construct matrix from blocks.
Within the spin representation ((uu, ud), (du, dd)) the blocks passed will be in the following
order: [uu, dd, ud].
Args:
blocks (tuple[csr_or_ndarr, csr_or_ndarr, csr_or_ndarr]): Blocks composing the matrix.
Returns:
csr_or_ndarr: full matrix composed by the input blocks
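    Example:
        Assembling a 2x2 matrix from 1x1 spin blocks; the du block is taken as the conjugate
        transpose of ud::

            uu = csr_array(np.array([[1.0]]))
            dd = csr_array(np.array([[-1.0]]))
            ud = csr_array(np.array([[0.5]]))
            full = blocks2full((uu, dd, ud)).toarray()  # [[1.0, 0.5], [0.5, -1.0]]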
"""
# the method will not check for inconsistencies of the input tuple as mypy will catch
# most of those thanks to the annotation.
if isinstance(blocks[0], csr_array):
tmp1 = hstack((blocks[0], blocks[2]), format="csr")
tmp2 = hstack((blocks[2].conj().T, blocks[1]), format="csr")
return vstack((tmp1, tmp2), format="csr")
else:
tmp1 = np.hstack((blocks[0], blocks[2]))
tmp2 = np.hstack((blocks[2].conj().T, blocks[1]))
return np.vstack((tmp1, tmp2))
def fermi_distr(energies: np.ndarray, mu: Union[float, np.ndarray], T: float) -> np.ndarray:
r"""Fermi distribution.
Evaluated as
.. math::
f(\epsilon, \mu) = \frac{1}{e^{(\epsilon - \mu) / k_B T} + 1}
with :math:`k_B` the Boltzmann constant.
Args:
energies (np.ndarray): energies at which to evaluate the Fermi distribution.
mu (Union[float, np.ndarray]): chemical potential(s).
        T (float): Temperature in Kelvin.
Returns:
np.ndarray: Fermi distribution.
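    Example:
        Broadcasting over one or several chemical potentials::

            energies = np.linspace(-1.0, 1.0, 5)
            fermi_distr(energies, 0.0, T=300.0)  # shape (5,)
            fermi_distr(energies, np.array([-0.1, 0.1]), T=300.0)  # shape (2, 5)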
"""
T *= _KB_in_EV_over_K
# calculate energies-mu with the correct shape in case mu is an array.
# This will have shape (len(mu), len(energies))
eps_mu = energies - np.expand_dims(np.atleast_1d(mu), axis=1)
fermi_f = np.zeros_like(eps_mu)
# if T==0 the fermi function is just 1 - heaviside, otherwise compute it
if T == 0:
fermi_f = 1 - np.heaviside(eps_mu, 0.5) # set the value at eps=mu to 0.5
else:
# use two versions depending on the sign of eps-mu to avoid overflows
fermi_f[eps_mu > 0] = np.exp(-(eps_mu[eps_mu > 0]) / T) / (
1 + np.exp(-(eps_mu[eps_mu > 0]) / T)
)
fermi_f[eps_mu <= 0] = 1 / (np.exp((eps_mu[eps_mu <= 0]) / T) + 1)
    # fermi_f will have shape (1, len(energies)) if only one chemical potential has been
    # provided. Squeeze this trivial dimension if needed
return np.squeeze(fermi_f)
def getFermiPadeParams(N: int) -> tuple[np.ndarray, np.ndarray]:
"""Returns Pade-Matsubara poles and residues.
Computes the poles and residues derived from the continued fraction representation of the
Fermi function [Ref: Ozaki, PRB 75, 035123 (2007)]
Args:
N (int): number of poles with positive imaginary part (total number of poles is 2N).
Returns:
tuple[np.ndarray, np.ndarray]: tuple of two numpy arrays of shape (2 N,).
first element: (complex) Pade-Matsubara poles (sorted with ascending imaginary part).
second element: (real) Pade-Matsubara residues corresponding to poles.
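    Example:
        ::

            poles, residues = getFermiPadeParams(10)
            # 20 purely imaginary poles, sorted by ascending imaginary part, with the
            # corresponding (real) residues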
"""
# initialized matrix
M = np.zeros(
(
2 * N,
2 * N,
),
dtype=float,
)
# set matrix yielding Pade-Matsubara poles and residues
for i in range(1, 2 * N):
M[i, i - 1] = M[i - 1, i] = 0.5 / np.sqrt((2.0 * i - 1) * (2.0 * i + 1))
# diagonalize matrix
values, vectors = np.linalg.eigh(M)
# set Pade poles
iz = 1.0j / values
# set Pade residues
R = np.power(vectors[0, :] / (2.0 * values), 2)
# sort poles and residues
sortIdx = np.argsort(iz.imag)
# now return poles and residues
return iz[sortIdx], R[sortIdx]
def dir2idx(tensor_element: str) -> tuple[int, ...]:
"""Transforms liretal cartesian direction into index.
Args:
tensor_element (str): Selected tensor element in cartesian coordinates, e.g. 'xx'.
Raises:
ValueError: If the selected tensor is not valid.
Returns:
        tuple[int, int]: Indices corresponding to the selected direction.
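    Example:
        ::

            dir2idx("xy")  # (0, 1)
            dir2idx("zz")  # (2, 2)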
"""
    # NOTE: this will handle mixed directions in the future, e.g. x/sqrt(2) + y/sqrt(2)
_dir2idx = {"x": 0, "y": 1, "z": 2}
if any((x not in _dir2idx.keys()) for x in tensor_element) or len(tensor_element) != 2:
raise ValueError(f"{tensor_element} is not a valid element of the conductivity tensor.")
directionL = _dir2idx[tensor_element[0]]
directionR = _dir2idx[tensor_element[1]]
return (directionL, directionR)
def get_energies_from_e_window(
scale: float, shift: float, n_omegas: int, e_window: Optional[Union[list, np.ndarray]] = None
) -> np.ndarray:
"""Helper function returning discretized energies given the energy window.
Args:
scale (float): Scale used to transform frequencies.
shift (float): Shift used to transform frequencies.
        n_omegas (int): number of energies to discretize the window into.
e_window (Optional[Union[list, np.ndarray]], optional): energy window. Defaults to None.
Returns:
np.ndarray: scaled energy window or default one.
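    Example:
        With an explicit window the energies are simply a linear discretization::

            get_energies_from_e_window(scale=1.0, shift=0.0, n_omegas=3, e_window=[-1.0, 1.0])
            # array([-1., 0., 1.])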
"""
if e_window is None:
energies_cheby = np.linspace(-1 + 1e-4, 1 - 1e-4, n_omegas)
return _scale2real(energies_cheby, scale, shift)
else:
return np.linspace(e_window[0], e_window[1], n_omegas)
def _scale2real(frequencies: fl_cmplx_or_ndarr, scale: float, shift: float) -> fl_cmplx_or_ndarr:
"""Apply scaling to transform scaled frequencies to real ones.
Args:
frequencies (fl_cmplx_or_ndarr): array of scaled energies within (-1, 1).
scale (float): scale used to transform frequencies.
shift (float): shift used to transform frequencies.
Returns:
fl_cmplx_or_ndarr: Real energies array.
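    Example:
        With scale=2.0 and shift=1.0, the scaled frequency 0.5 maps back to
        0.5 / 2.0 + 1.0 == 1.25.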
"""
return frequencies / scale + shift
def get_DOS_from_moments(
moments: np.ndarray,
frequencies: np.ndarray,
scale: float,
shift: float,
use_jackson_kernel: bool = True,
eta: float = 0,
) -> np.ndarray:
"""Evaluate the DOS from the corresponding moments.
Args:
moments (np.ndarray): moments to expand.
frequencies (np.ndarray): frequencies at which to evaluate the DOS.
scale (float): scale used to evaluate the moments.
shift (float): shift used to evaluate the moments.
use_jackson_kernel (bool): whether to use the jackson kernel. Defaults to True.
eta (float): Green's function coefficients broadening.
Returns:
np.ndarray: density of states at the desired frequencies.
"""
n_moments = len(moments)
# initialize GF
GF = np.zeros((len(frequencies)), dtype=complex)
# set up the scaling
_scaling = chebyshev_scaling()
_scaling.set_scale(scale)
_scaling.set_shift(shift)
# calculate prefactors
_gm = calc_prefactors(_scaling, frequencies, eta, n_moments)
# apply kernel if necessary
if use_jackson_kernel:
_gm = apply_jackson(_gm)
# calculate the corresponding GF
resolvent_greens_function_from_moments(
scaling=_scaling, moments=moments, prefactors=_gm, res=GF
)
# return the DOS
return -GF.imag / np.pi
def get_KG_conductivity_from_moments(
moments: np.ndarray,
chemical_potentials: np.ndarray,
scale: float,
shift: float,
total_volume: float,
system_linear_size: int,
use_jackson_kernel: bool = True,
eta: float = 0,
) -> np.ndarray:
"""Evaluate the Kubo-Greenwood conductivity from the corresponding moments.
Args:
moments (np.ndarray): moments to expand.
chemical_potentials (np.ndarray): chemical potentials at which to evaluate the
conductivity.
scale (float): scale used to evaluate the moments.
shift (float): shift used to evaluate the moments.
total_volume (float): total volume of the system. Used for normalization.
system_linear_size (int): total linear size of the system. Used for normalization.
use_jackson_kernel (bool): whether to use the jackson kernel. Defaults to True.
eta (float): Green's function coefficients broadening.
Returns:
np.ndarray: conductivity at the desired chemical potentials.
"""
# get the order of the expansion
Order = len(moments) - 1
# get the "GF" coefficients
_scaling = chebyshev_scaling()
_scaling.set_scale(scale)
_scaling.set_shift(shift)
_gm = calc_prefactors(_scaling, chemical_potentials, eta, Order + 1)
# apply kernel if necessary
if use_jackson_kernel:
_gm = apply_jackson(_gm)
# we only need -Im[gm]
gm = -_gm.imag
    # sum over the moments and corresponding coefficients. Here I have two sets of coefficients
    # since I have performed a "double" expansion of the two delta functions.
cond = np.einsum("wn, mn -> wm", gm, moments)
cond = np.einsum("wm, wm -> w", gm, cond)
# NOTE: the normalization might have to be made dependent on the selector. In this case it
# might go in the calculation of the total volume. E.g. if I am calculating the cond
# associated with a certain spin, I might want to have 2sigma_tot for each spin, since I am
    # evaluating the response of only half of the sites. The total sigma will then be
# 1/(1/sigma_up + 1/sigma_down) = sigma_tot
"""NOTE: conductivity normalization and units
According to eq (145) of https://doi.org/10.1016/j.physrep.2020.12.001, the prefactor of the
conductivity is (hbar e^2) / (pi Volume scale^2). In our case, we have an additional factor
of 1/hbar^2 because our velocities are i[H, r] (so velocity times hbar). Moreover our scale is
the inverse of what they use in the paper. We are therefore left with
(e^2) scale^2 / (pi hbar Volume). pi hbar = h / 2, so we only have a factor of two left
to have the conductivity in units of e^2/h. Finally, the volume per site enters, i.e.
total_volume / system_linear_size, since our random states are normalized to 1 instead of
being normalized to sqrt(system_linear_size).
"""
cond *= 2 * scale**2 / (total_volume / system_linear_size)
return cond
def get_KB_conductivity_from_moments(
moments: np.ndarray,
chemical_potentials: np.ndarray,
scale: float,
shift: float,
total_volume: float,
system_linear_size: int,
T: float,
NOmegasInt: int,
use_jackson_kernel: bool = True,
) -> np.ndarray:
"""Evaluate the Kubo-Bastin conductivity from the corresponding moments.
Args:
moments (np.ndarray): moments to expand.
chemical_potentials (np.ndarray): chemical potentials at which to evaluate the
conductivity.
scale (float): scale used to evaluate the moments.
shift (float): shift used to evaluate the moments.
total_volume (float): total volume of the system. Used for normalization.
system_linear_size (int): total linear size of the system. Used for normalization.
T (float): Temperature (in Kelvin) at which to compute the conductivity.
NOmegasInt (int): number of energies for performing the numerical integral.
use_jackson_kernel (bool): whether to use the jackson kernel. Defaults to True.
Returns:
np.ndarray: conductivity at the desired chemical potentials.
"""
print("Calculating expansion...\n")
# get the order of the expansion
Order = moments.shape[0] - 1
# scale moments by the KPM kernel if necessary. Twice since I have a double expansion.
    if use_jackson_kernel:
        kern_coeff = jacksonKernelCoeff(Order)
        moments = np.einsum("m, n, mn -> mn", kern_coeff, kern_coeff, moments)
    else:
        # copy to avoid mutating the caller's moments with the in-place rescaling below
        moments = moments.copy()
    # scale moments by ((1 + delta_n0)(1 + delta_m0))^-1. This comes from the explicit expansion.
    moments[:, 0] /= 2
    moments[0, :] /= 2
# scale the chemical potentials into the chebyshev domain
chemical_potentials_cheby = _scale2transformed(chemical_potentials, scale, shift)
# scaled energies and corresponding delta_energy (used for integral)
dd = 1e-3
eps, delta_e = np.linspace(-1 + dd, 1 - dd, NOmegasInt, retstep=True)
# implementing eq (5) of paper for the coefficients of the expansion
# NOTE: this is the step that takes the longest out of all the post-moments calculations
# initialize quantities
n = np.arange(Order + 1)
Tm_ee = np.zeros((Order + 1))
Gamma_nm = np.zeros((Order + 1, Order + 1), dtype=complex)
integrand = np.zeros(NOmegasInt, dtype=float)
for ii, ee in enumerate(eps):
        for _m in range(Order + 1):
# the function eval_chebyt simply evaluates the _m-th chebyshev polynomial at the
# energy ee. Do this for every energy (instead of all at once) to have a smaller
# memory footprint, since this object would otherwise be of size NOmegas * Order
Tm_ee[_m] = eval_chebyt(_m, ee)
# evaluate eq (5) from reference, for energy ee
sqrt_term = 1.0j * n * np.sqrt(1 - ee**2)
exp_term = np.exp(1.0j * n * np.arccos(ee))
tmp1 = (ee - sqrt_term) * exp_term
tmp1 = np.einsum("n, m -> mn", tmp1, Tm_ee)
tmp2 = (ee + sqrt_term) * np.conj(exp_term)
tmp2 = np.einsum("m, n -> mn", tmp2, Tm_ee)
Gamma_nm = tmp1 + tmp2
# calculate the integrand partially, only the fermi distribution is missing
# FIXME: check whether casting it to real is always ok.
integrand[ii] = np.real(np.einsum("nm, mn", Gamma_nm, moments))
integrand /= (1 - eps**2) ** 2
# get fermi distribution for the energies and chemical potentials. The row index of the result
# spans the chemical potentials.
fermi_f = fermi_distr(eps, chemical_potentials_cheby, T * scale)
    # this will correctly broadcast, since integrand only has the energy index (the one to
    # integrate), which is the second axis of fermi_f
conductivity = np.sum(fermi_f * integrand, axis=1) * delta_e
# normalize result and return
    # See qolossalTools::get_KG_conductivity_from_moments for more details. The extra factor of
    # 4 comes from the expansion itself: in the reference paper there is a factor of 2 in the
    # expressions they use for expanding the delta function and the Green's function (eqs 2
    # and 3). The additional factor of 4 present in eq (4) of the paper but not here comes
    # from the fact that our scale = 2 / DeltaE (from the paper)
conductivity *= 2 * 4 * scale**2 / (total_volume / system_linear_size)
return conductivity
def get_T_dependent_conductivity(
conductivity: np.ndarray, energies: np.ndarray, T: float
) -> np.ndarray:
"""Compute the temperature dependent Kubo-Greenwood conductivity from the zero temperature one.
Args:
conductivity (np.ndarray): Zero temperature Kubo-Greenwood conductivity.
energies (np.ndarray): Energies at which the conductivity is computed.
T (float): Temperature (in Kelvin) at which to compute the conductivity.
Raises:
ValueError: If the passed temperature is negative.
Returns:
np.ndarray: Temperature dependent Kubo-Greenwood conductivity.
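    Example:
        A minimal sketch thermally broadening a toy step-like zero-temperature
        conductivity::

            energies = np.linspace(-1.0, 1.0, 201)
            sigma0 = np.heaviside(energies, 0.5)
            sigmaT = get_T_dependent_conductivity(sigma0, energies, T=300.0)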
"""
if T < 0:
message = "Temperatures are in Kelvin, negative temperatures are therefore not allowed. "
message += f"Passed temperature: {T}."
raise ValueError(message)
elif T > 0: # such that T==0 bypasses this completely and returns the given conductivity
n_frequencies = len(energies)
        # I need to convolve the result with a Fermi window. The integral is
        # sigma(w, T) = \int dzz (-d fermi_f(zz - w)/d zz) * sigma(zz, 0)
# I calculate the derivative numerically, so I make sure I evaluate the fermi function
# at 1 more frequency and with the frequencies shifted back by half a dz, this way
# the diff is at the same frequencies as sigma(zz, 0).
        # NOTE: the derivative of the fermi function is known analytically:
        # -d fermi_f / d eps = beta * fermi_f * (1 - fermi_f)
        # BUT numerically it is problematic at very low temperatures, since if T is so low
        # that the ff is 1-heaviside function, fermi_f * (1 - fermi_f) is identically zero!!
df = energies[1] - energies[0]
f_to_integrate = np.linspace(energies[0] - df, energies[-1], n_frequencies + 1)
f_to_integrate += df / 2
fermi_f = fermi_distr(f_to_integrate, energies, T)
# fermi_f has shape (NOmegas, NOmegas + 1) so the derivative will be a square matrix
# axis=0 is the result axis, while the other one is the one we integrate
dfermi_f = -np.diff(fermi_f, axis=1) / df
# integrate. use abs(df) to avoid problems when the frequencies have inverse order.
conductivity = (conductivity * dfermi_f).sum(axis=1) * np.abs(df)
return conductivity
def get_expectation_value_from_moments(
moments: np.ndarray,
T: float,
mu: float,
scale: float,
shift: float,
n_pade_freq: int,
) -> Union[float, complex]:
"""Evaluate the expectation value of an operator from the corresponding moments.
    The implementation follows what is outlined in the internal notes on linear response.
Args:
moments (np.ndarray): moments to expand.
T (float): Temperature at which to compute the expectation value.
mu (float): Chemical potential at which to compute the expectation value.
scale (float): scale used to evaluate the moments.
shift (float): shift used to evaluate the moments.
        n_pade_freq (int): Number of Pade frequencies to use for the evaluation of the
fermi function.
Returns:
Union[float, complex]: expectation value.
"""
Order = len(moments) - 1
# get Fermi-Pade frequencies and residues to perform the matsubara summation
izf, Rf = getFermiPadeParams(n_pade_freq)
beta = 1 / (_KB_in_EV_over_K * T)
izf = izf / beta + mu
Rf /= beta
# get the Green's function coefficients
_scaling = chebyshev_scaling()
_scaling.set_scale(scale)
_scaling.set_shift(shift)
# get - the im part of the coefficient
gm = calc_prefactors(_scaling, izf, Order + 1)
gm = apply_jackson(gm)
# evaluate the sum over the moments and over the pade frequencies for calculating the
# non-trivial contribution to the expectation value
expV = (gm @ moments) @ Rf * scale + 0.0j
# add trivial contribution to the expectation value. This is basically just (1/2) the trace of
# the operator. The 0th moment represents the stochastic approximation to it, since it is
# the average <r| A |r> with r random vector.
expV += moments[0] / 2
return expV
def get_optical_conductivity_from_moments(
moments: np.ndarray,
frequencies: np.ndarray,
scale: float,
shift: float,
total_volume: float,
system_linear_size: int,
T: float,
chemical_potential: float,
NOmegasInt: int,
use_jackson_kernel: bool = True,
eta: float = 1e-7,
verbose: int = 1,
) -> np.ndarray:
"""Evaluate the complex optical conductivity from the corresponding moments.
This routine implements eq. 32 of the internal notes on the optical conductivity.
Args:
moments (np.ndarray): conductivity moments to expand.
frequencies (np.ndarray): frequencies at which to evaluate the conductivity.
scale (float): scale used to evaluate the moments.
shift (float): shift used to evaluate the moments.
total_volume (float): total volume of the system. Used for normalization.
system_linear_size (int): total linear size of the system. Used for normalization.
T (float): Temperature in Kelvin.
chemical_potential (float): Chemical potential.
NOmegasInt (int): number of frequencies to use to calculate the E integral.
use_jackson_kernel (bool): whether to use the jackson kernel. Defaults to True.
eta (float): Green's function coefficients broadening.
verbose (int): verbosity level.
Returns:
np.ndarray: conductivity at the desired frequencies.
"""
# get the order of the expansion
Order = len(moments) - 1
scaled_frequencies = _scale2transformed(frequencies, scale, 0)
n_omegas = len(scaled_frequencies)
if verbose:
print("Calculating expansion...")
# scale moments by the KPM kernel. Twice since I have a double expansion.
    if use_jackson_kernel:
        kern_coeff = jacksonKernelCoeff(Order)
        moments = np.einsum("n, mn -> mn", kern_coeff, moments)
        moments = np.einsum("m, mn -> mn", kern_coeff, moments)
# scale passed chemical potential
scaled_chemical_potential = _scale2transformed(chemical_potential, scale, shift)
# scaled energies and corresponding delta_energy (used for integral)
eps, delta_e = np.linspace(-2, 2, NOmegasInt, retstep=True)
# eps is in "scaled" frequencies, so I need a scaling object with scale=1 and shift=0 to
# then calculate the coefficients.
_scaling0 = chebyshev_scaling()
# in order to more efficiently calculate the integral we contract the different objects in a
# specific way to avoid allocating large intermediate arrays. we follow what is in the notes
alpha_n = calc_prefactors(_scaling0, eps, eta, Order + 1).imag
a_m_p0 = calc_prefactors(_scaling0, eps, eta, Order + 1)
a_n_m0 = calc_prefactors(_scaling0, eps, -eta, Order + 1, advanced=True)
# calculate intermediate objects where we contract one of the moments indices
C_me_p = (
np.einsum("en, nm -> em", alpha_n, moments)
* fermi_distr(eps, scaled_chemical_potential, T)[:, np.newaxis]
* delta_e
)
C_ne_m = (
np.einsum("em, nm -> en", alpha_n, moments)
* fermi_distr(eps, scaled_chemical_potential, T)[:, np.newaxis]
* delta_e
)
conductivity = np.zeros(n_omegas, dtype=complex)
# for each frequency, contract the intermediate objects with the corresponding coefficients
for i_omega, omega in enumerate(scaled_frequencies):
Dm_p = calc_prefactors(_scaling0, eps + omega, eta, Order + 1) - a_m_p0
Dn_m = calc_prefactors(_scaling0, eps - omega, -eta, Order + 1, advanced=True) - a_n_m0
conductivity[i_omega] = np.einsum("em, em -> ", C_me_p, Dm_p)
conductivity[i_omega] += np.einsum("en, en -> ", C_ne_m, Dn_m)
print_progress(verbose, "norm", i_omega, n_omegas)
    # NOTE: prefactor: I need to divide by the scaled frequencies because I am also evaluating
    # the Green's functions' coefficients at the scaled frequencies. Everything is scaled in
    # this evaluation! The final result simply corresponds to the original frequencies.
conductivity *= -1.0j / ((total_volume / system_linear_size) * scaled_frequencies)
# final normalization
conductivity *= 2 * scale**2
return conductivity
def get_real_optical_conductivity_from_moments(
moments: np.ndarray,
frequencies: np.ndarray,
scale: float,
shift: float,
total_volume: float,
system_linear_size: int,
T: float,
chemical_potential: float,
NOmegasInt: int,
use_jackson_kernel: bool = True,
eta: float = 1e-7,
) -> np.ndarray:
"""Evaluate the real part of the optical conductivity from the corresponding moments.
Args:
moments (np.ndarray): moments to expand.
frequencies (np.ndarray): frequencies at which to evaluate the conductivity.
scale (float): scale used to evaluate the moments.
shift (float): shift used to evaluate the moments.
total_volume (float): total volume of the system. Used for normalization.
system_linear_size (int): total linear size of the system. Used for normalization.
T (float): Temperature in Kelvin.
chemical_potential (float): Chemical potential.
NOmegasInt (int): number of frequencies to use to calculate the E integral.
use_jackson_kernel (bool): whether to use the jackson kernel. Defaults to True.
eta (float): Green's function coefficients broadening.
Returns:
np.ndarray: conductivity at the desired frequencies.
"""
# get the order of the expansion
Order = len(moments) - 1
scaled_frequencies = _scale2transformed(frequencies, scale, 0)
n_omegas = len(scaled_frequencies)
print("Calculating expansion...")
# scale moments by the KPM kernel. Twice since I have a double expansion.
    if use_jackson_kernel:
        kern_coeff = jacksonKernelCoeff(Order)
        moments = np.einsum("n, mn -> mn", kern_coeff, moments)
        moments = np.einsum("m, mn -> mn", kern_coeff, moments)
# scale passed chemical potential
chemical_potential = _scale2transformed(chemical_potential, scale, shift)
# scaled energies and corresponding delta_energy (used for integral)
eps, delta_e = np.linspace(-2, 2, NOmegasInt, retstep=True)
# corresponding "real" frequencies, to rescale if the flag is set. Only use the scale
# since these frequencies are the ones associated with the perturbation and not with the
# system's spectrum, meaning the shift is not needed.
cond = np.zeros(n_omegas)
# eps is in "scaled" frequencies, so I need a scaling object with scale=1 and shift=0 to
# then calculate the coefficients.
_scaling0 = chebyshev_scaling()
an = calc_prefactors(_scaling0, eps, eta, Order + 1).imag.T
for ii, ww in enumerate(scaled_frequencies):
int_window = fermi_distr(eps, chemical_potential, T) - fermi_distr(
eps + ww, chemical_potential, T
)
# the shifted coefficients are already multiplied with the integral window
am = get_GF_coeff((eps + ww + eta * 1.0j), Order).imag * int_window
# Anm at frequency ww is now just the integral of an and the shifted coefficients
Anm = np.einsum("ne, me -> nm", an, am) * delta_e
cond[ii] = np.einsum("nm, nm -> ", moments, Anm) / ww
print_progress(1, "norm", ii, n_omegas)
# normalize result and return
# See qolossalTools::get_KG_conductivity_from_moments for more details.
cond *= 2 * scale**2 / (total_volume / system_linear_size)
return cond