Pierre PELLETIER / 3D percolation / Commits

Commit 6f1d0738
Authored Mar 03, 2022 by Pierre PELLETIER

    Added tight binding and 3D regensburg

Parent: f1cb9379
Changes: 25 files
__pycache__/lib_an3d.cpython-35.pyc: no preview for this file type
__pycache__/lib_an3d.cpython-37.pyc: no preview for this file type
__pycache__/lib_an3d.cpython-39.pyc: no preview for this file type
__pycache__/lib_tight_binding.cpython-39.pyc (new file, 0 → 100644): file added
finite_size_scaling.py

@@ -7,7 +7,7 @@ def f(x, a, b, c):
#bool_low_real = False
#
N_real = 200
N_real = 200
#directory = 'data_plots_marcel/'
directory = 'data_plots_marcel/high_real/'

@@ -54,6 +54,7 @@ for eta in eta_list:
        std_list[i_size] = np.std(energy_estimates)
        i_size += 1
    std_list = np.array(std_list)
    print(mean_list)
    print(std_list)
    m, b = np.polyfit(std_list, mean_list, 1)

@@ -80,10 +81,20 @@ for eta in eta_list:
    plt.close()
    #plt.show()
    """
    plt.figure(figsize=(11,8))
    plt.plot(size_list, mean_list)
    plt.show()"""
    fig = plt.figure(figsize=(12,8))
    ax = fig.add_subplot(111)
    #plt.xlim(xmin=0, xmax = 0.12)
    #plt.ylim(ymin = -0.7, ymax = -0)
    plt.errorbar(size_list, mean_list, yerr=std_list, capsize=10, label='Standard deviation of percolation energy')
    plt.errorbar(size_list, mean_list, yerr=std_list/np.sqrt(N_real), capsize=10, label='Mean percolation energy')
    plt.xlabel('$N$')
    plt.ylabel('$(E_m-V_0)/V_0$')
    plt.title(r'$\eta = %.1f$' % eta)
    plt.legend()
    #plt.show()
    plt.savefig(directory_plots + 'SizeEvolutionEta' + str_eta)
    fig = plt.figure(figsize=(12,8))
    ax = fig.add_subplot(111)

@@ -91,14 +102,16 @@ for eta in eta_list:
    #plt.ylim(ymin = -0.7, ymax = -0)
    popt, pcov = optimize.curve_fit(f, size_list, mean_list, p0=(-0.5, 1, -1))
    print('HEEEELP', popt)
    plt.errorbar(size_list, mean_list, yerr=std_list, capsize=10, label='Mean percolation energy')
    plt.errorbar(size_list, mean_list, yerr=std_list, capsize=10, label='Standard deviation of percolation energy')
    plt.errorbar(size_list, mean_list, yerr=std_list/np.sqrt(N_real), capsize=10, label='Mean percolation energy')
    indices = np.linspace(40, 90, 50)
    plt.plot(indices, [f(indices[i], popt[0], popt[1], popt[2]) for i in range(len(indices))], color='red', label='Fit for $E_m = E_c + a N^{b}$')
    #plt.plot(size_list, [f(size_list[i], popt[0],popt[1], popt[2]) for i in range(len(size_list))])
    plt.xlabel('$N$')
    plt.ylabel('$(E_m-V_0)/V_0$')
    plt.title(r'$\eta = %.1f$' % eta)
    plt.text(0.8, 0.75, r'Result : $E_c$ = %.3f, $\nu = %.3f $ ' % (popt[0], -1/popt[2]), horizontalalignment='center', verticalalignment='center', transform=ax.transAxes)
    plt.text(0.7, 0.65, r'Result : $E_c$ = %.3f, $\nu = %.3f $ ' % (popt[0], -1/popt[2]), horizontalalignment='center', verticalalignment='center', transform=ax.transAxes)
    plt.legend()
    #plt.show()
    plt.savefig(directory_plots + 'SizeScalingFitEta' + str_eta)
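The hunk headers in finite_size_scaling.py reference def f(x, a, b, c), whose definition lies above the shown context and is not part of this diff. A minimal sketch consistent with the fit label 'Fit for $E_m = E_c + a N^{b}$', the initial guess p0=(-0.5, 1, -1) and the read-out E_c = popt[0], nu = -1/popt[2] could be (an assumption, not the committed definition):

import numpy as np

def f(x, a, b, c):
    # Finite-size-scaling ansatz E_m(N) = E_c + amplitude * N**exponent,
    # with a playing the role of E_c, b the amplitude and c the exponent -1/nu.
    return a + b * np.power(x, c)

With this form, optimize.curve_fit returns popt = (E_c, amplitude, -1/nu), which matches the values printed by the plt.text calls.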
finite_size_scaling_anderson.py

@@ -7,7 +7,7 @@ def f(x, a, b, c):
#bool_low_real = False
#
N_real = 200
N_real = 20
#directory = 'data_plots_marcel/'
directory = 'data_plots_marcel_anderson/'

@@ -31,12 +31,12 @@ plt.rc('text', usetex=True)
#size_list = [ 40,50,60, 70, 80, 90, 100]
size_list = [50, 60, 70, 80, 90, 100]
size_list = [50, 60, 70, 80, 90, 100, 150]
#eta_list = [0.2, 0.5,1,2]
#eta_list = [0.05, 0.1,0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2]
#eta_list = [0.2, 0.5, 1]
eta_list = [0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]
eta_list = [0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9]
#eta_list = [1]
for eta in eta_list:

@@ -54,6 +54,8 @@ for eta in eta_list:
        mean_list[i_size] = np.mean(energy_estimates)
        std_list[i_size] = np.std(energy_estimates)
        i_size += 1
    std_list = np.array(std_list)
    print(mean_list)
    print(std_list)

@@ -86,14 +88,28 @@ for eta in eta_list:
    plt.plot(size_list, mean_list)
    plt.show()"""
    try:
        fig = plt.figure(figsize=(12,8))
        ax = fig.add_subplot(111)
        plt.errorbar(size_list, mean_list, yerr=std_list, capsize=10, label='Standard deviation of percolation energy')
        plt.errorbar(size_list, mean_list, yerr=std_list/np.sqrt(N_real), capsize=10, label='Mean percolation energy')
        plt.xlabel('$N$')
        plt.ylabel('$(E_m-V_0)/V_0$')
        plt.title(r'$\eta = %.1f$' % eta)
        plt.legend()
        #plt.show()
        plt.savefig(directory_plots + 'SizeEvolutionEta' + str_eta + '.png', dpi=300)
        fig = plt.figure(figsize=(12,8))
        ax = fig.add_subplot(111)
        #plt.xlim(xmin=0, xmax = 0.12)
        #plt.ylim(ymin = -0.7, ymax = -0)
        popt, pcov = optimize.curve_fit(f, size_list, mean_list, p0=(-0.5, 1, -1))
        print('HEEEELP', popt)
        plt.errorbar(size_list, mean_list, yerr=std_list, capsize=10, label='Mean percolation energy')
        indices = np.linspace(50, 90, 50)
        plt.errorbar(size_list, mean_list, yerr=std_list, capsize=10, label='Standard deviation of energy')
        plt.errorbar(size_list, mean_list, yerr=std_list/np.sqrt(N_real), capsize=10, label='Mean percolation energy')
        indices = np.linspace(50, 150, 50)
        plt.plot(indices, [f(indices[i], popt[0], popt[1], popt[2]) for i in range(len(indices))], color='red', label=r'Fit for $E_m = E_c + a N^{\frac{-1}{\nu}}$')
        #plt.plot(size_list, [f(size_list[i], popt[0],popt[1], popt[2]) for i in range(len(size_list))])
        plt.xlabel('$N$')
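A note on the two kinds of error bars used in these scripts: yerr=std_list is the sample standard deviation of the percolation-energy estimates over disorder realisations, while yerr=std_list/np.sqrt(N_real) is the standard error of the mean, sigma/sqrt(N_real). With N_real = 20 as set in this file, the second set of bars is about sqrt(20) ≈ 4.5 times smaller than the first.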
finite_size_scaling_sinc.py

@@ -7,7 +7,7 @@ def f(x, a, b, c):
#bool_low_real = False
#
N_real = 200
N_real = 50
#directory = 'data_plots_marcel/'
directory = 'data_plots_marcel_sinc/50real/'

@@ -55,6 +55,7 @@ for eta in eta_list:
        std_list[i_size] = np.std(energy_estimates)
        i_size += 1
    std_list = np.array(std_list)
    print(mean_list)
    print(std_list)
    m, b = np.polyfit(std_list, mean_list, 1)

@@ -86,20 +87,35 @@ for eta in eta_list:
    plt.plot(size_list, mean_list)
    plt.show()"""
    try:
        fig = plt.figure(figsize=(12,8))
        ax = fig.add_subplot(111)
        plt.errorbar(size_list, mean_list, yerr=std_list, capsize=10, label='Standard deviation of percolation energy')
        plt.errorbar(size_list, mean_list, yerr=std_list/np.sqrt(N_real), capsize=10, label='Mean percolation energy')
        plt.xlabel('$N$')
        plt.ylabel('$(E_m-V_0)/V_0$')
        plt.title(r'$\eta = %.1f$' % eta)
        plt.legend()
        #plt.show()
        plt.savefig(directory_plots + 'SizeEvolutionEta' + str_eta + '.png')
        fig = plt.figure(figsize=(12,8))
        ax = fig.add_subplot(111)
        #plt.xlim(xmin=0, xmax = 0.12)
        #plt.ylim(ymin = -0.7, ymax = -0)
        popt, pcov = optimize.curve_fit(f, size_list, mean_list, p0=(-0.5, 1, -1))
        print('HEEEELP', popt)
        plt.errorbar(size_list, mean_list, yerr=std_list, capsize=10, label='Mean percolation energy')
        plt.errorbar(size_list, mean_list, yerr=std_list, capsize=10, label='Standard deviation of percolation energy')
        plt.errorbar(size_list, mean_list, yerr=std_list/np.sqrt(N_real), capsize=10, label='Mean percolation energy')
        indices = np.linspace(50, 90, 50)
        plt.plot(indices, [f(indices[i], popt[0], popt[1], popt[2]) for i in range(len(indices))], color='red', label=r'Fit for $E_m = E_c + a N^{\frac{-1}{\nu}}$')
        #plt.plot(size_list, [f(size_list[i], popt[0],popt[1], popt[2]) for i in range(len(size_list))])
        plt.xlabel('$N$')
        plt.ylabel('$(E_m-V_0)/V_0$')
        plt.title(r'$\eta = %.1f$' % eta)
        plt.text(0.8, 0.75, r'Result : $E_c$ = %.3f, $\nu = %.3f $ ' % (popt[0], -1/popt[2]), horizontalalignment='center', verticalalignment='center', transform=ax.transAxes)
        plt.text(0.7, 0.65, r'Result : $E_c$ = %.3f, $\nu = %.3f $ ' % (popt[0], -1/popt[2]), horizontalalignment='center', verticalalignment='center', transform=ax.transAxes)
        plt.legend()
        #plt.show()
        plt.savefig(directory_plots + 'SizeScalingFitEta' + str_eta + '.png')
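In all three finite-size-scaling scripts, the covariance matrix pcov returned by optimize.curve_fit is discarded. If uncertainties on E_c and nu are wanted, a standard estimate (a sketch, not part of the commit, valid when the residuals are well behaved) is:

perr = np.sqrt(np.diag(pcov))        # one-sigma errors on (E_c, amplitude, exponent)
nu = -1 / popt[2]
nu_err = abs(perr[2] / popt[2]**2)   # propagate the exponent error through nu = -1/c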
generate_landscape_tight_binding.py (new file, 0 → 100644)

# This generates effective potentials for sinc-correlated speckle potentials
#export PYTHONPATH="$PWD"
import time
import math
import numpy as np
import lib_tight_binding as tight_binding
import time
import sys
import matplotlib.pyplot as plt

if __name__ == "__main__":
    potential_type = 'tight_binding_uniform'
    n_config = 100
    dimension = 3
    #eta_list = [ 1, 2, 0.2, 0.8, 0.6, 0.4, 1.2, 1.4, 1.6, 1.8]
    eta_list = [0.5, 1, 2, 1.5]
    L_list = [60, 80]
    #L_list = [ 100, 120]
    save_data = True
    directory = potential_type + '/'
    for L in L_list:
        tab_dim = np.zeros(dimension, dtype='int') + L
        my_link_strength = -1
        for eta in eta_list:
            print("Starting for eta = %f , L = %d , N_config = %d \n" % (eta, L, n_config))
            my_disorder_strength = eta
            boundary_condition = 'periodic'
            #boundary_condition = 'NOTperiodic'
            tab_boundary_condition = [boundary_condition] * 3
            H = tight_binding.Hamiltonian(dimension, tab_dim, tab_boundary_condition=tab_boundary_condition, disorder_type=potential_type, link_strength=my_link_strength, disorder_strength=my_disorder_strength, use_mkl_random=False)
            for i in range(n_config):
                t0 = time.time()
                if i % 10 == 0:
                    print("exploring configuration number %d" % i)
                W = tight_binding.compute_W(seed=i + 1234 + 100*L + 7777*int(100*eta), H=H)
                """plt.imshow(H.disorder)
                plt.colorbar()
                plt.show()
                plt.imshow(W)
                plt.colorbar()
                plt.show()
                plt.imshow(np.log(np.abs(W)))
                plt.colorbar()
                plt.show()"""
                if save_data:
                    str_eta = str(int(eta)) + '_' + str(int(10*(eta - int(eta))))
                    np.save(directory + potential_type + '_eta' + str_eta + 'L' + str(L) + 'effectivePot' + str(i), W)
                    print('end save1')
                print(time.time() - t0)
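For reference, str_eta encodes eta with one decimal digit: eta = 0.5 becomes '0_5' and eta = 1.5 becomes '1_5'. Assuming the default '.npy' suffix appended by np.save, configuration i = 0 at eta = 0.5 and L = 60 is therefore written to

tight_binding_uniform/tight_binding_uniform_eta0_5L60effectivePot0.npy

and can be read back with np.load of that path.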
lib_an3d.py

@@ -7,6 +7,7 @@ import numpy as np
#import scipy.sparse as ssparse
#import mkl_random
import timeit
from collections import deque
#import pypardiso

class Potential:

@@ -511,7 +512,85 @@ def compute_percolation2D(W):
    print("NOT PERCOLATE ???")
    raise ValueError

def neighbours2D(ix, iy, Nx, Ny):

def compute_percolation2DnotComplete(W):
    #print(W)
    Nx, Ny = W.shape
    #Lx, Ly, Lz = Nx*dx, Ny*dy, Nz*dz
    #diameter = max(Lx, max(Ly, Lz)) # To be changed !
    # Clusters_touches_border = [] List of 6 booleans for each cluster
    ind = np.unravel_index(np.argsort(W, axis=None), W.shape)
    ind = list(zip(ind[0], ind[1]))
    #list of indexes such that W is ordered (call W[ind[0]])
    #clusters = [] #list of cluster, which are sets of indexes
    cluster_appartenance = np.zeros_like(W, dtype=int) - 1
    matching_table = []
    cluster_touches_border = []
    n_clusters = 0
    for x in ind:
        #print("Start one loop")
        #print(x)
        END = False
        #neighb = neighbours2D(x[0], x[1], Nx, Ny)
        neighb = neighbours2Dsquare(x[0], x[1], Nx, Ny)
        #print(neighb)
        cluster_found = []
        for n in neighb:
            if cluster_appartenance[n] >= 0:
                cluster_found.append(matching_table[cluster_appartenance[n]])
        if len(cluster_found) == 1:
            c = cluster_found[0]
            #clusters[c].add(x)
            cluster_appartenance[x] = c
            if x[0] == 0:
                cluster_touches_border[c][0] = True
            if x[1] == 0:
                cluster_touches_border[c][1] = True
            if x[0] == Nx - 1:
                cluster_touches_border[c][2] = True
            if x[1] == Ny - 1:
                cluster_touches_border[c][3] = True
            if (cluster_touches_border[c][0] and cluster_touches_border[c][2]) or (cluster_touches_border[c][1] and cluster_touches_border[c][3]):
                END = True
        elif len(cluster_found) > 1:
            c = min(cluster_found)
            for d in cluster_found:
                e = matching_table[d]
                if e != c:
                    for i in range(len(matching_table)):
                        if matching_table[i] == e:
                            matching_table[i] = c
                    #matching_table[d] = c
                    #clusters[c].update(clusters[d])
                    cluster_touches_border[c] += cluster_touches_border[e]
            #clusters[c].add(x)
            cluster_appartenance[x] = c
            if x[0] == 0:
                cluster_touches_border[c][0] = True
            if x[1] == 0:
                cluster_touches_border[c][1] = True
            if x[0] == Nx - 1:
                cluster_touches_border[c][2] = True
            if x[1] == Ny - 1:
                cluster_touches_border[c][3] = True
            if (cluster_touches_border[c][0] and cluster_touches_border[c][2]) or (cluster_touches_border[c][1] and cluster_touches_border[c][3]):
                END = True
        elif len(cluster_found) == 0:
            #assert n_clusters == len(clusters)
            #clusters.append(set([x]))
            cluster_appartenance[x] = n_clusters
            matching_table.append(n_clusters)
            cluster_touches_border.append(np.zeros(6, dtype=bool))
            n_clusters += 1
        else:
            raise ValueError
        if END:
            return W[x]
    print("NOT PERCOLATE ???")
    raise ValueError

def neighbours2Dsquare(ix, iy, Nx, Ny):
    n = []
    if ix >= 1:
        n.append((ix - 1, iy))

@@ -734,9 +813,85 @@ def mean_cluster_size_without_principal(W, energy):
def compute_phi(W, E):
    result = 0
    for x in W:
        for y in x:
            for z in y:
                if z < E:
                    result += 1
    return result / (W.shape[0] * W.shape[1] * W.shape[2])
\ No newline at end of file
    result = np.sum((W < E))
    return result / (W.shape[0] * W.shape[1] * W.shape[2])

def compute_phi2D(W, E):
    result = 0
    result = np.sum((W < E))
    return result / (W.shape[0] * W.shape[1])

def classical_loc_length_2D(W, E):
    Lx, Ly = W.shape
    x, y = Lx // 2, Ly // 2
    assert W[x, y] < E
    to_be_explored = np.zeros_like(W, dtype='bool')
    to_be_explored[x, y] = True
    to_explore = deque()
    to_explore.append((x, y))
    n_explored = 1
    deltar2 = 0
    while len(to_explore) > 0:
        ind = to_explore.popleft()
        neigh = neighbours2Dcomplete(ind[0], ind[1], Lx, Ly)
        for n in neigh:
            if not to_be_explored[n]:
                if W[n] < E:
                    n_explored += 1
                    deltar2 += (n[0] - x)**2 + (n[1] - y)**2
                    to_be_explored[n] = True
                    to_explore.append(n)
    deltar2 /= n_explored
    return 2 * np.sqrt(deltar2)

def classical_loc_length_3D(W, E):
    #A = (W-E) < 0
    #print(np.sum(A), np.mean(W))
    if np.min(W) > E:
        return 0
    Lx, Ly, Lz = W.shape
    x, y, z = Lx // 2, Ly // 2, Lz // 2
    if W[x, y, z] > E:
        checked = np.zeros_like(W, dtype='bool')
        checked[x, y, z] = True
        to_explore = deque()
        to_explore.append((x, y, z))
        x1, y1, z1 = x, y, z
        while W[x, y, z] > E:
            ind = to_explore.popleft()
            neigh = neighboursComplete(ind[0], ind[1], ind[2], Lx, Ly, Lz)
            for n in neigh:
                if W[n] < E:
                    x, y, z = n
                    break
                if not checked[n]:
                    checked[n] = True
                    to_explore.append(n)
    assert W[x, y, z] < E
    to_be_explored = np.zeros_like(W, dtype='bool')
    to_be_explored[x, y, z] = True
    to_explore = deque()
    to_explore.append((x, y, z))
    n_explored = 1
    deltar2 = 0
    while len(to_explore) > 0:
        ind = to_explore.popleft()
        neigh = neighboursComplete(ind[0], ind[1], ind[2], Lx, Ly, Lz)
        for n in neigh:
            if not to_be_explored[n]:
                if W[n] < E:
                    n_explored += 1
                    deltar2 += (n[0] - x)**2 + (n[1] - y)**2 + (n[2] - z)**2
                    to_be_explored[n] = True
                    to_explore.append(n)
    #print(n_explored, x,y,z)
    deltar2 /= n_explored
    return 2 * np.sqrt(deltar2)
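The new compute_percolation2DnotComplete visits sites in order of increasing W, merges the clusters they connect, and returns W[x] for the first site x whose cluster touches two opposite borders, i.e. the classical 2D percolation threshold of the landscape. A compact cross-check of that threshold (a sketch, not part of the commit, assuming neighbours2Dsquare is plain 4-connectivity without periodic wrapping) can be built from scipy.ndimage.label and bisection:

import numpy as np
from scipy import ndimage

def spans_2d(W, E):
    # A 4-connected cluster of {W < E} must touch two opposite borders.
    labels, _ = ndimage.label(W < E)   # default structure = 4-connectivity
    across_x = set(labels[0, :]) & set(labels[-1, :])
    across_y = set(labels[:, 0]) & set(labels[:, -1])
    return bool(across_x - {0}) or bool(across_y - {0})

def percolation_energy_2d(W, tol=1e-8):
    # Bisect on E; agrees with the sorted-sweep result up to the grid of W values.
    lo, hi = W.min(), W.max()
    while hi - lo > tol:
        mid = 0.5 * (lo + hi)
        lo, hi = (lo, mid) if spans_2d(W, mid) else (mid, hi)
    return hi

The bisection calls ndimage.label many times, so it is only meant as a consistency check, not a replacement for the single sorted sweep above.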
lib_tight_binding.py (new file, 0 → 100644)

import os
import sys
import math
import numpy as np
import pypardiso
import scipy.sparse as ssparse
import scipy.sparse.linalg as alg

class Potential:
    def __init__(self, dimension, tab_dim):
        self.dimension = dimension
        self.tab_dim = tab_dim
        self.tab_dim_cumulative = np.zeros(dimension + 1, dtype=int)
        ntot = 1
        self.tab_dim_cumulative[dimension] = 1
        for i in range(dimension - 1, -1, -1):
            ntot *= tab_dim[i]
            self.tab_dim_cumulative[i] = ntot
        self.ntot = ntot
        # print(self.tab_dim_cumulative)
        self.disorder = np.zeros(tab_dim)
        return

class Hamiltonian(Potential):
    def __init__(self, dimension, tab_dim, tab_boundary_condition, disorder_type='tight_binding_uniform', link_strength=0.0, disorder_strength=0.0, use_mkl_random=False):
        super().__init__(dimension, tab_dim)
        self.disorder_strength = disorder_strength
        self.tab_boundary_condition = tab_boundary_condition
        self.disorder_type = disorder_type
        self.link_strength = link_strength
        self.use_mkl_random = use_mkl_random
        # self.diagonal_term = np.zeros(tab_dim)
        # self.script_tunneling = 0.
        # self.script_disorder = np.zeros(tab_dim)
        # self.medium_energy = 0.
        self.generate = ''
        if (disorder_type == 'tight_binding_uniform') or (disorder_type == 'tight_binding_binary'):
            self.generate = 'direct'
        return

    """
    Generate a specific disorder configuration
    """
    def generate_disorder(self, seed):
        # print(self.use_mkl_random)
        if self.use_mkl_random:
            try:
                import mkl_random
            except ImportError:
                self.use_mkl_random = False
                print('No mkl_random found; Fallback to Numpy random')
        if self.use_mkl_random:
            mkl_random.RandomState(77777, brng='SFMT19937')
            mkl_random.seed(seed, brng='SFMT19937')
            my_random_normal = mkl_random.standard_normal
            my_random_uniform = mkl_random.uniform
        else:
            np.random.seed(seed)
            my_random_normal = np.random.standard_normal
            my_random_uniform = np.random.uniform
        # print('seed=',seed)
        # print(self.tab_dim_cumulative)
        if self.disorder_type == 'tight_binding_uniform':
            ntot = self.ntot
            self.disorder = -self.link_strength * 2 * self.dimension + self.disorder_strength * my_random_uniform(0, 1, self.ntot).reshape(self.tab_dim)
            #self.disorder = self.disorder_strength*np.ones(self.ntot).reshape(self.tab_dim)
            # print(self.disorder)
            return
        sys.exit('Disorder ' + self.disorder_type + ' not yet implemented!')
        return

    def shift_potential(self, shift):
        self.potential = self.potential + shift
        return

    def convert3Dto1D(self, coord):
        #Lx, Ly, Lz = self.tab_dim
        #offset_table = self.tab_dim_cumulative
        #x,y,z = coord
        return sum([coord[i] * self.tab_dim_cumulative[i + 1] for i in range(self.dimension)])

    def convert1Dto3D(self, i):
        ans = np.zeros(self.dimension)
        for d in range(self.dimension):
            ans[d] = i // self.tab_dim_cumulative[d + 1]
            i = i % self.tab_dim_cumulative[d + 1]
        return ans

    """def generate_matrix(seed):
        ntot = self.ntot
        V0 = self.disorder_strength
        J = self.link_strength
        assert self.boundary_conditions == 'periodic'
        self.generate_disorder(seed)"""

    def generate_full_matrix(self):
        tab_index = np.zeros(self.dimension, dtype=int)
        matrix = np.diag(self.disorder.ravel())
        ntot = self.ntot
        sub_diagonal = np.zeros((2 * self.dimension, ntot))
        tab_offset = np.zeros(2 * self.dimension, dtype=int)
        for j in range(self.dimension):
            # Offsets for the regular hoppings
            tab_offset[j] = self.tab_dim_cumulative[j + 1]
            # Offsets for the periodic hoppings
            tab_offset[j + self.dimension] = self.tab_dim_cumulative[j + 1] * (self.tab_dim[j] - 1)
        # print(tab_offset)
        for i in range(ntot):
            index = i
            for j in range(self.dimension - 1, -1, -1):
                index, tab_index[j] = divmod(index, self.tab_dim[j])
            # print(i,tab_index)
            # Hopping along the various dimensions
            for j in range(self.dimension):
                # Regular hopping
                if tab_index[j] < self.tab_dim[j] - 1:
                    sub_diagonal[j, i] = self.link_strength
                else:
                    # Only if periodic boundary condition
                    if self.tab_boundary_condition[j] == 'periodic':
                        sub_diagonal[j + self.dimension, i - (self.tab_dim[j] - 1) * self.tab_dim_cumulative[j + 1]] = self.link_strength
        for j in range(self.dimension):
            np.fill_diagonal(matrix[tab_offset[j]:, :], sub_diagonal[j, :])
            np.fill_diagonal(matrix[:, tab_offset[j]:], sub_diagonal[j, :])
            if self.tab_boundary_condition[j] == 'periodic':
                np.fill_diagonal(matrix[tab_offset[j + self.dimension]:, :], sub_diagonal[j + self.dimension, :])
                np.fill_diagonal(matrix[:, tab_offset[j + self.dimension]:], sub_diagonal[j + self.dimension, :])
        # print(matrix)
        return matrix

    def generate_sparse_matrix(self):
        tab_index = np.zeros(self.dimension, dtype=int)
        diagonal = self.disorder.ravel()
        ntot = self.ntot
        sub_diagonal = np.zeros((2 * self.dimension, ntot))
        tab_offset = np.zeros(2 * self.dimension, dtype=int)
        for j in range(self.dimension):
            # Offsets for the regular hoppings
            tab_offset[j] = self.tab_dim_cumulative[j + 1]
            # Offsets for the periodic hoppings
            tab_offset[j + self.dimension] = self.tab_dim_cumulative[j + 1] * (self.tab_dim[j] - 1)
        # print(tab_offset)
        for i in range(ntot):
            index = i
            for j in range(self.dimension - 1, -1, -1):
                index, tab_index[j] = divmod(index, self.tab_dim[j])
            # print(i,tab_index)
            # Hopping along the various dimensions
            for j in range(self.dimension):
                # Regular hopping
                if tab_index[j] < self.tab_dim[j] - 1:
                    sub_diagonal[j, i] = self.link_strength
                else:
                    # Only if periodic boundary condition
                    if self.tab_boundary_condition[j] == 'periodic':
                        sub_diagonal[j + self.dimension, i - (self.tab_dim[j] - 1) * self.tab_dim_cumulative[j + 1]] = self.link_strength
        diagonals = [diagonal]
        offsets = [0]
        for j in range(self.dimension):
            diagonals.extend([sub_diagonal[j, :], sub_diagonal[j, :]])
            offsets.extend([tab_offset[j], -tab_offset[j]])
            if self.tab_boundary_condition[j] == 'periodic':
                diagonals.extend([sub_diagonal[j + self.dimension, :], sub_diagonal[j + self.dimension, :]])
                offsets.extend([tab_offset[j + self.dimension], -tab_offset[j + self.dimension]])
        # print(diagonals)