UEABS / ueabs · Commits

Commit 90c5a1c4, authored Apr 11, 2019 by Martti Louhivuori
Add input files for GPAW benchmarks
Parent: c7f11e63
Changes: 3 files
gpaw/benchmark/carbon-nanotube/input.py  (new file, mode 100644)
###
### GPAW benchmark: Carbon Nanotube
###
from __future__ import print_function
from gpaw.mpi import size, rank
from gpaw import GPAW, Mixer, PoissonSolver, ConvergenceError
from gpaw.occupations import FermiDirac
try:
    from ase.build import nanotube
except ImportError:
    from ase.structure import nanotube
try:
    from gpaw import use_mic
except ImportError:
    use_mic = False
try:
    from gpaw import use_cuda
    use_cuda = True
except ImportError:
    use_cuda = False
use_cpu = not (use_mic or use_cuda)

# dimensions of the nanotube
n = 6
m = 6
length = 10

# other parameters
txt = 'output.txt'
maxiter = 16
conv = {'eigenstates': 1e-4, 'density': 1e-2, 'energy': 1e-3}
# uncomment to use ScaLAPACK
#parallel = {'sl_auto': True}

# output benchmark parameters
if rank == 0:
    print("#" * 60)
    print("GPAW benchmark: Carbon Nanotube")
    print(" nanotube dimensions: n=%d, m=%d, length=%d" % (n, m, length))
    print(" MPI tasks: %d" % size)
    print(" using CUDA (GPGPU): " + str(use_cuda))
    print(" using pyMIC (KNC) : " + str(use_mic))
    print(" using CPU (or KNL): " + str(use_cpu))
    print("#" * 60)
    print("")

# setup parameters
args = {'h': 0.2,
        'nbands': -60,
        'occupations': FermiDirac(0.1),
        'mixer': Mixer(0.1, 5, 50),
        'poissonsolver': PoissonSolver(eps=1e-12),
        'eigensolver': 'rmm-diis',
        'maxiter': maxiter,
        'convergence': conv,
        'txt': txt}
if use_cuda:
    args['cuda'] = True
try:
    args['parallel'] = parallel
except:
    pass

# setup the system
atoms = nanotube(n, m, length)
atoms.center(vacuum=4.068, axis=0)
atoms.center(vacuum=4.068, axis=1)
calc = GPAW(**args)
atoms.set_calculator(calc)

# execute the run
try:
    atoms.get_potential_energy()
except ConvergenceError:
    pass
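The nanotube dimensions (n, m, length) are the main knobs for the size of this case. To see how large a system a given setting produces without running the DFT step, the structure-building calls from the input above can be run on their own. A minimal sketch, reusing only ase.build.nanotube and the vacuum padding already used in input.py (the chosen values simply mirror the benchmark defaults):

# Sketch: inspect the benchmark system size without invoking GPAW.
from ase.build import nanotube

atoms = nanotube(6, 6, 10)          # same (n, m, length) as the benchmark default
atoms.center(vacuum=4.068, axis=0)  # pad the non-periodic directions, as in input.py
atoms.center(vacuum=4.068, axis=1)
print(len(atoms), 'atoms')          # number of atoms GPAW will have to treat
print(atoms.get_cell())             # simulation cell after centering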
gpaw/benchmark/copper-filament/input.py  (new file, mode 100644)
###
### GPAW benchmark: Copper Filament
###
from __future__ import print_function
from gpaw.mpi import size, rank
from gpaw import GPAW, Mixer, ConvergenceError
from gpaw.occupations import FermiDirac
from ase.lattice.cubic import FaceCenteredCubic
try:
    from gpaw.eigensolvers.rmm_diis import RMM_DIIS
except ImportError:
    from gpaw.eigensolvers.rmmdiis import RMMDIIS as RMM_DIIS
try:
    from gpaw import use_mic
except ImportError:
    use_mic = False
try:
    from gpaw import use_cuda
    use_cuda = True
except ImportError:
    use_cuda = False
use_cpu = not (use_mic or use_cuda)

# no. of replicates in each dimension (increase to scale up the system)
x = 3
y = 2
z = 4

# other parameters
h = 0.22
kpts = (1, 1, 8)
txt = 'output.txt'
maxiter = 24
parallel = {'sl_default': (2, 2, 64)}

# output benchmark parameters
if rank == 0:
    print("#" * 60)
    print("GPAW benchmark: Copper Filament")
    print(" dimensions: x=%d, y=%d, z=%d" % (x, y, z))
    print(" grid spacing: h=%f" % h)
    print(" Brillouin-zone sampling: kpts=" + str(kpts))
    print(" MPI tasks: %d" % size)
    print(" using CUDA (GPGPU): " + str(use_cuda))
    print(" using pyMIC (KNC) : " + str(use_mic))
    print(" using CPU (or KNL): " + str(use_cpu))
    print("#" * 60)
    print("")

# compatibility hack for the eigensolver
rmm = RMM_DIIS()
rmm.niter = 2

# setup parameters
args = {'h': h,
        'nbands': -20,
        'occupations': FermiDirac(0.2),
        'kpts': kpts,
        'xc': 'PBE',
        'mixer': Mixer(0.1, 5, 100),
        'eigensolver': rmm,
        'maxiter': maxiter,
        'parallel': parallel,
        'txt': txt}
if use_cuda:
    args['cuda'] = True

# setup the system
atoms = FaceCenteredCubic(directions=[[1, -1, 0], [1, 1, -2], [1, 1, 1]],
                          size=(x, y, z), symbol='Cu', pbc=(0, 0, 1))
atoms.center(vacuum=6.0, axis=0)
atoms.center(vacuum=6.0, axis=1)
calc = GPAW(**args)
atoms.set_calculator(calc)

# execute the run
try:
    atoms.get_potential_energy()
except ConvergenceError:
    pass
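For this case the replicate counts x, y, z scale the copper filament, while 'sl_default' fixes the ScaLAPACK process grid. A minimal sketch for checking how the atom count grows with the replicate counts before committing to a run, reusing the FaceCenteredCubic call from the input above (the larger (x, y, z) tuple is a hypothetical scaled-up setting):

# Sketch: atom counts for a few replicate settings of the copper filament.
from ase.lattice.cubic import FaceCenteredCubic

for x, y, z in [(3, 2, 4), (6, 4, 8)]:   # (3, 2, 4) is the benchmark default
    atoms = FaceCenteredCubic(directions=[[1, -1, 0], [1, 1, -2], [1, 1, 1]],
                              size=(x, y, z), symbol='Cu', pbc=(0, 0, 1))
    print((x, y, z), '->', len(atoms), 'atoms')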
gpaw/benchmark/silicon-cluster/input.py  (new file, mode 100644)
###
### GPAW benchmark: Silicon Cluster
###
from __future__ import print_function
from gpaw.mpi import size, rank
from gpaw import GPAW, Mixer, ConvergenceError
from gpaw.occupations import FermiDirac
from gpaw.utilities import h2gpts
from ase.build import bulk
import numpy
try:
    from gpaw.eigensolvers.rmm_diis import RMM_DIIS
except ImportError:
    from gpaw.eigensolvers.rmmdiis import RMMDIIS as RMM_DIIS
try:
    from gpaw import use_mic
except ImportError:
    use_mic = False
try:
    from gpaw import use_cuda
    use_cuda = True
except ImportError:
    use_cuda = False
use_cpu = not (use_mic or use_cuda)

# radius of spherical cluster (increase to scale up the system)
radius = 15
# no. of replicates in each dimension
x = int(2 * radius / 5.43) + 1
y = int(2 * radius / 5.43) + 1
z = int(2 * radius / 5.43) + 1

# other parameters
h = 0.18
txt = 'output.txt'
maxiter = 24
bands_per_atom = 2.15
parallel = {'sl_default': (8, 8, 64)}

# build a spherical cluster in vacuum
atoms = bulk('Si', cubic=True)
atoms = atoms.repeat((x, y, z))
atoms.center(vacuum=0.0)
center = numpy.diag(atoms.get_cell()) / 2.0
mask = numpy.array([numpy.linalg.norm(atom.position - center) < radius
                    for atom in atoms])
atoms = atoms[mask]
atoms.rotate((0.1, 0.2, 0.3), 0.1)  # break symmetry
atoms.center(vacuum=5.0)

# setup band parallelisation
bands_per_block = int(radius / 10.0 * 2**10)
parallel['band'] = max(1, size // bands_per_block // 2 * 2)
while (size % parallel['band']):
    parallel['band'] += 2

# calculate the number of electronic bands
nbands = int(len(atoms) * bands_per_atom)
nbands -= nbands % 16
while (nbands % parallel['band']):
    nbands += 2

# calculate the number of grid points
gpts = h2gpts(h, atoms.get_cell(), idiv=16)

# output benchmark parameters
if rank == 0:
    print("#" * 60)
    print("GPAW benchmark: Silicon Cluster")
    print(" radius: %.1f" % radius)
    print(" grid spacing: %.3f" % h)
    print(" MPI tasks: %d" % size)
    print(" using CUDA (GPGPU): " + str(use_cuda))
    print(" using pyMIC (KNC) : " + str(use_mic))
    print(" using CPU (or KNL): " + str(use_cpu))
    print("#" * 60)
    print("")

# setup parameters
args = {'gpts': gpts,
        'nbands': nbands,
        'occupations': FermiDirac(0.05),
        'xc': 'LDA',
        'mixer': Mixer(0.1, 5, 100),
        'eigensolver': RMM_DIIS(blocksize=20),
        'maxiter': maxiter,
        'parallel': parallel,
        'txt': txt}
if use_cuda:
    args['cuda'] = True

# setup the calculator
calc = GPAW(**args)
atoms.set_calculator(calc)

# execute the run
try:
    atoms.get_potential_energy()
except ConvergenceError:
    pass
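The band-parallelisation arithmetic above is easiest to follow with concrete numbers. A minimal sketch, assuming the benchmark's radius of 15 together with a hypothetical MPI task count and a hypothetical cluster atom count (in the real run the atom count comes from the spherical cluster built above):

# Sketch: the scaling arithmetic from input.py with hypothetical size/atom values.
radius = 15                                      # benchmark value
size = 4096                                      # hypothetical number of MPI tasks
n_atoms = 5000                                   # hypothetical cluster atom count
bands_per_atom = 2.15                            # benchmark value

bands_per_block = int(radius / 10.0 * 2**10)     # 1536
band = max(1, size // bands_per_block // 2 * 2)  # 4096 // 1536 = 2 -> band = 2
while size % band:                               # 4096 % 2 == 0, so no adjustment
    band += 2

nbands = int(n_atoms * bands_per_atom)           # 10750
nbands -= nbands % 16                            # rounded down to 10736
while nbands % band:                             # already divisible by band
    nbands += 2
print(band, nbands)                              # -> 2 10736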