Commit a6a5c88
Merge pull request #16 from materialsproject/feature/lammps (Feature/lammps)
2 parents: c70678e + 0fc5ce9
24 files changed: +254 -164 lines

src/mpmorph/analysis/diffusion.py (+1 -1)

@@ -290,7 +290,7 @@ def plot(self, title=None, annotate=True, el="", **kwargs):
             self.y,
             yerr=self.yerr.T,
             label="Q[{}]: ".format(el) + tx + " K",
-            **kwargs
+            **kwargs,
         )
         plt.ylabel("ln(D cm$^2$/s)", fontsize=15)
         plt.xlabel("1000/T K$^{-1}$", fontsize=15)

src/mpmorph/analysis/melting_points.py (+26 -21)

@@ -7,29 +7,29 @@
 import matplotlib.pyplot as plt
 import math
 
-class MeltingPointClusterAnalyzer():
 
+class MeltingPointClusterAnalyzer:
     def _get_clusters(self, points):
         clustering = AgglomerativeClustering(n_clusters=2).fit(points)
         cluster1 = points[np.argwhere(clustering.labels_ == 1).squeeze()].T
         cluster2 = points[np.argwhere(clustering.labels_ == 0).squeeze()].T
         return cluster1, cluster2
 
-    def plot_vol_vs_temp(self, ts, vs, plot_title = None):
+    def plot_vol_vs_temp(self, ts, vs, plot_title=None):
         points = np.array(list(zip(ts, vs)))
         cluster1, cluster2 = self._get_clusters(points)
         plt.scatter(*cluster1)
         plt.scatter(*cluster2)
         plt.xlabel("Temperature (K)")
         plt.ylabel("Volume (A^3)")
         Tm = self.estimate_melting_temp(ts, vs)
-        plt.plot([Tm, Tm], [min(vs), max(vs)], color='r')
-
+        plt.plot([Tm, Tm], [min(vs), max(vs)], color="r")
+
         if plot_title is None:
             plt.title("Volume vs Temperature by Clustering")
         else:
             plt.title(plot_title)
-
+
     def estimate_melting_temp(self, temps, vols):
         points = np.array(list(zip(temps, vols)))
         cluster1, cluster2 = self._get_clusters(points)

@@ -42,8 +42,8 @@ def estimate_melting_temp(self, temps, vols):
 
         return np.mean([max(solid_range), min(liquid_range)])
 
-class MeltingPointSlopeAnalyzer():
 
+class MeltingPointSlopeAnalyzer:
     def split_dset(self, pts, split_idx):
         return pts[0:split_idx], pts[split_idx:]
 

@@ -56,7 +56,7 @@ def assess_splits(self, xs, ys):
         for idx in pt_idxs:
             _, _, _, _, total_err = self.get_split_fit(xs, ys, idx)
             errs.append(total_err)
-
+
         return list(zip(pt_idxs, errs))
 
     def get_linear_ys(self, m, b, xs):

@@ -79,56 +79,61 @@ def plot_split(self, xs, ys, split_idx):
 
         plt.scatter(rightxs, rightys)
         plt.plot(rightxs, right_fit_ys)
-
+
     def get_best_split(self, xs, ys):
         split_errs = self.assess_splits(xs, ys)
         errs = [pt[1] for pt in split_errs]
         idxs = [pt[0] for pt in split_errs]
         best_split_idx = idxs[np.argmin(errs)]
         return best_split_idx
-
+
     def plot_vol_vs_temp(self, temps, vols):
         split_idx = self.get_best_split(temps, vols)
         self.plot_split(temps, vols, split_idx)
         Tm = self.estimate_melting_temp(temps, vols)
         print(Tm)
-        plt.plot([Tm, Tm], [min(vols), max(vols)], color='r')
-
+        plt.plot([Tm, Tm], [min(vols), max(vols)], color="r")
 
     def estimate_melting_temp(self, temps, vols):
         best_split_idx = self.get_best_split(temps, vols)
         return np.mean([temps[best_split_idx], temps[best_split_idx - 1]])
 
-class MeltingPointSlopeRMSEAnalyzer(MeltingPointSlopeAnalyzer):
 
+class MeltingPointSlopeRMSEAnalyzer(MeltingPointSlopeAnalyzer):
     def get_split_fit(self, xs, ys, split_idx):
         leftx, rightx = self.split_dset(xs, split_idx)
         lefty, righty = self.split_dset(ys, split_idx)
-
+
         lslope, lintercept, r_value, p_value, std_err = linregress(leftx, lefty)
         left_y_pred = lintercept + lslope * np.array(leftx)
         lefterr = mean_squared_error(y_true=lefty, y_pred=left_y_pred, squared=False)
 
         rslope, rintercept, r_value, p_value, std_err = linregress(rightx, righty)
         right_y_pred = rintercept + rslope * np.array(rightx)
         righterr = mean_squared_error(y_true=righty, y_pred=right_y_pred, squared=False)
-
-        combined_err = math.sqrt(lefterr ** 2 + righterr ** 2)
+
+        combined_err = math.sqrt(lefterr**2 + righterr**2)
         combined_err = lefterr + righterr
         return lslope, lintercept, rslope, rintercept, combined_err
 
-class MeltingPointSlopeStdErrAnalyzer(MeltingPointSlopeAnalyzer):
 
+class MeltingPointSlopeStdErrAnalyzer(MeltingPointSlopeAnalyzer):
     def get_split_fit(self, xs, ys, split_idx):
         leftx, rightx = self.split_dset(xs, split_idx)
         lefty, righty = self.split_dset(ys, split_idx)
-
+
         leftfit = linregress(leftx, lefty)
         lefterr = leftfit.stderr
-
+
         rightfit = linregress(rightx, righty)
         righterr = rightfit.stderr
-
-        combined_err = math.sqrt(lefterr ** 2 + righterr ** 2)
+
+        combined_err = math.sqrt(lefterr**2 + righterr**2)
         combined_err = lefterr + righterr
-        return leftfit.slope, leftfit.intercept, rightfit.slope, rightfit.intercept, combined_err
+        return (
+            leftfit.slope,
+            leftfit.intercept,
+            rightfit.slope,
+            rightfit.intercept,
+            combined_err,
+        )
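
Usage note: a minimal sketch of how the analyzers reformatted above might be driven, with hypothetical temperature/volume data standing in for the output of a volume-temperature sweep; the call pattern follows the methods shown in this diff.

    import numpy as np
    from mpmorph.analysis.melting_points import MeltingPointClusterAnalyzer

    # Hypothetical data: equilibrium volumes (A^3) from MD runs at several temperatures (K).
    temps = np.arange(1000, 2100, 100)
    vols = np.where(temps < 1500, 520.0, 560.0) + np.random.normal(0, 2.0, temps.shape)

    analyzer = MeltingPointClusterAnalyzer()
    Tm = analyzer.estimate_melting_temp(temps, vols)  # midpoint between the two clusters
    analyzer.plot_vol_vs_temp(temps, vols)            # scatter plot with Tm marked in red
    print(f"Estimated melting point: {Tm:.0f} K")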

src/mpmorph/analysis/structural_analysis.py (-3)

@@ -47,7 +47,6 @@ def polyhedra_connectivity(structures, pair, cutoff, step_freq=1):
             polyhedra_list.append(set(current_poly))
 
     for polypair in itertools.combinations(polyhedra_list, 2):
-
         polyhedra_pair_type = (len(polypair[0]), len(polypair[1]))
 
         shared_vertices = len(polypair[0].intersection(polypair[1]))

@@ -143,7 +142,6 @@ class BondAngleDistribution(object):
     """
 
     def __init__(self, structures, cutoffs, step_freq=1):
-
         self.bond_angle_distribution = None
         self.structures = structures
         self.step_freq = step_freq

@@ -251,7 +249,6 @@ def get_bond_angle_distribution(self):
 
            # get all pair combinations of neoghbor sites of i:
            for p in itertools.combinations(neighbors[i], 2):
-
                # check if pairs are within the defined cutoffs
                if self._cutoff_type == "dict":
                    if self._check_skip_triplet(s_index, i, p[0][2], p[1][2]):

src/mpmorph/database.py (+1 -2)

@@ -30,7 +30,7 @@ def __init__(
         collection="tasks",
         user=None,
         password=None,
-        **kwargs
+        **kwargs,
     ):
         super(VaspMDCalcDb, self).__init__(
             host, port, database, collection, user, password, **kwargs

@@ -75,7 +75,6 @@ def insert_task(
 
        # insert structures at each ionic step into GridFS
        if parse_ionic_steps and "calcs_reversed" in task_doc:
-
            # Convert from ionic steps dictionary to pymatgen.core.trajectory.Trajectory object
            ionic_steps_dict = task_doc["calcs_reversed"][0]["output"]["ionic_steps"]
            time_step = task_doc["input"]["incar"]["POTIM"]

src/mpmorph/firetasks/dbtasks.py (-1)

@@ -222,7 +222,6 @@ def load_trajectories_from_gfs(runs, mmdb, gfs_keys=None):
 
    trajectory = None
    for i, (fs_id, fs) in enumerate(gfs_keys):
-
        if fs == "trajectories_fs" or fs == "rebuild_trajectories_fs":
            # Load stored Trajectory
            print(fs_id, "is stored in trajectories_fs")

src/mpmorph/fireworks/powerups.py (+3 -3)

@@ -46,7 +46,7 @@ def aggregate_trajectory(fw, **kwargs):
 def add_cont_structure(fw):
     prev_struct_task = PreviousStructureTask()
     insert_i = 2
-    for (i, task) in enumerate(fw.tasks):
+    for i, task in enumerate(fw.tasks):
         if task.fw_name == "{{atomate.vasp.firetasks.run_calc.RunVaspCustodian}}":
             insert_i = i
             break

@@ -68,7 +68,7 @@ def add_pass_pv(fw, **kwargs):
 
 def add_pv_volume_rescale(fw):
     insert_i = 2
-    for (i, task) in enumerate(fw.tasks):
+    for i, task in enumerate(fw.tasks):
         if task.fw_name == "{{atomate.vasp.firetasks.run_calc.RunVaspCustodian}}":
             insert_i = i
             break

@@ -80,7 +80,7 @@ def add_pv_volume_rescale(fw):
 def add_rescale_volume(fw, **kwargs):
     rsv_task = RescaleVolumeTask(**kwargs)
     insert_i = 2
-    for (i, task) in enumerate(fw.tasks):
+    for i, task in enumerate(fw.tasks):
         if task.fw_name == "{{atomate.vasp.firetasks.run_calc.RunVaspCustodian}}":
             insert_i = i
             break

src/mpmorph/flows/md_flow.py (+6)

@@ -14,6 +14,10 @@
 M3GNET_MD_CONVERGED_VOL_FLOW = "M3GNET_MD_CONVERGED_VOL_FLOW"
 LAMMPS_VOL_FLOW = "LAMMPS_VOL_FLOW"
 
+
+# def get_md_temperature_sweeping(structure, temp, steps, converge_first = True, initial_vol_scale = 1, **input_kwargs):
+
+
 def get_md_flow_m3gnet(structure, temp, steps, converge_first = True, initial_vol_scale = 1, **input_kwargs):
     inputs = M3GNetMDInputs(
         temperature=temp,

@@ -91,3 +95,5 @@ def _get_converge_flow(structure: Structure, pv_md_maker: PVFromCalc, production
     flow = Flow([equil_vol_job, final_md_job], output=final_md_job.output, name=M3GNET_MD_CONVERGED_VOL_FLOW)
 
     return flow
+
+
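
Usage note: a sketch only, showing how the get_md_flow_m3gnet constructor shown above might be built into a runnable flow. run_locally and the POSCAR path are assumptions for illustration, not part of this diff.

    from jobflow.managers.local import run_locally
    from pymatgen.core import Structure
    from mpmorph.flows.md_flow import get_md_flow_m3gnet

    structure = Structure.from_file("POSCAR")  # any pymatgen Structure
    flow = get_md_flow_m3gnet(
        structure,
        temp=2000,            # K
        steps=2000,
        converge_first=True,  # equilibrate the volume before production MD
    )
    responses = run_locally(flow)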

src/mpmorph/flows/vt_flow.py (+17 -16)

@@ -8,6 +8,7 @@
 
 VOLUME_TEMPERATURE_SWEEP = "VOLUME_TEMPERATURE_SWEEP"
 
+
 def get_vt_sweep_flow(
     structure,
     lower_bound=100,

@@ -16,35 +17,34 @@ def get_vt_sweep_flow(
     output_name="vt.out",
     steps=2000,
 ):
-
     vs = []
     volume_jobs = []
     temps = list(range(lower_bound, upper_bound, temp_step))
 
     for temp in temps:
-        job = get_equil_vol_flow(
-            structure=structure,
-            temp=temp,
-            steps=steps
-        )
+        job = get_equil_vol_flow(structure=structure, temp=temp, steps=steps)
         volume_jobs.append(job)
         vs.append(job.output.volume)
 
     collect_job = _collect_vt_results(vs, temps, structure, output_name)
 
-    new_flow = Flow([*volume_jobs, collect_job], output=collect_job.output, name=VOLUME_TEMPERATURE_SWEEP)
+    new_flow = Flow(
+        [*volume_jobs, collect_job],
+        output=collect_job.output,
+        name=VOLUME_TEMPERATURE_SWEEP,
+    )
     return new_flow
 
+
 def get_vt_sweep_flow_lammps(
     structure,
     lower_bound=100,
     upper_bound=1100,
     temp_step=100,
     output_name="vt.out",
     steps=2000,
-    mp_id=None
+    mp_id=None,
 ):
-
     v_outputs = []
     volume_jobs = []
     temps = list(range(lower_bound, upper_bound, temp_step))

@@ -60,9 +60,10 @@ def get_vt_sweep_flow_lammps(
 
     collect_job = _collect_vt_results(v_outputs, temps, structure, output_name, mp_id)
 
-
-    flow_name = f'{structure.composition.reduced_formula}-Melting Point'
-    new_flow = Flow([*volume_jobs, collect_job], output=collect_job.output, name=flow_name)
+    flow_name = f"{structure.composition.reduced_formula}-Melting Point"
+    new_flow = Flow(
+        [*volume_jobs, collect_job], output=collect_job.output, name=flow_name
+    )
     return new_flow
 
 

@@ -75,18 +76,18 @@ def _collect_vt_results(v_outputs, ts, structure, output_fn, mp_id):
        "mp_id": mp_id,
        "reduced_formula": structure.composition.reduced_formula,
        "formula": structure.composition.formula,
-       "uuid": str(uuid.uuid4())
+       "uuid": str(uuid.uuid4()),
     }
 
     with open(output_fn, "+w") as f:
         f.write(json.dumps(result))
     return result
 
-def get_converged_vol(v_output):
+
+def get_converged_vol(v_output):
     df = pd.DataFrame.from_dict(v_output)
     total_steps = (len(df) - 1) * 10
     avging_window = int(total_steps / 30)
-    vols = df.iloc[-avging_window::]['vol']
+    vols = df.iloc[-avging_window::]["vol"]
     eq_vol = vols.values.mean()
     return float(eq_vol)
-
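
Usage note: a sketch of assembling the LAMMPS volume-temperature sweep defined above; run_locally and the placeholder mp_id are assumptions for illustration. The temperatures and volumes this flow collects are the kind of data the melting-point analyzers earlier in this commit operate on.

    from jobflow.managers.local import run_locally
    from pymatgen.core import Structure
    from mpmorph.flows.vt_flow import get_vt_sweep_flow_lammps

    structure = Structure.from_file("POSCAR")
    flow = get_vt_sweep_flow_lammps(
        structure,
        lower_bound=100,    # K
        upper_bound=1100,   # K, exclusive per range()
        temp_step=100,
        steps=2000,
        output_name="vt.out",
        mp_id="mp-00000",   # placeholder id recorded in the summary dict
    )
    run_locally(flow)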

src/mpmorph/io.py (+4 -4)

@@ -17,13 +17,13 @@ def get_string_from_struct(
     ):
         format_str = "{{:.{0}f}}".format(significant_figures)
 
-        for (si, structure) in enumerate(structures):
+        for si, structure in enumerate(structures):
            lines = [system, "1.0", str(structure.lattice)]
            lines.append(" ".join(self.get_site_symbols(structure)))
            lines.append(" ".join([str(x) for x in self.get_natoms(structure)]))
 
            lines.append("Direct configuration= " + str(si + 1))
-           for (i, site) in enumerate(structure):
+           for i, site in enumerate(structure):
                coords = site.frac_coords
                line = " ".join([format_str.format(c) for c in coords])
                line += " " + site.species_string

@@ -72,9 +72,9 @@ def get_string(self, system="unknown system", significant_figures=6):
        # positions = np.add(self.trajectory[0].frac_coords, self.trajectory.displacements)
        atoms = [site.specie.symbol for site in self.trajectory[0]]
 
-       for (si, position_array) in enumerate(positions):
+       for si, position_array in enumerate(positions):
            lines.append("Direct configuration= " + str(si + 1))
-           for (i, coords) in enumerate(position_array):
+           for i, coords in enumerate(position_array):
                line = " ".join([format_str.format(c) for c in coords])
                line += " " + atoms[i]
                lines.append(line)

src/mpmorph/jobs/equilibrate_volume.py (-1)

@@ -29,7 +29,6 @@ class EquilibriumVolumeSearchMaker(Maker):
     def make(
         self, original_structure: Structure, md_pv_data_docs: List[MDPVDataDoc] = None
     ):
-
         if md_pv_data_docs is not None and len(md_pv_data_docs) > MAX_MD_JOBS:
             raise RuntimeError(
                 "Maximum number of jobs for equilibrium volume search exceeded"

src/mpmorph/jobs/lammps/helpers.py (new file, +37)

@@ -0,0 +1,37 @@
+from ase.io.lammpsrun import read_lammps_dump_text
+from pymatgen.io.ase import AseAtomsAdaptor
+from pymatgen.core.trajectory import Trajectory
+from pymatgen.io.lammps.inputs import LammpsTemplateGen
+from pymatgen.io.lammps.data import LammpsData
+from subprocess import PIPE, Popen
+
+def trajectory_from_lammps_dump(dump_path):
+    with open(dump_path, "r+") as f:
+        atoms = read_lammps_dump_text(f, index=slice(0, None))
+
+    structs = []
+
+    for a in atoms:
+        structs.append(AseAtomsAdaptor().get_structure(a))
+
+    return Trajectory.from_structures(structs, constant_lattice=False)
+
+def run_lammps(structure, template_path, template_opts, lammps_bin):
+    data_filename: str = "data.lammps"
+    data = LammpsData.from_structure(structure, atom_style='atomic')
+    # Write the input files
+    linp = LammpsTemplateGen().get_input_set(script_template=template_path,
+                                             settings=template_opts,
+                                             data=data,
+                                             data_filename=data_filename)
+
+    linp.write_input(directory=".")
+    input_name = "in.lammps"
+    # Run LAMMPS
+
+    lammps_cmd = [lammps_bin, "-in", input_name]
+    print(f"Running: {' '.join(lammps_cmd)}")
+    with Popen(lammps_cmd, stdout=PIPE, stderr=PIPE) as p:
+        (stdout, stderr) = p.communicate()
+
+    print(f"LAMMPS finished running: {stdout} \n {stderr}")
