# 2023-02-19 20:57:32 -08:00  (VCS timestamp artifact — kept as a comment so the file parses)
|
|
|
import torch
|
|
|
|
from pathlib import Path
|
|
|
|
|
|
|
|
import celeste_ai.plotting as plotting
|
|
|
|
from multiprocessing import Pool
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Root of the current training run's data directory.
m = Path("model_data") / "current"
|
|
|
|
|
|
|
|
# Make "predicted reward" plots
def plot_pred(src_model):
    """Render the predicted-reward plot for one archived model file.

    The output image is named after the model file's stem and written
    under ``plots/predicted`` inside the run directory ``m``.
    """
    target = m / "plots/predicted" / f"{src_model.stem}.png"
    # Plotting is done on the CPU so worker processes don't fight over a GPU.
    plotting.predicted_reward(
        src_model,
        target,
        device=torch.device("cpu"),
    )
|
|
|
|
|
# Make "best action" plots
def plot_best(src_model):
    """Render the best-action plot for one archived model file.

    The output image is named after the model file's stem and written
    under ``plots/best_action`` inside the run directory ``m``.
    """
    target = m / "plots/best_action" / f"{src_model.stem}.png"
    # Plotting is done on the CPU so worker processes don't fight over a GPU.
    plotting.best_action(
        src_model,
        target,
        device=torch.device("cpu"),
    )
|
# Make "actual reward" plots
def plot_act(src_model):
    """Render the actual-reward plot for one archived model file.

    The output image is named after the model file's stem and written
    under ``plots/actual`` inside the run directory ``m``.
    """
    target = m / "plots/actual" / f"{src_model.stem}.png"
    # Plotting is done on the CPU so worker processes don't fight over a GPU.
    plotting.actual_reward(
        src_model,
        # NOTE(review): presumably a coordinate/region argument — confirm
        # against plotting.actual_reward's signature.
        (60, 80),
        target,
        device=torch.device("cpu"),
    )
|
|
|
|
|
|
|
|
|
# Which plots should we make?
plots = dict(
    prediction=True,
    actual=False,
    best=True,
)
|
|
|
|
|
if __name__ == "__main__":
    # (toggle key, status message, worker) for each plot family, listed in
    # the order the plot passes should run.
    jobs = (
        ("prediction", "Making prediction plots...", plot_pred),
        ("best", "Making best-action plots...", plot_best),
        ("actual", "Making actual plots...", plot_act),
    )

    for key, message, worker in jobs:
        if not plots[key]:
            continue
        print(message)
        # Fan every archived model out over 5 worker processes.
        with Pool(5) as pool:
            pool.map(
                worker,
                list((m / "model_archive").iterdir()),
            )
|