-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathtrain.py
45 lines (36 loc) · 1.54 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
import os
import sys
import argparse
from tactics.ml.agents import *
sys.path.insert(1, "sharpy-sc2")
sys.path.insert(1, os.path.join("sharpy-sc2", "python-sc2"))
import tensorflow as tf
from tactics.ml.environments.sc2_env import Sc2Env
STOP_FILE: str = "runner-stop.txt"
if __name__ == '__main__':
    # Entry point: repeatedly run training episodes of the selected ML
    # environment until an operator creates the STOP_FILE sentinel on disk.
    parser = argparse.ArgumentParser(
        description="Run bot games"
    )
    parser.add_argument(
        "-env",
        help="Environment name (workerdistraction, harvester, cartpole).",
        default="workerdistraction",
    )
    args = parser.parse_args()

    # Hide all GPUs from TensorFlow so training runs on CPU.
    # NOTE(review): original comment read "use CPU??" — presumably to keep the
    # GPU free for the game client; confirm with the project maintainers.
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
    tf.enable_eager_execution()  # TF 1.x API: turn on eager mode globally
    with tf.device('/cpu:0'):  # pin TF ops to CPU instead of GPU
        # One iteration == one full episode; loop until the stop file appears.
        while not os.path.isfile(STOP_FILE):
            if args.env == "workerdistraction":
                env = Sc2Env("test_bot.workerdistraction",
                             "AbyssalReefLE",
                             "debugmlworkerrushdefender",
                             "learning",
                             "workerdistraction")
            elif args.env == "harvester":
                env = Sc2Env("test_bot.default",
                             "AbyssalReefLE",
                             "harvester",
                             "learning",
                             "default")
            elif args.env == "cartpole":
                # CartPole drives an explicit A3C agent
                # (4-dim observation space, 2 discrete actions).
                agent: BaseMLAgent = A3CAgent(args.env, 4, 2)
                # Imported lazily so SC2-only runs never load the gym stack.
                from tactics.ml.environments.cartpole_env import CartPoleEnv
                env = CartPoleEnv(agent.choose_action, agent.on_end)
            else:
                # Fail fast with a clear message; previously an unrecognized
                # value crashed with NameError at env.run() below.
                raise ValueError(
                    f"Unknown environment {args.env!r}; expected one of: "
                    "workerdistraction, harvester, cartpole."
                )
            env.run()