Commit: v0.1.2

Jintao-Huang committed Sep 22, 2022
1 parent 65e4b1d commit 4a1ff2a
Showing 7 changed files with 20 additions and 30 deletions.
README.md (8 changes: 4 additions & 4 deletions)
@@ -11,15 +11,15 @@

## Install
1. Create a virtual environment and install Python (>= 3.8)
2. Download the latest version (>=1.12) of Torch(corresponding CUDA version) from the [official website](https://pytorch.org/get-started/locally/) of Torch. It is not recommended to automatically install Torch (CUDA 10.2) using the Mini-Lightning dependency, which will cause CUDA version mismatch.
2. Download the latest version (>=1.12) of Torch(corresponding CUDA version) from the [official website](https://pytorch.org/get-started/locally/) of Torch. It is not recommended to automatically install Torch (CUDA 10.2 default) using the Mini-Lightning dependency, which will cause CUDA version mismatch.
3. Install mini-lightning
```bash
# from pypi (v0.1.1)
# from pypi (v0.1.2)
pip install mini-lightning

# Or download the files from the repository to local,
# and go to the folder where setup.py is located, and run the following command
# (recommend) You can enjoy the latest features and functions
# (Recommended) You can enjoy the latest features and functions (including bug fixes)
pip install .
```

@@ -47,7 +47,7 @@ pip install gym, pygame
python examples/dqn.py

### cv_ddp.py; cv_ddp_spawn.py
# torchrun (recommended): Ref: https://pytorch.org/docs/stable/elastic/run.html
# torchrun (Recommended): Ref: https://pytorch.org/docs/stable/elastic/run.html
# spawn: Ref: https://pytorch.org/docs/stable/notes/ddp.html
## single-gpu # for test
torchrun examples/cv_ddp.py --device_ids 0
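
The README hunk above is truncated after the single-GPU test command. For reference, a hedged sketch of a multi-process launch: `--nproc_per_node` is a standard torchrun flag (see the linked elastic/run page), but the multi-value form of `--device_ids` is an assumption, since that part of the README is not shown in this diff.

```bash
## single-gpu, for test (as shown above)
torchrun examples/cv_ddp.py --device_ids 0

## multi-gpu sketch (assumed: passing several ids to --device_ids)
torchrun --nproc_per_node 2 examples/cv_ddp.py --device_ids 0 1
```
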
examples/cv.py (8 changes: 3 additions & 5 deletions)
@@ -8,11 +8,10 @@
import torchvision.models as tvm
#
CIFAR10 = tvd.CIFAR10
RUNS_DIR = os.path.join(RUNS_DIR, "cv")
DATASETS_PATH = os.environ.get("DATASETS_PATH", os.path.join(RUNS_DIR, "datasets"))
CHECKPOINTS_PATH = os.path.join(RUNS_DIR, "checkpoints")
RUNS_DIR = os.path.join(RUNS_DIR, "cv")
os.makedirs(DATASETS_PATH, exist_ok=True)
os.makedirs(CHECKPOINTS_PATH, exist_ok=True)
os.makedirs(RUNS_DIR, exist_ok=True)

#
device_ids = [0]
@@ -116,7 +115,6 @@ def test_step(self, batch: Any) -> None:
ldm = ml.LDataModule(
train_dataset, val_dataset, test_dataset, **hparams["dataloader_hparams"])

runs_dir = CHECKPOINTS_PATH
loss_fn = nn.CrossEntropyLoss()

def collect_res(seed: int) -> Dict[str, float]:
@@ -129,7 +127,7 @@ def collect_res(seed: int) -> Dict[str, float]:
lr_s = ml.WarmupCosineAnnealingLR(optimizer, **hparams["lrs_hparams"])

lmodel = MyLModule(model, optimizer, loss_fn, lr_s, hparams)
trainer = ml.Trainer(lmodel, device_ids, runs_dir=runs_dir, **hparams["trainer_hparams"])
trainer = ml.Trainer(lmodel, device_ids, runs_dir=RUNS_DIR, **hparams["trainer_hparams"])
res = trainer.fit(ldm.train_dataloader, ldm.val_dataloader)
res2 = trainer.test(ldm.test_dataloader)
res.update(res2)
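
Every example file in this commit gets the same refactor, visible above for examples/cv.py: the separate CHECKPOINTS_PATH constant is dropped and the Trainer writes into the per-example RUNS_DIR directly. A minimal sketch of the resulting pattern; the real base RUNS_DIR, lmodel, device_ids, and hparams come from parts of the examples that this diff does not show, so placeholders are used here.

```python
import os

# Placeholder for the base runs directory defined earlier in each example.
RUNS_DIR = "runs"
# Per-example subdirectory: "cv", "cv_ddp", "dqn", "nlp", ...
RUNS_DIR = os.path.join(RUNS_DIR, "cv")
os.makedirs(RUNS_DIR, exist_ok=True)

# The Trainer now receives this directory directly, instead of a separate
# CHECKPOINTS_PATH beneath it:
# trainer = ml.Trainer(lmodel, device_ids, runs_dir=RUNS_DIR, **hparams["trainer_hparams"])
```
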
examples/cv_ddp.py (8 changes: 3 additions & 5 deletions)
@@ -13,11 +13,10 @@
import torchvision.models as tvm
#
CIFAR10 = tvd.CIFAR10
RUNS_DIR = os.path.join(RUNS_DIR, "cv_ddp")
DATASETS_PATH = os.environ.get("DATASETS_PATH", os.path.join(RUNS_DIR, "datasets"))
CHECKPOINTS_PATH = os.path.join(RUNS_DIR, "checkpoints")
RUNS_DIR = os.path.join(RUNS_DIR, "cv_ddp")
os.makedirs(DATASETS_PATH, exist_ok=True)
os.makedirs(CHECKPOINTS_PATH, exist_ok=True)
os.makedirs(RUNS_DIR, exist_ok=True)
#


@@ -130,7 +129,6 @@ def parse_opt() -> Namespace:
ldm = ml.LDataModule(
train_dataset, val_dataset, test_dataset, **hparams["dataloader_hparams"])

runs_dir = CHECKPOINTS_PATH
loss_fn = nn.CrossEntropyLoss()

def collect_res(seed: int) -> Dict[str, float]:
@@ -146,7 +144,7 @@ def collect_res(seed: int) -> Dict[str, float]:
lr_s = ml.WarmupCosineAnnealingLR(optimizer, **hparams["lrs_hparams"])
#
lmodel = MyLModule(model, optimizer, loss_fn, lr_s, hparams)
trainer = ml.Trainer(lmodel, device_ids, runs_dir=runs_dir, **hparams["trainer_hparams"])
trainer = ml.Trainer(lmodel, device_ids, runs_dir=RUNS_DIR, **hparams["trainer_hparams"])
res = trainer.fit(ldm.train_dataloader, ldm.val_dataloader)
res2 = trainer.test(ldm.test_dataloader)
res.update(res2)
examples/cv_ddp_spawn.py (8 changes: 3 additions & 5 deletions)
@@ -13,11 +13,10 @@
import torchvision.models as tvm
#
CIFAR10 = tvd.CIFAR10
RUNS_DIR = os.path.join(RUNS_DIR, "cv_ddp")
DATASETS_PATH = os.environ.get("DATASETS_PATH", os.path.join(RUNS_DIR, "datasets"))
CHECKPOINTS_PATH = os.path.join(RUNS_DIR, "checkpoints")
RUNS_DIR = os.path.join(RUNS_DIR, "cv_ddp")
os.makedirs(DATASETS_PATH, exist_ok=True)
os.makedirs(CHECKPOINTS_PATH, exist_ok=True)
os.makedirs(RUNS_DIR, exist_ok=True)
#


@@ -125,7 +124,6 @@ def main(rank: int, world_size: int, device_ids: List[int]) -> None:
ldm = ml.LDataModule(
train_dataset, val_dataset, test_dataset, **hparams["dataloader_hparams"])

runs_dir = CHECKPOINTS_PATH
loss_fn = nn.CrossEntropyLoss()

def collect_res(seed: int) -> Dict[str, float]:
@@ -141,7 +139,7 @@ def collect_res(seed: int) -> Dict[str, float]:
lr_s = ml.WarmupCosineAnnealingLR(optimizer, **hparams["lrs_hparams"])
#
lmodel = MyLModule(model, optimizer, loss_fn, lr_s, hparams)
trainer = ml.Trainer(lmodel, device_ids, runs_dir=runs_dir, **hparams["trainer_hparams"])
trainer = ml.Trainer(lmodel, device_ids, runs_dir=RUNS_DIR, **hparams["trainer_hparams"])
res = trainer.fit(ldm.train_dataloader, ldm.val_dataloader)
res2 = trainer.test(ldm.test_dataloader)
res.update(res2)
examples/dqn.py (8 changes: 3 additions & 5 deletions)
@@ -18,11 +18,10 @@


RENDER = True
RUNS_DIR = os.path.join(RUNS_DIR, "dqn")
DATASETS_PATH = os.environ.get("DATASETS_PATH", os.path.join(RUNS_DIR, "datasets"))
CHECKPOINTS_PATH = os.path.join(RUNS_DIR, "checkpoints")
RUNS_DIR = os.path.join(RUNS_DIR, "dqn")
os.makedirs(DATASETS_PATH, exist_ok=True)
os.makedirs(CHECKPOINTS_PATH, exist_ok=True)
os.makedirs(RUNS_DIR, exist_ok=True)

#
device_ids = [0]
@@ -231,9 +230,8 @@ def training_step(self, batch: Any) -> Tensor:
#
get_rand_p = partial(get_rand_p, **hparams["rand_p"])
optimizer = getattr(optim, hparams["optim_name"])(model.parameters(), **hparams["optim_hparams"])
runs_dir = CHECKPOINTS_PATH
loss_fn = nn.MSELoss()

lmodel = MyLModule(model, optimizer, loss_fn, agent, get_rand_p, hparams)
trainer = ml.Trainer(lmodel, device_ids, runs_dir=runs_dir, **hparams["trainer_hparams"])
trainer = ml.Trainer(lmodel, device_ids, runs_dir=RUNS_DIR, **hparams["trainer_hparams"])
trainer.fit(ldm.train_dataloader, ldm.val_dataloader)
examples/nlp.py (8 changes: 3 additions & 5 deletions)
@@ -11,11 +11,10 @@


#
RUNS_DIR = os.path.join(RUNS_DIR, "nlp")
DATASETS_PATH = os.environ.get("DATASETS_PATH", os.path.join(RUNS_DIR, "datasets"))
CHECKPOINTS_PATH = os.path.join(RUNS_DIR, "checkpoints")
RUNS_DIR = os.path.join(RUNS_DIR, "nlp")
os.makedirs(DATASETS_PATH, exist_ok=True)
os.makedirs(CHECKPOINTS_PATH, exist_ok=True)
os.makedirs(RUNS_DIR, exist_ok=True)
os.environ["TOKENIZERS_PARALLELISM"] = "true"

#
@@ -113,11 +112,10 @@ def tokenize_function(example):
"recall": Recall(average="macro", num_classes=2),
"f1": F1Score(average="none", num_classes=2)
}
runs_dir = CHECKPOINTS_PATH
loss_fn = nn.CrossEntropyLoss()
lr_s = ml.WarmupCosineAnnealingLR(optimizer, **hparams["lrs_hparams"])
lmodel = MyLModule(model, optimizer, metrics, loss_fn, lr_s, hparams)
trainer = ml.Trainer(lmodel, device_ids, runs_dir=runs_dir, **hparams["trainer_hparams"])
trainer = ml.Trainer(lmodel, device_ids, runs_dir=RUNS_DIR, **hparams["trainer_hparams"])
try:
logger.info(trainer.fit(ldm.train_dataloader, ldm.val_dataloader))
except KeyboardInterrupt:
setup.py (2 changes: 1 addition & 1 deletion)
@@ -21,7 +21,7 @@ def read_file(path: str) -> str:
]
setup(
name="mini-lightning",
version="0.1.2.dev",
version="0.1.2",
description=description,
long_description=long_description,
long_description_content_type='text/markdown',
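
With the .dev suffix removed, the version in setup.py matches the "from pypi (v0.1.2)" comment added to the README. A quick way to confirm the installed release, assuming it has been published to PyPI:

```bash
pip install "mini-lightning==0.1.2"
pip show mini-lightning   # should report: Version: 0.1.2
```
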
