Skip to content

Commit

Permalink
minor fix for v1.2
Browse files Browse the repository at this point in the history
  • Loading branch information
Kawaeee committed Feb 10, 2022
1 parent 266d484 commit 4b92195
Show file tree
Hide file tree
Showing 5 changed files with 270 additions and 21 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ RUN git clone https://github.com/Kawaeee/butt_or_bread.git

# Download model
RUN cd /opt/butt_or_bread/
RUN wget https://github.com/Kawaeee/butt_or_bread/releases/download/v1.1/buttbread_resnet152_3.h5
RUN wget https://github.com/Kawaeee/butt_or_bread/releases/download/v1.2/buttbread_resnet152_3.h5

# Install python packages
RUN pip install --upgrade pip
Expand Down
14 changes: 6 additions & 8 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@

**After that, we manually remove incorrect images and apply [phash](https://github.com/Kawaeee/phash) (Perceptual Hashing) to get rid of duplicate images.**

* Total images: 6385 images (randomly split using 80:10:10 ratios) with [dataset-split](https://github.com/muriloxyz/dataset-split)
* Total images: 6385 images (randomly split using 80:10:10 ratios)

* **Bread**: 3710 images
* Train: 2968 images
Expand All @@ -39,9 +39,9 @@
|**Valid**|0.0132|0.9969|
|**Test**|-|0.9968|

* We already know that to benchmark our model performance, we can't just use `accuracy` and `validation_loss` value as the only acceptable metrics.
* We already know that in order to benchmark our model performance, we can't just use the `accuracy` and `validation_loss` values as the only acceptable metrics.

#### You can download our model weight here: [v1.2](https://github.com/Kawaeee/butt_or_bread/releases/download/v1.3/buttbread_resnet152_3.h5)
#### You can download our model weight here: [v1.2](https://github.com/Kawaeee/butt_or_bread/releases/download/v1.2/buttbread_resnet152_3.h5)

## Hyperparameters and configurations

Expand All @@ -54,7 +54,7 @@
## Dataset Preparation
* Reproducing the model requires our datasets. You can send me an e-mail at `[email protected]` if you are interested.

- Prepare dataset in these following directory structure
- Initial datasets/ directory structure
```Bash
└───datasets/
│ butt/
Expand All @@ -66,12 +66,12 @@
pip install dataset-split
```

- Execute `dataset-split` command with following arguments on both category
- Execute the `dataset-split` command with the following arguments
```bash
dataset-split datasets/ -r 0.8 0.1 0.1
```

- The result will be in this format, and we are ready to proceed to model training
- Ready-to-go datasets/ directory structure
```Bash
└───datasets/
│ │
Expand Down Expand Up @@ -136,5 +136,3 @@
```Bash
docker run -p 8501:8501 butt_or_bread
```

- Streamlit web application will be hosted on http://localhost:8501
246 changes: 246 additions & 0 deletions predictor.ipynb

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions streamlit_app.json
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
"icon":"assets/icon/corgi-icon.png",
"markdown":{
"star":"[![GitHub Star](https://img.shields.io/github/stars/Kawaeee/butt_or_bread)](https://github.com/Kawaeee/butt_or_bread)",
"release":"[![GitHub Release](https://img.shields.io/github/v/release/Kawaeee/butt_or_bread)](https://github.com/Kawaeee/butt_or_bread/releases/tag/v1.1)",
"release":"[![GitHub Release](https://img.shields.io/github/v/release/Kawaeee/butt_or_bread)](https://github.com/Kawaeee/butt_or_bread/releases/tag/v1.2)",
"visitor":"![Visitor Badge](https://visitor-badge.glitch.me/badge?page_id=Kawaeee.butt_or_bread.visitor-badge)"
},
"mode":{
Expand All @@ -20,7 +20,7 @@
}
},
"model":{
"url":"https://github.com/Kawaeee/butt_or_bread/releases/download/v1.1/buttbread_resnet152_3.h5",
"url":"https://github.com/Kawaeee/butt_or_bread/releases/download/v1.2/buttbread_resnet152_3.h5",
"label":{
"corgi":"Corgi butt 🐕",
"bread":"Loaf of bread 🍞"
Expand Down
25 changes: 15 additions & 10 deletions train.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,17 +10,20 @@


class ButtBreadModel:
"""Corgi butt or loaf of bread? model"""

def __init__(self, device):
self.model = None
self.device = device
self.criterion = None
self.optimizer = None

def initialize(self):
"""Transfer Learning by using ResNet-152 as pre-trained weight"""
self.model = models.resnet152(pretrained=True).to(self.device)

for param in self.model.parameters():
param.requires_grad = False
for parameter in self.model.parameters():
parameter.requires_grad = False

self.model.fc = torch.nn.Sequential(
torch.nn.Linear(2048, 128),
Expand All @@ -47,7 +50,7 @@ def train(self, image_dataloaders, image_datasets, epochs=1):
running_loss = 0.0
running_corrects = 0

# Iterate and try to predict input and check with output generate loss and correct label
# Iterate and try to predict input and check with output -> generate loss and correct label
for inputs, labels in tqdm(image_dataloaders[phase]):
inputs = inputs.to(self.device)
labels = labels.to(self.device)
Expand All @@ -65,25 +68,24 @@ def train(self, image_dataloaders, image_datasets, epochs=1):
running_corrects += torch.sum(preds == labels.data)

epoch_loss = running_loss / len(image_datasets[phase])
epoch_acc = running_corrects.float() / len(image_datasets[phase])
epoch_accuracy = running_corrects.float() / len(image_datasets[phase])

print(f"{phase} loss: {epoch_loss.item():.4f}, acc: {epoch_acc.item():.4f}")
print(f"{phase} loss: {epoch_loss.item():.4f}, acc: {epoch_accuracy.item():.4f}")

print("Runtime: (", "{0:.2f}".format(time.monotonic() - time_start), " seconds)", sep="")

return self.model

def test(self, image_dataloaders):
"""Test with test set"""
test_acc_count = 0
test_accuracy_count = 0

for k, (test_images, test_labels) in tqdm(enumerate(image_dataloaders["test"])):
test_outputs = self.model(test_images.to(self.device))
_, prediction = torch.max(test_outputs.data, 1)
test_acc_count += torch.sum(prediction == test_labels.to(self.device).data).item()
test_accuracy_count += torch.sum(prediction == test_labels.to(self.device).data).item()

test_accuracy = test_acc_count / len(image_dataloaders["test"])
print(f"Test acc: {test_accuracy}")
test_accuracy = test_accuracy_count / len(image_dataloaders["test"])

return test_accuracy

Expand Down Expand Up @@ -168,8 +170,11 @@ def main(opt):
epochs=epochs,
)

butt_bread_obj.test(image_dataloaders=image_dataloaders)
test_accuracy = butt_bread_obj.test(image_dataloaders=image_dataloaders)
print(f"Test accuracy: {test_accuracy}")

butt_bread_obj.save(model_path=model_path)
print(f"Saved model at {model_path}")


if __name__ == "__main__":
Expand Down

0 comments on commit 4b92195

Please sign in to comment.