Commit cb19017: first commit
itsalicelee committed Jan 18, 2022
Showing 210 changed files with 100,079 additions and 0 deletions.
Binary file added .DS_Store
Binary file added hw0/.DS_Store
Binary file added hw0/b07303024_hw0.docx
Binary file added hw0/dlcv_hw0.docx
Binary file added hw0/dlcv_hw0.pdf
Binary file added hw0/hw0.pdf
101 changes: 101 additions & 0 deletions hw0/main.py
@@ -0,0 +1,101 @@
import numpy as np
import cv2
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score

# cv2.imwrite expects pixel values in [0, 255].
# cv2.imshow expects [0, 1] for floating point and [0, 255] for unsigned chars.
# Usage:
#   cv2.imshow("window title", array.astype('uint8'))
#   cv2.waitKey(0)
#   cv2.destroyAllWindows()

trainLst = ["./p1_data/" + str(i) + "_" + str(j) + ".png" for i in range(1, 41) for j in range(1, 10)]
testLst = ["./p1_data/" + str(i) + "_10.png" for i in range(1, 41)]

# training set contains the first 9 images of each subject
X_train = np.array([cv2.imread(name, 0) for name in trainLst])  # (360, 56, 46)
y_train = np.array([i for i in range(1, 41) for _ in range(9)])  # (360,)

# testing set contains the last image of each subject
X_test = np.array([cv2.imread(name, 0) for name in testLst])  # (40, 56, 46)
y_test = np.array([i for i in range(1, 41)])  # (40,)

# flatten each 56x46 image into a 2576-dimensional vector
X_train = X_train.reshape(360, -1)  # (360, 2576)
X_test = X_test.reshape(40, -1)  # (40, 2576)


# * ==============Question 1==============
# mean face
mean_face = X_train.mean(axis=0)
cv2.imwrite("./results/mean_face.png", mean_face.reshape(56, 46))
print("mean face shape: " + str(mean_face.shape))  # (2576,)
print("X_train shape: " + str(X_train.shape))  # (360, 2576)

# PCA and first four eigenfaces
pca = PCA().fit(X_train - mean_face)
eigenfaces = pca.components_  # (360, 2576): one principal component per row

# min-max normalize each eigenface to [0, 255] for display
a = 255 / (np.max(eigenfaces, axis=1) - np.min(eigenfaces, axis=1))
a = np.expand_dims(a, -1)
b = -a * np.expand_dims(np.min(eigenfaces, axis=1), -1)
eigenfaces = (eigenfaces * a + b).reshape(360, 56, 46)

def show_and_save(array, title, filename):
    plt.imshow(array, cmap='gray')
    plt.title(title, fontsize=20)
    plt.savefig("./results/" + filename + ".png")
    plt.show()
    plt.close()

show_and_save(mean_face.reshape(56, 46), "mean face", "mean_face")
for i in range(4):
    show_and_save(eigenfaces[i], "eigenface" + str(i+1), "eigenface_" + str(i+1))


# * ==============Question 2==============
img = cv2.imread("./p1_data/8_1.png", 0).reshape(1, -1)
output = pca.transform(img - mean_face)  # projection coefficients, (1, 360)

reconstructs = []
lst = [3, 50, 170, 240, 345]
for i in lst:
    # reconstruct from the first i principal components, then add the mean back
    reconstruct_result = output[:, :i] @ pca.components_[:i] + mean_face
    reconstructs.append(reconstruct_result.reshape(56, 46))

for i in range(5):
    show_and_save(reconstructs[i], "n = " + str(lst[i]), "reconstruct_" + str(i))


# * ==============Question 3==============
# print(reconstructs[0].shape)  # (56, 46)
# print(img.shape)  # (1, 2576)
for i in range(len(lst)):
    mse = np.mean((img - reconstructs[i].reshape(1, -1))**2)  # pixel-wise MSE
    print("n:{:<3d}, MSE:{:<15f}".format(lst[i], mse))


# * ==============Question 4==============
kLst = [1, 3, 5]
nLst = [3, 50, 170]
train_pca = pca.transform(X_train - mean_face)
# print(train_pca.shape)  # (360, 360)
# print(y_train.shape)  # (360,)
for k in kLst:
    for n in nLst:
        clf = KNeighborsClassifier(n_neighbors=k)
        scores = cross_val_score(clf, train_pca[:, :n], y_train, cv=3, scoring="accuracy")
        print("k={}, n={:<3d}, Acc:{:<15f}".format(k, n, scores.mean()))


# * ==============Question 5==============
k, n = 1, 50
test_pca = pca.transform(X_test - mean_face)
clf = KNeighborsClassifier(n_neighbors=k)
clf.fit(train_pca[:, :n], y_train)

print("Score:", clf.score(test_pca[:, :n], y_test))
Binary file added hw0/results/.DS_Store
Binary file added hw0/results/eigenface_1.png
Binary file added hw0/results/eigenface_2.png
Binary file added hw0/results/eigenface_3.png
Binary file added hw0/results/eigenface_4.png
Binary file added hw0/results/mean_face.png
Binary file added hw0/results/reconstruct_0.png
Binary file added hw0/results/reconstruct_1.png
Binary file added hw0/results/reconstruct_2.png
Binary file added hw0/results/reconstruct_3.png
Binary file added hw0/results/reconstruct_4.png
Binary file added hw1/.DS_Store
16 changes: 16 additions & 0 deletions hw1/.gitignore
@@ -0,0 +1,16 @@
# Ignore the dataset
# IMPORTANT: make sure that your dataset is stored in a folder with the same name as the one specified below
hw1_data/*
*.png
src/log/*
src2/log/*
src/__pycache__/*
src2/__pycache__/*
*.pyc
results/*
INTERRUPTED.pth
model_best_*
src2/results/*
src2/checkpoints/*
src2/fcn8s-heavy-pascal.pth
*.pth
90 changes: 90 additions & 0 deletions hw1/README.md
@@ -0,0 +1,90 @@

# HW1 Problem 1 ― Image Classification
In HW1 problem 1, you will need to implement an image classification model and answer some questions in the report.

# HW1 Problem 2 ― Semantic Segmentation
In HW1 problem 2, you will need to implement two semantic segmentation models and answer some questions in the report.

For more details, please click [this link](https://docs.google.com/presentation/d/1H4O5NrEK-AzS2jRggWSpzs9hHEsYlCNYQdJbHlDfH48/edit?usp=sharing) to view the slides of HW1.

# Usage
To start working on this assignment, you should clone this repository into your local machine by using the following command.

    git clone https://github.com/DLCV-Fall-2021/hw1-<username>.git
Note that you should replace `<username>` with your own GitHub username.

### Dataset
In the starter code of this repository, we have provided a shell script for downloading and extracting the dataset for this assignment. For Linux users, simply use the following command.

    bash ./get_dataset.sh
The shell script will automatically download the dataset and store the data in a folder called `hw1_data`. Note that this command by default only works on Linux. If you are using another operating system or you cannot download the dataset by running the command above, you should download the dataset from [this link](https://drive.google.com/file/d/1CIvfO8rDMq5-3vmi0c6mrhDfzqrAZDem/view?usp=sharing) and unzip the compressed file manually.

> ⚠️ ***IMPORTANT NOTE*** ⚠️
> You should keep a copy of the dataset only on your local machine. **DO NOT** upload the dataset to this remote repository. If you extract the dataset manually, be sure to put it in a folder called `hw1_data` under the root directory of your local repository so that it is covered by the default `.gitignore` file.
### Evaluation
To evaluate your semantic segmentation model, you can run the evaluation script provided in the starter code by using the following command.

    python3 mean_iou_evaluate.py <--pred PredictionDir> <--labels GroundTruthDir>

- `<PredictionDir>` should be the directory to your predicted semantic segmentation map (e.g. `hw1_data/prediction/`)
- `<GroundTruthDir>` should be the directory of ground truth (e.g. `hw1_data/validation/`)

Note that your predicted semantic segmentation map files should have the same filenames as their corresponding ground truth label files (both with the extension ``.png``).
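For reference, the metric averages intersection-over-union (IoU) over the semantic classes. A minimal sketch of that computation (the actual `mean_iou_evaluate.py` may differ; the default class count and the handling of absent classes here are assumptions):

    import numpy as np

    def mean_iou(pred, labels, n_classes=6):
        # pred, labels: integer class maps of identical shape
        ious = []
        for c in range(n_classes):
            intersection = np.sum((pred == c) & (labels == c))
            union = np.sum((pred == c) | (labels == c))
            if union > 0:  # skip classes absent from both maps
                ious.append(intersection / union)
        return np.mean(ious)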

### Visualization
To visualize the ground truth or a predicted semantic segmentation map on an image, you can run the visualization script provided in the starter code by using the following command.

    python3 viz_mask.py <--img_path xxxx_sat.jpg> <--seg_path xxxx_mask.png>
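Conceptually, such a visualization overlays the color-coded mask on the satellite image. A minimal sketch with OpenCV (the blending weights are an assumption, not necessarily what `viz_mask.py` does):

    import cv2

    img = cv2.imread("xxxx_sat.jpg")    # BGR satellite image
    mask = cv2.imread("xxxx_mask.png")  # color-coded segmentation map
    overlay = cv2.addWeighted(img, 0.6, mask, 0.4, 0)  # alpha-blend the two
    cv2.imwrite("overlay.png", overlay)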

# Submission Rules
### Deadline
2021/10/26 (Tue.) 11:59 PM

### Late Submission Policy
You have a three-day delay quota for the whole semester. Once you have exceeded your quota, the credit of any late submission will be deducted by 30% each day.

Note that while it is possible to continue your work in this repository after the deadline, **we will by default grade your last commit before the deadline** specified above. If you wish to use your quota or submit an earlier version of your repository, please contact the TAs and let them know which commit to grade.

### Academic Honesty
- Taking any unfair advantages over other class members (or letting anyone do so) is strictly prohibited. Violating university policy would result in an **F** grade for this course (**NOT** negotiable).
- If you refer to some parts of the public code, you are required to specify the references in your report (e.g. URL to GitHub repositories).
- You are encouraged to discuss homework assignments with your fellow class members, but you must complete the assignment by yourself. TAs will compare the similarity of everyone’s submission. Any form of cheating or plagiarism will not be tolerated and will also result in an **F** grade for students with such misconduct.


### Submission Format
Aside from your own Python scripts and model files, you should make sure that your submission includes *at least* the following files in the root directory of this repository:
1. `hw1_<StudentID>.pdf`
The report of your homework assignment. Refer to the "*Submission*" section in the slides for what you should include in the report. Note that you should replace `<StudentID>` with your student ID, **NOT** your GitHub username.
2. `hw1_1.sh`
The shell script file for running your classification model.
3. `hw1_2.sh`
The shell script file for running your semantic segmentation model.

We will run your code in the following manner:

    bash hw1_1.sh $1 $2

where `$1` is the testing images directory (e.g. `test/images/`), and `$2` is the path of the folder where you want to output your prediction file (e.g. `test/label_pred/`). Please do not create the output prediction directory in your bash script or Python code.

    bash hw1_2.sh $1 $2

where `$1` is the testing images directory (e.g. `test/images/`), and `$2` is the output prediction directory for segmentation maps (e.g. `test/label_pred/`). Please do not create the output prediction directory in your bash script or Python code.

### Packages
This homework should be done using Python 3.6. For a list of packages you are allowed to import in this assignment, please refer to `requirements.txt` for more details.

You can run the following command to install all the packages listed in `requirements.txt`:

    pip3 install -r requirements.txt

Note that using packages with different versions will very likely lead to compatibility issues, so make sure that you install the correct version if one is specified above. E-mail or ask the TAs first if you want to import other packages.

### Remarks
- If your model is larger than GitHub’s maximum capacity (100MB), you can upload your model to another cloud service (e.g. Dropbox). However, your shell script files should be able to download the model automatically. For a tutorial on how to do this using Dropbox, please click [this link](https://goo.gl/XvCaLR).
- **DO NOT** hard code any path in your file or script, and the execution time of your testing code should not exceed an allowed maximum of **10 minutes**.
- **Please refer to HW1 slides for details about the penalty that may incur if we fail to run your code or reproduce your results.**

# Q&A
If you have any problems related to HW1, you may
- Use TA hours
- Contact TAs by e-mail ([[email protected]](mailto:[email protected]))
- Post your question under the hw1 FAQ section in the FB group
9 changes: 9 additions & 0 deletions hw1/get_dataset.sh
@@ -0,0 +1,9 @@
# Download the dataset from Google Drive (the inner wget fetches the
# confirmation token that Drive requires before serving large files)
wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1E_X2X-91SVsv0u_b-57wOc_CZuxBKkvJ' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1E_X2X-91SVsv0u_b-57wOc_CZuxBKkvJ" -O hw1_data.zip && rm -rf /tmp/cookies.txt

# Unzip the downloaded zip file
mkdir hw1_data
unzip ./hw1_data.zip -d hw1_data

# Remove the downloaded zip file
rm ./hw1_data.zip
3 changes: 3 additions & 0 deletions hw1/hw1_1.sh
@@ -0,0 +1,3 @@
# bash hw1_1.sh $1 (testing images directory) $2 (path of output csv file with predicted labels)

python3 ./src/main.py --mode=test --test=./src/model.pth --test_data=$1 --prediction=$2
3 changes: 3 additions & 0 deletions hw1/hw1_2.sh
@@ -0,0 +1,3 @@
# bash hw1_2.sh $1 (testing images directory) $2 (output images directory)
wget "https://www.dropbox.com/s/ult694hblco4lra/model_unet_699.pth?dl=1" -O ./src2/model_unet_699.pth
python3 src2/main.py --mode=test --test=src2/model_unet_699.pth --model=unet --test_data=$1 --result_dir=$2
Binary file added hw1/hw1_b07303024.pdf
Binary file added hw1/hw1_intro.pdf
(Diff truncated: remaining changed files not shown.)