
Commit cd2fffd

first push
Initial commit (0 parents)
20 files changed: +1907 lines added, 0 removed

.DS_Store

6 KB
Binary file not shown.

.gitignore

+81
@@ -0,0 +1,81 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover

# Translations
*.mo
*.pot

# Django stuff:
*.log

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# DotEnv configuration
.env

# Database
*.db
*.rdb

# Pycharm
.idea

# Jupyter NB Checkpoints
.ipynb_checkpoints/

# exclude data from source control by default
/data/
src/data/*.csv
src/data/posfile
src/data/*.txt
src/data/train
src/data/train_old
src/data/test
src/data/traindata
src/data/testdata

LICENSE

+10
@@ -0,0 +1,10 @@
The MIT License (MIT)
Copyright (c) 2017, Swetha Subramanian

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Makefile

+127
@@ -0,0 +1,127 @@
.PHONY: clean data lint requirements sync_data_to_s3 sync_data_from_s3

#################################################################################
# GLOBALS                                                                       #
#################################################################################

BUCKET = [OPTIONAL] your-bucket-for-syncing-data (do not include 's3://')
PROJECT_NAME = LungNoduleCNN
PYTHON_INTERPRETER = python
IS_ANACONDA=$(shell python -c "import sys;t=str('anaconda' in sys.version.lower() or 'continuum' in sys.version.lower());sys.stdout.write(t)")

#################################################################################
# COMMANDS                                                                      #
#################################################################################

## Install Python Dependencies
requirements: test_environment
	pip install -r requirements.txt

## Make Dataset
data: requirements
	$(PYTHON_INTERPRETER) src/data/make_dataset.py

## Delete all compiled Python files
clean:
	find . -name "*.pyc" -exec rm {} \;

## Lint using flake8
lint:
	flake8 --exclude=lib/,bin/,docs/conf.py .

## Upload Data to S3
sync_data_to_s3:
	aws s3 sync data/ s3://$(BUCKET)/data/

## Download Data from S3
sync_data_from_s3:
	aws s3 sync s3://$(BUCKET)/data/ data/

## Set up python interpreter environment
create_environment:
ifeq (True,$(IS_ANACONDA))
	@echo ">>> Detected Anaconda, creating conda environment."
ifeq (3,$(findstring 3,$(PYTHON_INTERPRETER)))
	conda create --name $(PROJECT_NAME) python=3.5
else
	conda create --name $(PROJECT_NAME) python=2.7
endif
	@echo ">>> New conda env created. Activate with:\nsource activate $(PROJECT_NAME)"
else
	@pip install -q virtualenv virtualenvwrapper
	@echo ">>> Installing virtualenvwrapper if not already installed.\nMake sure the following lines are in shell startup file\n\
	export WORKON_HOME=$$HOME/.virtualenvs\nexport PROJECT_HOME=$$HOME/Devel\nsource /usr/local/bin/virtualenvwrapper.sh\n"
	@bash -c "source `which virtualenvwrapper.sh`;mkvirtualenv $(PROJECT_NAME) --python=$(PYTHON_INTERPRETER)"
	@echo ">>> New virtualenv created. Activate with:\nworkon $(PROJECT_NAME)"
endif

## Test python environment is setup correctly
test_environment:
	$(PYTHON_INTERPRETER) test_environment.py

#################################################################################
# PROJECT RULES                                                                 #
#################################################################################



#################################################################################
# Self Documenting Commands                                                     #
#################################################################################

.DEFAULT_GOAL := show-help

# Inspired by <http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html>
# sed script explained:
# /^##/:
# 	* save line in hold space
# 	* purge line
# 	* Loop:
# 		* append newline + line to hold space
# 		* go to next line
# 		* if line starts with doc comment, strip comment character off and loop
# 	* remove target prerequisites
# 	* append hold space (+ newline) to line
# 	* replace newline plus comments by `---`
# 	* print line
# Separate expressions are necessary because labels cannot be delimited by
# semicolon; see <http://stackoverflow.com/a/11799865/1968>
.PHONY: show-help
show-help:
	@echo "$$(tput bold)Available rules:$$(tput sgr0)"
	@echo
	@sed -n -e "/^## / { \
		h; \
		s/.*//; \
		:doc" \
		-e "H; \
		n; \
		s/^## //; \
		t doc" \
		-e "s/:.*//; \
		G; \
		s/\\n## /---/; \
		s/\\n/ /g; \
		p; \
	}" ${MAKEFILE_LIST} \
	| LC_ALL='C' sort --ignore-case \
	| awk -F '---' \
		-v ncol=$$(tput cols) \
		-v indent=19 \
		-v col_on="$$(tput setaf 6)" \
		-v col_off="$$(tput sgr0)" \
	'{ \
		printf "%s%*s%s ", col_on, -indent, $$1, col_off; \
		n = split($$2, words, " "); \
		line_length = ncol - indent; \
		for (i = 1; i <= n; i++) { \
			line_length -= length(words[i]) + 1; \
			if (line_length <= 0) { \
				line_length = ncol - indent - length(words[i]) - 1; \
				printf "\n%*s ", -indent, " "; \
			} \
			printf "%s ", words[i]; \
		} \
		printf "\n"; \
	}' \
	| more $(shell test $(shell uname) == Darwin && echo '--no-init --raw-control-chars')
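
The `test_environment` target above runs `test_environment.py`, which is among the 20 committed files but is not shown in this excerpt. As a rough, hypothetical sketch (following the cookiecutter-data-science convention this Makefile is based on, not the repository's actual file), such a script only needs to check that the active interpreter matches the expected major version:

```python
# Hypothetical sketch of a test_environment.py-style check; the real file in
# this commit is not shown above, so names and messages here are assumptions.
import sys

REQUIRED_PYTHON = "python3"  # assumed target; the Makefile also supports 2.7 via conda


def main():
    required_major = 3 if REQUIRED_PYTHON == "python3" else 2
    system_major = sys.version_info.major
    if system_major != required_major:
        raise TypeError(
            "This project requires Python {}. Found: Python {}".format(
                required_major, sys.version.split()[0]))
    print(">>> Development environment passes all tests!")


if __name__ == "__main__":
    main()
```

A typical first run would then be `make create_environment`, activating the new environment, and `make requirements` followed by `make data`, which invokes `src/data/make_dataset.py` as defined above.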

README.md

+56
@@ -0,0 +1,56 @@
LungCancerProject
==============================

A convolutional neural network (CNN) model for detecting lung nodules, using data from the LUNA16 challenge.

Project Organization
------------

    ├── LICENSE
    ├── Makefile           <- Makefile with commands like `make data` or `make train`
    ├── README.md          <- The top-level README for developers using this project.
    ├── data
    │   ├── external       <- Data from third party sources.
    │   ├── interim        <- Intermediate data that has been transformed.
    │   ├── processed      <- The final, canonical data sets for modeling.
    │   └── raw            <- The original, immutable data dump.
    │
    ├── docs               <- A default Sphinx project; see sphinx-doc.org for details
    │
    ├── models             <- Trained and serialized models, model predictions, or model summaries
    │
    ├── notebooks          <- Jupyter notebooks. Naming convention is a number (for ordering),
    │                         the creator's initials, and a short `-` delimited description, e.g.
    │                         `1.0-jqp-initial-data-exploration`.
    │
    ├── references         <- Data dictionaries, manuals, and all other explanatory materials.
    │
    ├── reports            <- Generated analysis as HTML, PDF, LaTeX, etc.
    │   └── figures        <- Generated graphics and figures to be used in reporting
    │
    ├── requirements.txt   <- The requirements file for reproducing the analysis environment, e.g.
    │                         generated with `pip freeze > requirements.txt`
    │
    ├── src                <- Source code for use in this project.
    │   ├── __init__.py    <- Makes src a Python module
    │   │
    │   ├── data           <- Scripts to download or generate data
    │   │   └── make_dataset.py
    │   │
    │   ├── features       <- Scripts to turn raw data into features for modeling
    │   │   └── build_features.py
    │   │
    │   ├── models         <- Scripts to train models and then use trained models to make
    │   │   │                 predictions
    │   │   ├── predict_model.py
    │   │   └── train_model.py
    │   │
    │   └── visualization  <- Scripts to create exploratory and results oriented visualizations
    │       └── visualize.py
    │
    └── tox.ini            <- tox file with settings for running tox; see tox.testrun.org


--------

<p><small>Project based on the <a target="_blank" href="https://drivendata.github.io/cookiecutter-data-science/">cookiecutter data science project template</a>. #cookiecutterdatascience</small></p>
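
The README points to `src/models/train_model.py` for the CNN itself, but that code is not among the files shown in this view. Purely as an illustrative sketch of the kind of model the project description implies (a binary nodule vs. non-nodule classifier over candidate patches), and assuming Keras as the framework, it might look something like this:

```python
# Illustration only: the repository's train_model.py is not shown in this
# commit view. The framework (Keras), layer sizes, and the 64x64 single-channel
# patch input are assumptions, not taken from the project.
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout


def build_nodule_classifier(input_shape=(64, 64, 1)):
    """Binary classifier: nodule vs. non-nodule candidate patch."""
    model = Sequential([
        Conv2D(32, (3, 3), activation="relu", input_shape=input_shape),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), activation="relu"),
        MaxPooling2D((2, 2)),
        Flatten(),
        Dense(128, activation="relu"),
        Dropout(0.5),
        Dense(1, activation="sigmoid"),
    ])
    model.compile(optimizer="adam", loss="binary_crossentropy",
                  metrics=["accuracy"])
    return model
```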

notebooks/.DS_Store

6 KB
Binary file not shown.

notebooks/.gitkeep

Whitespace-only changes.
