From 97709eef39fd452473d0f71e4ed3e5033014d30b Mon Sep 17 00:00:00 2001
From: Deshraj Yadav
Date: Sun, 11 Feb 2018 17:28:14 -0500
Subject: [PATCH] Fix #1191 #1345: Add section about compiled libraries and
 evaluation script

---
 docs/source/architecture_decisions.md | 2 +-
 docs/source/conf.py                   | 8 ++++----
 docs/source/evaluation_scripts.md     | 6 ++++--
 docs/source/index.rst                 | 5 +++--
 4 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/docs/source/architecture_decisions.md b/docs/source/architecture_decisions.md
index 018157d198..ca0bba4c2a 100644
--- a/docs/source/architecture_decisions.md
+++ b/docs/source/architecture_decisions.md
@@ -1,4 +1,4 @@
-## Architecture Decisions
+## Architectural Decisions
 
 This is a collection of records for architecturally significant decisions.
 
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 4c42170553..6c73f21697 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -61,7 +61,7 @@
 
 # General information about the project.
 project = u'EvalAI'
-copyright = u'2017, CloudCV Team'
+copyright = u'2018, CloudCV Team'
 author = u'CloudCV Team'
 
 # The version info for the project you're documenting, acts as replacement for
@@ -69,9 +69,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = u'1.0'
+version = u'1.1'
 # The full version, including alpha/beta/rc tags.
-release = u'1.0'
+release = u'1.1'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -329,7 +329,7 @@
 #  dir menu entry, description, category)
 texinfo_documents = [
     (master_doc, 'EvalAI', u'EvalAI Documentation',
-     author, 'EvalAI', 'One line description of project.',
+     author, 'EvalAI', 'Evaluating state of the art in AI',
      'Miscellaneous'),
 ]
 
diff --git a/docs/source/evaluation_scripts.md b/docs/source/evaluation_scripts.md
index 940217dc84..4f719e3425 100644
--- a/docs/source/evaluation_scripts.md
+++ b/docs/source/evaluation_scripts.md
@@ -1,4 +1,4 @@
-## Evaluation Script
+## Writing an Evaluation Script
 
 Each challenge has an evaluation script, which evaluates the submission of participants and returns the scores which will populate the leaderboard.
 
@@ -6,7 +6,7 @@ The logic for evaluating and judging a submission is customizable and varies fro
 
 Evaluation scripts are required to have an `evaluate` function. This is the main function, which is used by workers to evaluate the submission messages.
 
-The syntax of evaluate function is
+The syntax of the `evaluate` function is:
 
 ```
 
@@ -53,3 +53,5 @@ output['result'] = [
 ```
 
 `output` should contain a key named `result`, which is a list containing entries per challenge phase split. Each challenge phase split object contains various keys, which are then displayed as columns in leaderboard.
+
+**Note**: If your evaluation script uses precompiled libraries (MSCOCO, for example), make sure the library is compiled against a Linux distribution (Ubuntu 14.04 recommended). Libraries compiled against OS X or Windows may not work properly.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 531c0cbd50..4dfe62d40d 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -12,11 +12,12 @@ Contents:
    :maxdepth: 2
 
    setup
+   challenge_creation
+   evaluation_scripts
+   submission
    architecture
    architecture_decisions
    directory_structure
-   challenge_creation
-   submission
    migrations
    contribution
    pull_request
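
As context for the evaluation_scripts.md changes above, here is a minimal sketch of an `evaluate` function that satisfies the documented contract: an `evaluate` entry point whose `output` contains a `result` list with one entry per challenge phase split. The parameter names, split name, and metric names below are illustrative assumptions only; the actual signature lives in the part of evaluation_scripts.md that this patch does not touch.

```python
# Illustrative sketch only -- parameter names, split name, and metric names
# are hypothetical; they are not taken from the patch above.
def evaluate(test_annotation_file, user_submission_file, phase_codename, **kwargs):
    """Compare a participant's submission against the ground truth and
    return scores for each challenge phase split."""
    output = {}
    # `result` is a list with one entry per challenge phase split; the keys
    # inside each split dict become columns on the leaderboard.
    output['result'] = [
        {
            'train_split': {       # hypothetical split name
                'Accuracy': 90.0,  # hypothetical metric
                'Total': 10,       # hypothetical metric
            }
        }
    ]
    return output
```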