From 2ddc9452c95723372e553252c0d318dbd0eb7c24 Mon Sep 17 00:00:00 2001 From: jasonb Date: Mon, 4 Jan 2010 19:39:17 +1100 Subject: [PATCH] added deprecated --- .gitignore | 6 + deprecated/README | 10 + deprecated/v0.1/ACO/.cvsignore | 2 + deprecated/v0.1/ACO/ACO.tex | 80 + deprecated/v0.1/ACO/AntColonySystems.tex | 51 + deprecated/v0.1/ACO/AntSystems.tex | 50 + deprecated/v0.1/AIS/AIS.tex | 36 + .../v0.1/AIS/ClonalSelectionAlgorithm.tex | 46 + deprecated/v0.1/AIS/NegativeSelection.tex | 46 + deprecated/v0.1/Appendices/.cvsignore | 2 + deprecated/v0.1/Appendices/PseudoCode.tex | 11 + deprecated/v0.1/Background/.cvsignore | 2 + deprecated/v0.1/Background/Background.tex | 53 + deprecated/v0.1/Bibliography/.cvsignore | 2 + deprecated/v0.1/Bibliography/Bibliography.tex | 17 + deprecated/v0.1/EC/DifferentialEvolution.tex | 46 + deprecated/v0.1/EC/EC.tex | 35 + deprecated/v0.1/EC/GeneticAlgorithm.tex | 46 + deprecated/v0.1/Header/.cvsignore | 2 + deprecated/v0.1/Header/Copyright.tex | 18 + deprecated/v0.1/Header/TOC.tex | 19 + deprecated/v0.1/Header/Title.tex | 25 + deprecated/v0.1/Insights/.cvsignore | 2 + deprecated/v0.1/Insights/Insights.tex | 34 + deprecated/v0.1/PSO/PSO.tex | 35 + .../v0.1/PSO/ParticleSwarmOptimisation.tex | 46 + deprecated/v0.1/definitions.tex | 30 + deprecated/v0.1/document.tex | 78 + deprecated/v0.2/bitstringutils.rb | 15 + deprecated/v0.2/book_about.txt | 13 + deprecated/v0.2/book_algorithm_selection.rb | 28 + deprecated/v0.2/book_field.txt | 28 + deprecated/v0.2/book_ga_survey.txt | 11 + deprecated/v0.2/book_goal.txt | 10 + deprecated/v0.2/book_motivation.txt | 26 + deprecated/v0.2/book_problems.txt | 43 + deprecated/v0.2/book_toc.txt | 137 ++ .../v0.2/cellulargeneticalgorithm_code.rb | 194 ++ .../cellulargeneticalgorithm_overview.txt | 35 + .../cellulargeneticalgorithm_tutorial.txt | 101 + deprecated/v0.2/collectionutils.rb | 5 + .../v0.2/crowdinggeneticalgorithm_code.rb | 197 ++ .../crowdinggeneticalgorithm_overview.txt | 39 + 
.../crowdinggeneticalgorithm_tutorial.txt | 122 + deprecated/v0.2/differentialevolution_code.rb | 178 ++ .../v0.2/differentialevolution_overview.txt | 44 + .../v0.2/differentialevolution_tutorial.txt | 94 + deprecated/v0.2/evolutionprogramming_code.rb | 218 ++ .../v0.2/evolutionprogramming_overview.txt | 34 + .../v0.2/evolutionprogramming_tutorial.txt | 110 + deprecated/v0.2/evolutionstrategies_code.rb | 215 ++ .../v0.2/evolutionstrategies_overview.txt | 49 + .../v0.2/evolutionstrategies_tutorial.txt | 76 + .../v0.2/geneexpressionprogramming_code.rb | 288 +++ .../geneexpressionprogramming_overview.txt | 45 + .../geneexpressionprogramming_tutorial.txt | 143 ++ deprecated/v0.2/geneticalgorithm_code.rb | 163 ++ deprecated/v0.2/geneticalgorithm_overview.txt | 48 + deprecated/v0.2/geneticalgorithm_tutorial.txt | 119 + deprecated/v0.2/geneticprogramming_code.rb | 276 +++ .../v0.2/geneticprogramming_overview.txt | 46 + .../v0.2/geneticprogramming_tutorial.txt | 66 + deprecated/v0.2/grammaticalevolution_code.rb | 280 +++ .../v0.2/grammaticalevolution_overview.txt | 39 + .../v0.2/grammaticalevolution_tutorial.txt | 150 ++ deprecated/v0.2/introduction_background.txt | 78 + deprecated/v0.2/introduction_field.txt | 137 ++ .../v0.2/islandgeneticalgorithm_code.rb | 262 ++ .../v0.2/islandgeneticalgorithm_overview.txt | 31 + .../v0.2/islandgeneticalgorithm_tutorial.txt | 110 + deprecated/v0.2/randomsearch.rb | 96 + deprecated/v0.2/utils.rb | 124 + deprecated/v0.3/algorithm2e.sty | 2122 +++++++++++++++++ deprecated/v0.3/bibentry.sty | 75 + deprecated/v0.3/bibliography.bib | 89 + deprecated/v0.3/c_advanced.tex | 15 + deprecated/v0.3/c_evolution.tex | 13 + deprecated/v0.3/c_immune.tex | 2 + deprecated/v0.3/c_introduction.tex | 97 + deprecated/v0.3/c_neural.tex | 2 + deprecated/v0.3/c_physical.tex | 2 + deprecated/v0.3/c_probabilistic.tex | 2 + deprecated/v0.3/c_stochastic.tex | 23 + deprecated/v0.3/c_swarm.tex | 2 + deprecated/v0.3/copyright.tex | 20 + deprecated/v0.3/definitions.tex 
| 176 ++ .../v0.3/evolution/evolutionstrategies.tex | 25 + .../v0.3/evolution/geneticalgorithm.tex | 208 ++ .../v0.3/evolution/geneticprogramming.tex | 25 + deprecated/v0.3/master.tex | 28 + deprecated/v0.3/preface.tex | 18 + deprecated/v0.3/project/book format.txt | 23 + deprecated/v0.3/project/plan.txt | 111 + .../v0.3/stochastic/localizedrandomsearch.tex | 10 + .../v0.3/stochastic/multiplerestart.tex | 1 + deprecated/v0.3/stochastic/numbers.tex | 112 + deprecated/v0.3/stochastic/randomsearch.tex | 120 + deprecated/v0.3/upquote.sty | 76 + deprecated/v0.4/posts/README | 3 + deprecated/v0.4/src/AdaptiveRandomSearch.rb | 159 ++ deprecated/v0.4/src/GRASP.rb | 200 ++ deprecated/v0.4/src/IteratedLocalSearch.rb | 261 ++ deprecated/v0.4/src/LocalizedRandomSearch.rb | 117 + deprecated/v0.4/src/MultipleRestartSearch.rb | 157 ++ deprecated/v0.4/src/RandomSearch.rb | 111 + .../v0.4/src/StochasticDiffusionSearch.rb | 17 + deprecated/v0.4/src/TabuSearch.rb | 174 ++ deprecated/v0.5/algorithm2e.sty | 2122 +++++++++++++++++ deprecated/v0.5/bibentry.sty | 75 + deprecated/v0.5/bibliography.bib | 89 + deprecated/v0.5/c_advanced.tex | 15 + deprecated/v0.5/c_evolution.tex | 30 + deprecated/v0.5/c_immune.tex | 34 + deprecated/v0.5/c_introduction.tex | 97 + deprecated/v0.5/c_neural.tex | 2 + deprecated/v0.5/c_physical.tex | 2 + deprecated/v0.5/c_probabilistic.tex | 2 + deprecated/v0.5/c_stochastic.tex | 39 + deprecated/v0.5/c_swarm.tex | 2 + deprecated/v0.5/copyright.tex | 20 + deprecated/v0.5/definitions.tex | 176 ++ .../evolution/cellulargeneticalgorithm.tex | 18 + .../evolution/crowdinggeneticalgorithm.tex | 18 + .../v0.5/evolution/differentialevolution.tex | 145 ++ .../evolution/evolutionaryprogramming.tex | 159 ++ .../v0.5/evolution/evolutionstrategies.tex | 133 ++ .../evolution/geneexpressionprogramming.tex | 18 + .../v0.5/evolution/geneticalgorithm.tex | 167 ++ .../v0.5/evolution/geneticprogramming.tex | 115 + .../v0.5/evolution/grammaticalevolution.tex | 18 + 
.../v0.5/evolution/islandpopulation.tex | 18 + deprecated/v0.5/immune/clonalselection.tex | 2 + deprecated/v0.5/immune/dendriticcell.tex | 2 + deprecated/v0.5/immune/immunenetwork.tex | 2 + deprecated/v0.5/immune/negativeselection.tex | 2 + deprecated/v0.5/master.tex | 28 + deprecated/v0.5/preface.tex | 18 + deprecated/v0.5/project/plan.txt | 110 + .../v0.5/stochastic/adaptiverandomsearch.tex | 179 ++ deprecated/v0.5/stochastic/grasp.tex | 223 ++ .../v0.5/stochastic/iteratedlocalsearch.tex | 198 ++ .../v0.5/stochastic/localizedrandomsearch.tex | 137 ++ .../v0.5/stochastic/multiplerestart.tex | 181 ++ deprecated/v0.5/stochastic/numbers.tex | 112 + deprecated/v0.5/stochastic/randomsearch.tex | 129 + .../v0.5/stochastic/stochasticdiffusion.tex | 3 + deprecated/v0.5/stochastic/tabusearch.tex | 203 ++ deprecated/v0.5/upquote.sty | 76 + deprecated/v0.6/algorithm2e.sty | 2122 +++++++++++++++++ deprecated/v0.6/bibentry.sty | 75 + deprecated/v0.6/bibliography.bib | 89 + deprecated/v0.6/c_evolutionary.tex | 3 + deprecated/v0.6/copyright.tex | 17 + deprecated/v0.6/definitions.tex | 108 + .../v0.6/evolutionary/geneticalgorithm.tex | 28 + deprecated/v0.6/master.tex | 19 + deprecated/v0.6/upquote.sty | 76 + 157 files changed, 17605 insertions(+) create mode 100644 deprecated/README create mode 100755 deprecated/v0.1/ACO/.cvsignore create mode 100755 deprecated/v0.1/ACO/ACO.tex create mode 100755 deprecated/v0.1/ACO/AntColonySystems.tex create mode 100755 deprecated/v0.1/ACO/AntSystems.tex create mode 100755 deprecated/v0.1/AIS/AIS.tex create mode 100755 deprecated/v0.1/AIS/ClonalSelectionAlgorithm.tex create mode 100755 deprecated/v0.1/AIS/NegativeSelection.tex create mode 100755 deprecated/v0.1/Appendices/.cvsignore create mode 100755 deprecated/v0.1/Appendices/PseudoCode.tex create mode 100755 deprecated/v0.1/Background/.cvsignore create mode 100755 deprecated/v0.1/Background/Background.tex create mode 100755 deprecated/v0.1/Bibliography/.cvsignore create mode 100755 
deprecated/v0.1/Bibliography/Bibliography.tex create mode 100755 deprecated/v0.1/EC/DifferentialEvolution.tex create mode 100755 deprecated/v0.1/EC/EC.tex create mode 100755 deprecated/v0.1/EC/GeneticAlgorithm.tex create mode 100755 deprecated/v0.1/Header/.cvsignore create mode 100755 deprecated/v0.1/Header/Copyright.tex create mode 100755 deprecated/v0.1/Header/TOC.tex create mode 100755 deprecated/v0.1/Header/Title.tex create mode 100755 deprecated/v0.1/Insights/.cvsignore create mode 100755 deprecated/v0.1/Insights/Insights.tex create mode 100755 deprecated/v0.1/PSO/PSO.tex create mode 100755 deprecated/v0.1/PSO/ParticleSwarmOptimisation.tex create mode 100755 deprecated/v0.1/definitions.tex create mode 100755 deprecated/v0.1/document.tex create mode 100644 deprecated/v0.2/bitstringutils.rb create mode 100755 deprecated/v0.2/book_about.txt create mode 100755 deprecated/v0.2/book_algorithm_selection.rb create mode 100755 deprecated/v0.2/book_field.txt create mode 100755 deprecated/v0.2/book_ga_survey.txt create mode 100755 deprecated/v0.2/book_goal.txt create mode 100755 deprecated/v0.2/book_motivation.txt create mode 100755 deprecated/v0.2/book_problems.txt create mode 100755 deprecated/v0.2/book_toc.txt create mode 100755 deprecated/v0.2/cellulargeneticalgorithm_code.rb create mode 100755 deprecated/v0.2/cellulargeneticalgorithm_overview.txt create mode 100755 deprecated/v0.2/cellulargeneticalgorithm_tutorial.txt create mode 100644 deprecated/v0.2/collectionutils.rb create mode 100755 deprecated/v0.2/crowdinggeneticalgorithm_code.rb create mode 100755 deprecated/v0.2/crowdinggeneticalgorithm_overview.txt create mode 100755 deprecated/v0.2/crowdinggeneticalgorithm_tutorial.txt create mode 100755 deprecated/v0.2/differentialevolution_code.rb create mode 100755 deprecated/v0.2/differentialevolution_overview.txt create mode 100755 deprecated/v0.2/differentialevolution_tutorial.txt create mode 100755 deprecated/v0.2/evolutionprogramming_code.rb create mode 100755 
deprecated/v0.2/evolutionprogramming_overview.txt create mode 100755 deprecated/v0.2/evolutionprogramming_tutorial.txt create mode 100755 deprecated/v0.2/evolutionstrategies_code.rb create mode 100755 deprecated/v0.2/evolutionstrategies_overview.txt create mode 100755 deprecated/v0.2/evolutionstrategies_tutorial.txt create mode 100755 deprecated/v0.2/geneexpressionprogramming_code.rb create mode 100755 deprecated/v0.2/geneexpressionprogramming_overview.txt create mode 100755 deprecated/v0.2/geneexpressionprogramming_tutorial.txt create mode 100755 deprecated/v0.2/geneticalgorithm_code.rb create mode 100755 deprecated/v0.2/geneticalgorithm_overview.txt create mode 100755 deprecated/v0.2/geneticalgorithm_tutorial.txt create mode 100755 deprecated/v0.2/geneticprogramming_code.rb create mode 100755 deprecated/v0.2/geneticprogramming_overview.txt create mode 100755 deprecated/v0.2/geneticprogramming_tutorial.txt create mode 100755 deprecated/v0.2/grammaticalevolution_code.rb create mode 100755 deprecated/v0.2/grammaticalevolution_overview.txt create mode 100755 deprecated/v0.2/grammaticalevolution_tutorial.txt create mode 100755 deprecated/v0.2/introduction_background.txt create mode 100755 deprecated/v0.2/introduction_field.txt create mode 100755 deprecated/v0.2/islandgeneticalgorithm_code.rb create mode 100755 deprecated/v0.2/islandgeneticalgorithm_overview.txt create mode 100755 deprecated/v0.2/islandgeneticalgorithm_tutorial.txt create mode 100644 deprecated/v0.2/randomsearch.rb create mode 100755 deprecated/v0.2/utils.rb create mode 100755 deprecated/v0.3/algorithm2e.sty create mode 100755 deprecated/v0.3/bibentry.sty create mode 100644 deprecated/v0.3/bibliography.bib create mode 100755 deprecated/v0.3/c_advanced.tex create mode 100755 deprecated/v0.3/c_evolution.tex create mode 100755 deprecated/v0.3/c_immune.tex create mode 100755 deprecated/v0.3/c_introduction.tex create mode 100755 deprecated/v0.3/c_neural.tex create mode 100755 deprecated/v0.3/c_physical.tex 
create mode 100755 deprecated/v0.3/c_probabilistic.tex create mode 100755 deprecated/v0.3/c_stochastic.tex create mode 100755 deprecated/v0.3/c_swarm.tex create mode 100755 deprecated/v0.3/copyright.tex create mode 100755 deprecated/v0.3/definitions.tex create mode 100644 deprecated/v0.3/evolution/evolutionstrategies.tex create mode 100644 deprecated/v0.3/evolution/geneticalgorithm.tex create mode 100644 deprecated/v0.3/evolution/geneticprogramming.tex create mode 100755 deprecated/v0.3/master.tex create mode 100755 deprecated/v0.3/preface.tex create mode 100644 deprecated/v0.3/project/book format.txt create mode 100644 deprecated/v0.3/project/plan.txt create mode 100644 deprecated/v0.3/stochastic/localizedrandomsearch.tex create mode 100644 deprecated/v0.3/stochastic/multiplerestart.tex create mode 100644 deprecated/v0.3/stochastic/numbers.tex create mode 100644 deprecated/v0.3/stochastic/randomsearch.tex create mode 100755 deprecated/v0.3/upquote.sty create mode 100644 deprecated/v0.4/posts/README create mode 100644 deprecated/v0.4/src/AdaptiveRandomSearch.rb create mode 100644 deprecated/v0.4/src/GRASP.rb create mode 100644 deprecated/v0.4/src/IteratedLocalSearch.rb create mode 100644 deprecated/v0.4/src/LocalizedRandomSearch.rb create mode 100644 deprecated/v0.4/src/MultipleRestartSearch.rb create mode 100644 deprecated/v0.4/src/RandomSearch.rb create mode 100644 deprecated/v0.4/src/StochasticDiffusionSearch.rb create mode 100644 deprecated/v0.4/src/TabuSearch.rb create mode 100755 deprecated/v0.5/algorithm2e.sty create mode 100755 deprecated/v0.5/bibentry.sty create mode 100644 deprecated/v0.5/bibliography.bib create mode 100755 deprecated/v0.5/c_advanced.tex create mode 100755 deprecated/v0.5/c_evolution.tex create mode 100755 deprecated/v0.5/c_immune.tex create mode 100755 deprecated/v0.5/c_introduction.tex create mode 100755 deprecated/v0.5/c_neural.tex create mode 100755 deprecated/v0.5/c_physical.tex create mode 100755 deprecated/v0.5/c_probabilistic.tex 
create mode 100755 deprecated/v0.5/c_stochastic.tex create mode 100755 deprecated/v0.5/c_swarm.tex create mode 100755 deprecated/v0.5/copyright.tex create mode 100755 deprecated/v0.5/definitions.tex create mode 100644 deprecated/v0.5/evolution/cellulargeneticalgorithm.tex create mode 100644 deprecated/v0.5/evolution/crowdinggeneticalgorithm.tex create mode 100644 deprecated/v0.5/evolution/differentialevolution.tex create mode 100644 deprecated/v0.5/evolution/evolutionaryprogramming.tex create mode 100644 deprecated/v0.5/evolution/evolutionstrategies.tex create mode 100644 deprecated/v0.5/evolution/geneexpressionprogramming.tex create mode 100644 deprecated/v0.5/evolution/geneticalgorithm.tex create mode 100644 deprecated/v0.5/evolution/geneticprogramming.tex create mode 100644 deprecated/v0.5/evolution/grammaticalevolution.tex create mode 100644 deprecated/v0.5/evolution/islandpopulation.tex create mode 100644 deprecated/v0.5/immune/clonalselection.tex create mode 100644 deprecated/v0.5/immune/dendriticcell.tex create mode 100644 deprecated/v0.5/immune/immunenetwork.tex create mode 100644 deprecated/v0.5/immune/negativeselection.tex create mode 100755 deprecated/v0.5/master.tex create mode 100755 deprecated/v0.5/preface.tex create mode 100644 deprecated/v0.5/project/plan.txt create mode 100644 deprecated/v0.5/stochastic/adaptiverandomsearch.tex create mode 100644 deprecated/v0.5/stochastic/grasp.tex create mode 100644 deprecated/v0.5/stochastic/iteratedlocalsearch.tex create mode 100644 deprecated/v0.5/stochastic/localizedrandomsearch.tex create mode 100644 deprecated/v0.5/stochastic/multiplerestart.tex create mode 100644 deprecated/v0.5/stochastic/numbers.tex create mode 100644 deprecated/v0.5/stochastic/randomsearch.tex create mode 100644 deprecated/v0.5/stochastic/stochasticdiffusion.tex create mode 100644 deprecated/v0.5/stochastic/tabusearch.tex create mode 100755 deprecated/v0.5/upquote.sty create mode 100755 deprecated/v0.6/algorithm2e.sty create mode 100755 
deprecated/v0.6/bibentry.sty create mode 100644 deprecated/v0.6/bibliography.bib create mode 100644 deprecated/v0.6/c_evolutionary.tex create mode 100755 deprecated/v0.6/copyright.tex create mode 100755 deprecated/v0.6/definitions.tex create mode 100644 deprecated/v0.6/evolutionary/geneticalgorithm.tex create mode 100755 deprecated/v0.6/master.tex create mode 100755 deprecated/v0.6/upquote.sty diff --git a/.gitignore b/.gitignore index 4b038eb4..a378bc96 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,13 @@ book/*.pdf book/*.toc book/*.out book/*.log +workspace/**/*.toc workspace/**/*.aux workspace/**/*.pdf workspace/**/*.out workspace/**/*.log +deprecated/**/*.toc +deprecated/**/*.aux +deprecated/**/*.pdf +deprecated/**/*.out +deprecated/**/*.log diff --git a/deprecated/README b/deprecated/README new file mode 100644 index 00000000..efc6a32b --- /dev/null +++ b/deprecated/README @@ -0,0 +1,10 @@ +This directory (deprecated) contains the previous epochal attempts at the clever algorithms project before the current project. 
+ +Briefly, these attempts include: + +v0.1: January 2008, Initial attempt, Latex, called Algorithm Atlas +v0.2: December 2008, Ruby code focus, 10 evolutionary algorithms, called EC Book +v0.3: December 2008-January 2009, Cookbook format, Latex, called Tutorial Book +v0.4: January-February 2009, Tutorial format, Blog, called Inspired Algorithms +v0.5: July 2009, Unified approach, Latex, called Unified +v0.6: July 2009, A simplified approach, Latex, called Simplified diff --git a/deprecated/v0.1/ACO/.cvsignore b/deprecated/v0.1/ACO/.cvsignore new file mode 100755 index 00000000..bee904a6 --- /dev/null +++ b/deprecated/v0.1/ACO/.cvsignore @@ -0,0 +1,2 @@ +*.aux +.cvsignore diff --git a/deprecated/v0.1/ACO/ACO.tex b/deprecated/v0.1/ACO/ACO.tex new file mode 100755 index 00000000..06e7af35 --- /dev/null +++ b/deprecated/v0.1/ACO/ACO.tex @@ -0,0 +1,80 @@ +% +% ACO.tex +% + + +% +% Ant Colony Optimisation +% +\chapter{Ant Colony Optimisation} +\label{ch:aco} + + +% +% Chapter Overview +% +\section{Chapter Overview} +\label{sec:aco:overview} +what is this chapter all about + + +% +% Paradigm +% +\section{Paradigm} +\label{sec:aco:paradigm} +not sure on specific structure.. + +- what about more information? start or end of a chapter? +- what about summary of the algorithms in this chapter? +- what about something that links all the algorithms together? + +% +% Metahor +% +\subsection{Metaphor} +generalities about the ACO metaphor + +% +% Strategy +% +\subsection{Strategy} +generalities about the ACO strategy + + +% +% Operations +% +\subsection{Operations} +generalities about the ACO operations + + + + + +% +% ALGORITHMS +% + +% AS +\newpage\input{ACO/AntSystems} +% ACS +\newpage\input{ACO/AntColonySystems} + +% max min? +% rank based +% elites? +% pop aco? +\newpage + + +% +% Chapter Summary +% +\section{Chapter Summary} +\label{sec:aco:summary} +what was discussed, how does it link into the broader text? 
+ + + +% EOF \ No newline at end of file diff --git a/deprecated/v0.1/ACO/AntColonySystems.tex b/deprecated/v0.1/ACO/AntColonySystems.tex new file mode 100755 index 00000000..97161b6b --- /dev/null +++ b/deprecated/v0.1/ACO/AntColonySystems.tex @@ -0,0 +1,51 @@ +% +% AntColonySystems.tex +% + + +% +% Ant Colony Systems +% +\section{Ant Colony Systems} +\label{sec:aco:acs} + +% +% Metaphor +% +\subsection{Metaphor} +words + + + +% +% Strategy +% +\subsection{Strategy} +words + + + +% +% Operation +% +\subsection{Operation} +words + + +% +% Heuristics +% +\subsection{Heuristics} +words + + + +% +% More Information +% +\subsection{More Information} +first paper? + + + +% EOF \ No newline at end of file diff --git a/deprecated/v0.1/ACO/AntSystems.tex b/deprecated/v0.1/ACO/AntSystems.tex new file mode 100755 index 00000000..a0cc11f0 --- /dev/null +++ b/deprecated/v0.1/ACO/AntSystems.tex @@ -0,0 +1,50 @@ +% +% AntSystems.tex +% + +% +% Ant Systems +% +\section{Ant Systems} +\label{sec:aco:as} + +% +% Metaphor +% +\subsection{Metaphor} +Often quoted as the first ACO algorithm, Ant Systems (AS) was borne from one of three algorithms, Ant Cycle, first proposed by Dorigo~\cite{}. + + + +% +% Strategy +% +\subsection{Strategy} +The algorithm utilises the stepwise construction and ... + + +% +% Operation +% +\subsection{Operation} +words + + +% +% Heuristics +% +\subsection{Heuristics} +words + + + +% +% More Information +% +\subsection{More Information} +first paper? 
+ + + + +% EOF \ No newline at end of file diff --git a/deprecated/v0.1/AIS/AIS.tex b/deprecated/v0.1/AIS/AIS.tex new file mode 100755 index 00000000..66c7429e --- /dev/null +++ b/deprecated/v0.1/AIS/AIS.tex @@ -0,0 +1,36 @@ +% +% AIS.tex +% + + +% +% Artificial Immune Systems +% +\chapter{Artificial Immune Systems} +\label{ch:ais} + +% +% Chapter Overview +% +\section{Chapter Overview} +\label{sec:ais:overview} +what is this chapter all about + + + +% +% ALGORITHMS +% +\newpage\input{AIS/ClonalSelectionAlgorithm} +\newpage\input{AIS/NegativeSelection} + + + +% +% Chapter Summary +% +\section{Chapter Summary} +\label{sec:ais:summary} +what was discussed, how does it link into the broader text? + +% EOF \ No newline at end of file diff --git a/deprecated/v0.1/AIS/ClonalSelectionAlgorithm.tex b/deprecated/v0.1/AIS/ClonalSelectionAlgorithm.tex new file mode 100755 index 00000000..b1a81c00 --- /dev/null +++ b/deprecated/v0.1/AIS/ClonalSelectionAlgorithm.tex @@ -0,0 +1,46 @@ +% +% ClonalSelectionAlgorithm.tex +% + + +% +% Clonal Selection Algorithm +% +\section{Clonal Selection Algorithm} +words + + +% +% Metaphor +% +\subsection{Metaphor} +words + +% +% Strategy +% +\subsection{Strategy} +words + + +% +% Operations +% +\subsection{Operations} +abstraction? + + +% +% More Information +% +\subsection{Heuristics} +words + +% +% More Information +% +\subsection{More Information} +words + + +% EOF \ No newline at end of file diff --git a/deprecated/v0.1/AIS/NegativeSelection.tex b/deprecated/v0.1/AIS/NegativeSelection.tex new file mode 100755 index 00000000..e2470a3c --- /dev/null +++ b/deprecated/v0.1/AIS/NegativeSelection.tex @@ -0,0 +1,46 @@ +% +% NegativeSelection.tex +% + + +% +% Negative Selection Algorithm +% +\section{Negative Selection Algorithm} +words + + +% +% Metaphor +% +\subsection{Metaphor} +words + +% +% Strategy +% +\subsection{Strategy} +words + + +% +% Operations +% +\subsection{Operations} +abstraction? 
+ + +% +% More Information +% +\subsection{Heuristics} +words + +% +% More Information +% +\subsection{More Information} +words + + +% EOF \ No newline at end of file diff --git a/deprecated/v0.1/Appendices/.cvsignore b/deprecated/v0.1/Appendices/.cvsignore new file mode 100755 index 00000000..b969c565 --- /dev/null +++ b/deprecated/v0.1/Appendices/.cvsignore @@ -0,0 +1,2 @@ +*.aux +*.cvsignore diff --git a/deprecated/v0.1/Appendices/PseudoCode.tex b/deprecated/v0.1/Appendices/PseudoCode.tex new file mode 100755 index 00000000..b4b1fd65 --- /dev/null +++ b/deprecated/v0.1/Appendices/PseudoCode.tex @@ -0,0 +1,11 @@ +% +% PseudoCode.tex +% + +% +% Elaobrated pseudo code for algorithms discussed +% + +\chapter{Alorithm Psuedo-Code} +\label{ch:appendix:psueodcode} +no idea. \ No newline at end of file diff --git a/deprecated/v0.1/Background/.cvsignore b/deprecated/v0.1/Background/.cvsignore new file mode 100755 index 00000000..b969c565 --- /dev/null +++ b/deprecated/v0.1/Background/.cvsignore @@ -0,0 +1,2 @@ +*.aux +*.cvsignore diff --git a/deprecated/v0.1/Background/Background.tex b/deprecated/v0.1/Background/Background.tex new file mode 100755 index 00000000..fa65b458 --- /dev/null +++ b/deprecated/v0.1/Background/Background.tex @@ -0,0 +1,53 @@ +% +% Background.tex +% + + +% +% Background material to provide context for the paradigms +% +\chapter{Background} +\label{ch:background} + +% +% Overview +% +\section{Chapter Overview} +\label{ch:insights:overview} +words + + + + +% +% Metaphor +% +\section{Metaphor} +why metaphors are a good thing for problem solving\ldots + + +% +% Strategy +% +\section{Strategy} +the `search space' paradigm and why strategies are needed for traversing +problem spaces + + + + + + +% +% Summary +% +\section{Chapter Summary} +\label{ch:insights:summary} +words + + + + + + +% EOF \ No newline at end of file diff --git a/deprecated/v0.1/Bibliography/.cvsignore b/deprecated/v0.1/Bibliography/.cvsignore new file mode 100755 index 
00000000..b969c565 --- /dev/null +++ b/deprecated/v0.1/Bibliography/.cvsignore @@ -0,0 +1,2 @@ +*.aux +*.cvsignore diff --git a/deprecated/v0.1/Bibliography/Bibliography.tex b/deprecated/v0.1/Bibliography/Bibliography.tex new file mode 100755 index 00000000..b6ee4d15 --- /dev/null +++ b/deprecated/v0.1/Bibliography/Bibliography.tex @@ -0,0 +1,17 @@ +% +% Bibliography.tex +% + +% The Bibliography + +% add Bibliography to table of contents +\addcontentsline{toc}{chapter}{Bibliography} + +% the style for the bibliography +% plain is good for [1], [2] +% agsm is the Harvard style (needs the package natbib), gives me errors (?) +% alpha is ugly +\bibliographystyle{plain} + +% the bibTeX files to draw the citations from +\bibliography{bibliography} \ No newline at end of file diff --git a/deprecated/v0.1/EC/DifferentialEvolution.tex b/deprecated/v0.1/EC/DifferentialEvolution.tex new file mode 100755 index 00000000..66836e6b --- /dev/null +++ b/deprecated/v0.1/EC/DifferentialEvolution.tex @@ -0,0 +1,46 @@ +% +% DifferentialEvolution.tex +% + + +% +% Differential Evolution +% +\section{Differential Evolution} +words + + +% +% Metaphor +% +\subsection{Metaphor} +words + +% +% Strategy +% +\subsection{Strategy} +words + + +% +% Operations +% +\subsection{Operations} +abstraction? 
+ + +% +% More Information +% +\subsection{Heuristics} +words + +% +% More Information +% +\subsection{More Information} +words + + +% EOF \ No newline at end of file diff --git a/deprecated/v0.1/EC/EC.tex b/deprecated/v0.1/EC/EC.tex new file mode 100755 index 00000000..8da7f4a2 --- /dev/null +++ b/deprecated/v0.1/EC/EC.tex @@ -0,0 +1,35 @@ +% +% EC.tex +% + + +% +% Evolutionary Computation +% +\chapter{Evolutionary Computation} +\label{ch:ec} + +% +% Chapter Overview +% +\section{Chapter Overview} +\label{sec:ec:overview} +what is this chapter all about + +% +% ALGORITHMS +% +\newpage\input{EC/GeneticAlgorithm} +\newpage\input{EC/DifferentialEvolution} + + +% +% Chapter Summary +% +\section{Chapter Summary} +\label{sec:ec:summary} +what was discussed, how does it link into the broader text? + + + +% EOF \ No newline at end of file diff --git a/deprecated/v0.1/EC/GeneticAlgorithm.tex b/deprecated/v0.1/EC/GeneticAlgorithm.tex new file mode 100755 index 00000000..fad337ed --- /dev/null +++ b/deprecated/v0.1/EC/GeneticAlgorithm.tex @@ -0,0 +1,46 @@ +% +% GeneticAlgorithm.tex +% + + +% +% Genetic Algorithm +% +\section{Genetic Algorithm} +words + + +% +% Metaphor +% +\subsection{Metaphor} +words + +% +% Strategy +% +\subsection{Strategy} +words + + +% +% Operations +% +\subsection{Operations} +abstraction? 
+ + +% +% More Information +% +\subsection{Heuristics} +words + +% +% More Information +% +\subsection{More Information} +words + + +% EOF \ No newline at end of file diff --git a/deprecated/v0.1/Header/.cvsignore b/deprecated/v0.1/Header/.cvsignore new file mode 100755 index 00000000..b969c565 --- /dev/null +++ b/deprecated/v0.1/Header/.cvsignore @@ -0,0 +1,2 @@ +*.aux +*.cvsignore diff --git a/deprecated/v0.1/Header/Copyright.tex b/deprecated/v0.1/Header/Copyright.tex new file mode 100755 index 00000000..47f3dd69 --- /dev/null +++ b/deprecated/v0.1/Header/Copyright.tex @@ -0,0 +1,18 @@ +% +% Copyright.tex +% + + +\blanknonumber +\blanknonumber + +\vspace*{\fill} + +\begin{center} +% +\copyright\ 2008, Jason Brownlee +% + +\end{center} + +\vfill\vfill\vfill \ No newline at end of file diff --git a/deprecated/v0.1/Header/TOC.tex b/deprecated/v0.1/Header/TOC.tex new file mode 100755 index 00000000..c644bd24 --- /dev/null +++ b/deprecated/v0.1/Header/TOC.tex @@ -0,0 +1,19 @@ +% +% TOC.tex +% + +% +% For table of contents stuff +% + + +% only display chapters and sections +% more information here: http://web.image.ufl.edu/help/latex/intext.shtml +\setcounter{tocdepth}{1} + +% contents +\tableofcontents +% figures +\listoffigures\blanknonumber +% tables +\listoftables\blanknonumber \ No newline at end of file diff --git a/deprecated/v0.1/Header/Title.tex b/deprecated/v0.1/Header/Title.tex new file mode 100755 index 00000000..9b4b6bec --- /dev/null +++ b/deprecated/v0.1/Header/Title.tex @@ -0,0 +1,25 @@ +% +% Title.tex +% + +\begin{titlepage} +\begin{center} + +\vspace*{\fill} \Huge +Computational Intelligence +\\ +Algorithm Atlas +\\ +\vfill\vfill\Large +Jason Brownlee +\\ +\vfill\vfill +2008 +\\ +\vfill\vfill \normalsize + +\vfill + +\end{center} + +\end{titlepage} \ No newline at end of file diff --git a/deprecated/v0.1/Insights/.cvsignore b/deprecated/v0.1/Insights/.cvsignore new file mode 100755 index 00000000..b969c565 --- /dev/null +++ 
b/deprecated/v0.1/Insights/.cvsignore @@ -0,0 +1,2 @@ +*.aux +*.cvsignore diff --git a/deprecated/v0.1/Insights/Insights.tex b/deprecated/v0.1/Insights/Insights.tex new file mode 100755 index 00000000..5fa7598b --- /dev/null +++ b/deprecated/v0.1/Insights/Insights.tex @@ -0,0 +1,34 @@ +% +% Insights.tex +% + + +% +% Insights made from going through so many algorithms and abstractions +% + +\chapter{Insights} +\label{ch:insights} + +% +% Overview +% +\section{Chapter Overview} +\label{ch:insights:overview} +words + + + + +% +% Overview +% +\section{Chapter Summary} +\label{ch:insights:summary} +words + + + + + +% EOF \ No newline at end of file diff --git a/deprecated/v0.1/PSO/PSO.tex b/deprecated/v0.1/PSO/PSO.tex new file mode 100755 index 00000000..a0fbefa6 --- /dev/null +++ b/deprecated/v0.1/PSO/PSO.tex @@ -0,0 +1,35 @@ +% +% PSO.tex +% + + +% +% Particle Swarm Optimization +% +\chapter{Particle Swarm Optimization} +\label{ch:pso} + +% +% Chapter Overview +% +\section{Chapter Overview} +\label{sec:pso:overview} +what is this chapter all about + +% +% ALGORITHMS +% +\newpage\input{PSO/ParticleSwarmOptimisation} + + + +% +% Chapter Summary +% +\section{Chapter Summary} +\label{sec:pso:summary} +what was discussed, how does it link into the broader text? + + + +% EOF \ No newline at end of file diff --git a/deprecated/v0.1/PSO/ParticleSwarmOptimisation.tex b/deprecated/v0.1/PSO/ParticleSwarmOptimisation.tex new file mode 100755 index 00000000..7dfb50cd --- /dev/null +++ b/deprecated/v0.1/PSO/ParticleSwarmOptimisation.tex @@ -0,0 +1,46 @@ +% +% ParticleSwarmOptimisation.tex +% + + +% +% Particle Swarm Optimization +% +\section{Particle Swarm Optimization} +words + + +% +% Metaphor +% +\subsection{Metaphor} +words + +% +% Strategy +% +\subsection{Strategy} +words + + +% +% Operations +% +\subsection{Operations} +abstraction? 
+ + +% +% More Information +% +\subsection{Heuristics} +words + +% +% More Information +% +\subsection{More Information} +words + + +% EOF \ No newline at end of file diff --git a/deprecated/v0.1/definitions.tex b/deprecated/v0.1/definitions.tex new file mode 100755 index 00000000..3fa789a2 --- /dev/null +++ b/deprecated/v0.1/definitions.tex @@ -0,0 +1,30 @@ +% +% definitions.tex +% + +% +% A place for package definitions and new command definitions +% + + + +% +% PACKAGES +% + + + +% +% NEW COMMMANDS +% + +% new macro for starting a new page and changing the style to empty +% \newpage == ends the current page. +% \thispagestyle == works in the same manner as the \pagestyle, except that it changes the style for the current page only. +% empty == Produces empty heads and feet - no page numbers +\newcommand{\blanknonumber}{\newpage\thispagestyle{empty}} + + + + +% EOF \ No newline at end of file diff --git a/deprecated/v0.1/document.tex b/deprecated/v0.1/document.tex new file mode 100755 index 00000000..9b739773 --- /dev/null +++ b/deprecated/v0.1/document.tex @@ -0,0 +1,78 @@ +% +% document.tex +% + +% +% The master document for the book +% + +% document type +\documentclass[a4paper,10pt]{report} +% a file containing packages and new command definitions +\include{definitions} + + +% --- Include Only, Start +% remove/comment-out to build whole book + +% just build the aco chapter +% \includeonly{ACO/ACO} + +% --- Include Only, End + + + +% principle document definition +\begin{document} + +% HEADER + + % ensure numbering is roman + \pagenumbering{roman} + % title page + \include{Header/Title}\blanknonumber + % copyright + \include{Header/Copyright}\blanknonumber + % toc + \include{Header/Toc}\blanknonumber + +% BODY + + % set page numbers to arabic, reset to 1 + \pagenumbering{arabic} + % Background + \include{Background/Background} + % Ant Colony Optimisation + \include{ACO/ACO} + % Artificial Immune Systems + \include{AIS/AIS} + % Evolutionary Computation + 
\include{EC/EC} + % Particle Swarm Optimisation + \include{PSO/PSO} + + + % TODO + % Physics (eo, simmulaed annealing) + % Classical (simplex, other direct search) + % Enumerative (exhasitive, greedy, hill climber, random, etc) + + % Insights + \include{Insights/Insights} + + +% APPENDICES + + % change chapter name and counters (eg Chapter 1 -> Appendix A) + \appendix + % algorithm pseudo code + \include{Appendices/PseudoCode} + + +% BIBLIOGRAPHY + + \include{Bibliography/Bibliography} + +\end{document} + +% EOF \ No newline at end of file diff --git a/deprecated/v0.2/bitstringutils.rb b/deprecated/v0.2/bitstringutils.rb new file mode 100644 index 00000000..956a221a --- /dev/null +++ b/deprecated/v0.2/bitstringutils.rb @@ -0,0 +1,15 @@ +module BitstringUtils + + def random(length) + + end + + def decode_bcd(bitstring, min, max) + + end + + def decode_gray(bitstring, min, max) + + end + +end \ No newline at end of file diff --git a/deprecated/v0.2/book_about.txt b/deprecated/v0.2/book_about.txt new file mode 100755 index 00000000..3dc82741 --- /dev/null +++ b/deprecated/v0.2/book_about.txt @@ -0,0 +1,13 @@ +Book - About + + +- there is separation between inspiration and strategy because the terminology from the inspiration can muddy the explanation of the computational/algorithmic strategy +- good practice to adopt or map onto a new nomenclature - typically does not occur + - best practice, so it is promoted in this book + + +Inspiration : analogy or metaphor for a computational process +Strategy : abstraction of inspired metaphor with a problem, solution, and suggestion an computational system / algorithm +Procedure: specifics of realizing the strategy as a computation. 
how it is organized, data structures used, systemaization, parameterization +Heuristic: guidelines for using executing the strategy +Tutorial: example of realizing the approach on a problem - demonstrate strategy, realize the procedure, manifest the heuristics diff --git a/deprecated/v0.2/book_algorithm_selection.rb b/deprecated/v0.2/book_algorithm_selection.rb new file mode 100755 index 00000000..21ab8a90 --- /dev/null +++ b/deprecated/v0.2/book_algorithm_selection.rb @@ -0,0 +1,28 @@ +# Book Field +# Copyright (C) 2008 Jason Brownlee +# +# Change History +# 2008/12/11 JB Created + + +# Properties +# - diversity (subjective) +# - popularity (number of results in a google search) +# - classical (relative inception date) +# - state of the art (relative inception date) + + +require 'rubygems' +require 'cgi' +require 'open-uri' +require 'hpricot' + +# based on: http://snippets.dzone.com/posts/show/4133 +q = %w{genetic algorithm}.map { |w| CGI.escape(w) }.join("+") +url = "http://www.google.com/search?q=#{q}" +doc = Hpricot(open(url).read) +# lucky_url = (doc/"div[@class='g'] a").first["href"] +# system 'open #{lucky_url}' +puts doc +# puts (doc/"a[@swrnum='swrnum']").first + diff --git a/deprecated/v0.2/book_field.txt b/deprecated/v0.2/book_field.txt new file mode 100755 index 00000000..816e5f06 --- /dev/null +++ b/deprecated/v0.2/book_field.txt @@ -0,0 +1,28 @@ +Book Field +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/11 JB Created + +Generally +- contrasted with classical AI +- appropriate when classical techniques have been exhausted +- typically iterative which can be a slow way to do things, trade off - know nothing anyway + + +Computational Intelligence +- strategies for problem solving (the how rather than the what) + +Biologically Inspired Computing (Bio-Mimicry) +- strategies inspired by biology + +Natural Computing (Nature Inspired Algorithms) +- computations inspired by the natural world applied to problem solving + +Metaheuristics +- 
strategies for managing classical heuristics/OR + +Soft Computing +- messy approach to AI + + diff --git a/deprecated/v0.2/book_ga_survey.txt b/deprecated/v0.2/book_ga_survey.txt new file mode 100755 index 00000000..3eb83f04 --- /dev/null +++ b/deprecated/v0.2/book_ga_survey.txt @@ -0,0 +1,11 @@ +GA Tutorial Survey + +The genetic algorithm is perhaps the most popular algorithm described in the book. It provides a good test case for assessing the state of online tutorials for computational intelligence algorithms. + +Points: + + +Cases: + + + diff --git a/deprecated/v0.2/book_goal.txt b/deprecated/v0.2/book_goal.txt new file mode 100755 index 00000000..638843e1 --- /dev/null +++ b/deprecated/v0.2/book_goal.txt @@ -0,0 +1,10 @@ +Book Goal +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/11 JB Created + +* To have more CI algorithms in one place than anywhere else ever before +* To provide a cookbook of working algorithms for all of the popular techniques in CI +* To inspire, promote, market, evangelize the field of CI +* To be meaningful for amateurs and professionals alike diff --git a/deprecated/v0.2/book_motivation.txt b/deprecated/v0.2/book_motivation.txt new file mode 100755 index 00000000..19a91891 --- /dev/null +++ b/deprecated/v0.2/book_motivation.txt @@ -0,0 +1,26 @@ +Book Motivation +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/10 JB Created + +* Inspire: The inspirations, strategies, and algorithms are interesting, people should be excited by them +* Simplicity: The core of the approaches is always simple although almost always poorly communicated. The book is about getting back to basics and communicating the core ideas of each approach +* Consistency: No ego, no names, no marketing of research, just the strategy and a demonstration. All approaches communicated in the same consistent manner for comparison. 
+* Programmer Focus: Like an engineer, lip service is great after the fact, what is important upfront is a working example. A demonstration that shows what it is all about and what it is trying to achieve - the underlying principles. Once you have that, you can then read the literature and begin to build a map of the field in your head. PRinciple first, detail later - almost always communicated the other way around. +* More: More algorithms, presented more clearly than ever has been done before. The 'art of computer programming' for computational intelligence, the touch stone for now and forever + + + +Use Case: +1. pick an algorithm +2. do some searches on it (IEEE, google, scholar, etc) and you will get an abundance of papers, tutorials, variations, etc +3. what do you do? what is the algorithm when there 100 versions? what is it for when there are 100 different applications? + +solution: each approach as a core inspiration and a core strategy +with these two aspects in mind we can look at the procedure and an example implementation, and heuristics for using it +- we have a working copy, now if we want to push the bounds of an approach we have a platform from which to start +- focused look at algorithms, not dumbing down, platform for expansion, comparison, etc + + + diff --git a/deprecated/v0.2/book_problems.txt b/deprecated/v0.2/book_problems.txt new file mode 100755 index 00000000..e395bab0 --- /dev/null +++ b/deprecated/v0.2/book_problems.txt @@ -0,0 +1,43 @@ +Book Motivation +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/10 JB Created + + +A suite of problems that can be used as further exercises in extending algorithm implementations. Many of these problems are used in demonstrations. The goal is to find sample problems that are simple, easy to communicate, and that readers can relate to (have meaning). Likely exposed to them in high school science or mathematics. 
+ +Function Optimization (parametric) +- given a known functional form, find a set of coefficients for the function such that the value produce by the function is minimized or maximized + + +Logic (boolean concept learning) +- 11 multiplexer problem + + +Symbolic Regression (non-parametric) +- given a set of observations and the assumption that there relationships between the observations, find a symbolic function that maps one or a set of inputs to one or a set of outputs. + +- Pythagorean theorem (relationships between the lengths of triangles) + - http://en.wikipedia.org/wiki/Pythagorean_theorem +- Kepler’s Third Law + - http://www.dtreg.com/gep.htm +- Odd Parity + - http://www.dtreg.com/gep.htm + + +Symbolic Integration +- much like regression i'd expect + + +Function Approximation (non-parametric) +- given a set of of observations and the assumption that there are a relationship between the observations, find a sub-symbolic function that maps one or a set of the inputs to one or a set of outputs. 
+ +Controller +- Maze +- maxpath + +Combinatorial +- TSP +- bin packing +- N Queens diff --git a/deprecated/v0.2/book_toc.txt b/deprecated/v0.2/book_toc.txt new file mode 100755 index 00000000..3b19b0f8 --- /dev/null +++ b/deprecated/v0.2/book_toc.txt @@ -0,0 +1,137 @@ + +Inspired Algorithms: + +A Machine Learning Cookbook in Ruby +Computational Intelligence Recipes in Ruby +Machine Learning Recipes with the Ruby Programming Language + + +Preface (why I wrote this book and who it's for) + +Descriptive Problem Solving (multi-disciplinary field) +- Hard and Soft Artificial Intelligence +- Machine Learning, Data Mining, Alife +- Heuristics and Meta-heuristics +- Computational Intelligence +- Natural Computation +- Biologically Inspired Computation and Bio-mimicry +- No Free Lunch + +Ruby for Programmers (enough ruby to understand examples) +- Ruby Principles +- Procedural Programming (modules, conditions, loops, types) +- Object Oriented Programming (objects, classes, inheritance) +- Function Programming (procs, lambdas, examples) +- Meta Programming (conversations, questions, conventions) + +Presentation of Algorithms +- Algorithm Overview + - inspiration (cross-disciplinary) + - strategy (description, picture) + - procedure (math, pseudo, description) + - heuristics (how to use) + - further reading (where to go for more reading) +- Algorithm Example + - problem + - solution + - algorithm + - extensions + +Stochastic Algorithms +- Random Search +- Stochastic Hill Climbing +- Stochastic Diffusion Search +- Stochastic Tunneling +- Stochastic Gradient Descent +- Greedy Randomized Adaptive Search Procedure + +Evolutionary Algorithms +- Basic Algorithms + - Genetic Algorithm + - Evolution Strategies + - Evolutionary Programming + - Genetic Programming + - Differential Evolution +- Advanced Algorithms + - Grammatical Evolution + - Gene Expression Programming + - Learning Classifier System + - Messy Genetic Algorithm (fast-messy, gene expression-messy) + - Linkage Learning 
Genetic Algorithm + - Double Diploid Genetic Algorithm +- Population Structures + - Crowding Genetic Algorithm + - Fitness Sharing Genetic Algorithm + - Speciation Genetic Algorithm + - Cellular Genetic Algorithm + - Island Populations Genetic Algorithm + - Co-evolutionary Algorithm + +Probabilistic Algorithms +- Classical + - Cross Entropy Method + - Gaussian Adaptation +- Estimation of Distribution Algorithms + - Bayesian Optimization Algorithm + - Hierarchical Bayesian Optimization Algorithm + +Swarm Intelligence Algorithms +- Ant Colony Algorithms + - Ant Systems + - Ant Colony System +- Particle Swarm Algorithms + - Particle Swarm Optimization + - Repulsive Particle Swarm Optimization +- Bee Algorithms + - Honey Bee Algorithm + - Artificial Bee Algorithm +- Other + - Bacterial Algorithm + - Firefly Algorithm + - Wasp Algorithm + +Immune System Algorithms +- Negative Selection Algorithm +- Immune Network Algorithm +- Clonal Selection Algorithm +- Danger Algorithm + +Natural Algorithms +- Simulated Annealing +- Extremal Optimization +- Tabu Search +- Harmony Search Algorithm +- Cultural Algorithms +- Memetic Algorithm + +Artificial Neural Networks Algorithms +- Supervised + - Perceptron + - Back Propagation +- Recurrent + - Hopfield Network +- Unsupervised + - Neural Gas Algorithm + - Self Organizing Map + - Learning Vector Quantization +- Radial Basis Function +- Boltzmann Machine Algorithm +- Spiking Neural Network + +Advanced Topics +- Testing Algorithms in Ruby + - unit testing and TDD + - test + - rspec +- Visualizing Algorithms with R and GNU Plot + - visualizing a decision surface + - visualizing a cost surface + - visualizing candidate solutions +- Saving Results and Restarting Algorithms + - check pointing + - filesystem + - relational database +- Comparing Algorithm Results with R + - statistical hypothesis testing + - population distribution + - comparing populations diff --git a/deprecated/v0.2/cellulargeneticalgorithm_code.rb 
b/deprecated/v0.2/cellulargeneticalgorithm_code.rb new file mode 100755 index 00000000..01dd70d8 --- /dev/null +++ b/deprecated/v0.2/cellulargeneticalgorithm_code.rb @@ -0,0 +1,194 @@ +# Cellular Genetic Algorithm in Ruby +# Copyright (C) 2008 Jason Brownlee + +# Change History +# 2008/12/11 JB Created + +require 'utils' + +# a generic binary string solution +class BinarySolution + + attr_reader :genome + attr_accessor :fitness + + def initialize() + @fitness = Numbers::NAN + end + + def initialize_random(length) + @genome = Array.new(length) {|i| Random.next_bool ? "1" : "0"} + end + + def initialize_recombination(parent1, parent2) + length = parent1.genome.length + # select a cut position + cut = Random.next_int(length - 2) + 1 + # recombine the genomes with copy errors + @genome = Array.new(length) do |i| + (i= s2.fitness ? s1 : s2 + else + s1.fitness <= s2.fitness ? s1 : s2 + end + end + + def new_solution + s = BinarySolution.new + s.initialize_random @length + return s + end + + def new_solution_recombine parent1, parent2 + s = BinarySolution.new + s.initialize_recombination parent1, parent2 + return s + end + + def is_optimal? solution + return false if solution.nil? + return true if solution.fitness == @length + end + +end + + +class CellularGeneticAlgorithm + attr_reader :best_solution + + def evolve problem + # store problem + @problem = problem + # prepare the population and state + @best_solution = nil + @generation = 0 + edge = Math.sqrt(heuristic_population_size) + @population = Array.new(edge) {|i| Array.new(edge) {|j| @problem.new_solution} } + evaluate_pop_matrix(@population) + # evolve until stop condition is triggered + evolve_population(@population) until should_stop? 
+ end + + def evaluate_pop_matrix(pop) + pop.each do |row| + row.each do |cell| + evaluate_solution(cell) + end + end + end + + def evaluate_solution(cell) + @problem.cost(cell) + @best_solution = @problem.choose_better(@best_solution, cell) + end + + def evolve_population(pop) + # complete a fixed number of reproduction events + heuristic_population_size.times do + # select position + x, y = Random.next_int(pop.length), Random.next_int(pop[0].length) + # select random mate from the neighbourhood + mate = select_random_neighbour(pop, x, y) + # create offspring + offspring = @problem.new_solution_recombine(pop[x][y], mate) + evaluate_solution(offspring) + # compete for position in population + pop[x][y] = @problem.choose_better(pop[x][y], offspring) + end + # one more generation has completed + @generation += 1 + puts "#{@generation}, best: #{@best_solution}" + end + + # N, S, E, W (toroid) + def select_random_neighbour(pop, x, y) + neighbour = nil + case rand(4) + # north + when 0: neighbour= (x==0) ? pop[pop.length-1][y] : pop[x-1][y] + # south + when 1: neighbour= (x==(pop.length-1)) ? pop[0][y] : pop[x+1][y] + # west + when 2: neighbour= (y==0) ? pop[x][pop[x].length-1] : pop[x][y-1] + # east + when 3: neighbour= (y==pop[x].length-1) ? pop[x][0] : pop[x][y+1] + end + return neighbour + end + + def should_stop? 
+ @problem.is_optimal?(@best_solution) or (@generation >= heuristic_total_generations) + end + + def heuristic_total_generations + 1000 + end + + def heuristic_population_size + 100 + end + + def heuristic_selection_num_bouts + 3 + end + +end + + +# run it +Random.seed(Time.now.to_f) +problem = OneMaxProblem.new(50) +algorithm = CellularGeneticAlgorithm.new +algorithm.evolve(problem) +puts "Best Found: #{algorithm.best_solution}" + diff --git a/deprecated/v0.2/cellulargeneticalgorithm_overview.txt b/deprecated/v0.2/cellulargeneticalgorithm_overview.txt new file mode 100755 index 00000000..b4f4d521 --- /dev/null +++ b/deprecated/v0.2/cellulargeneticalgorithm_overview.txt @@ -0,0 +1,35 @@ +Cellular Genetic Algorithm Overview +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/11 JB Created + +Inspiration +: The inspiration for cellular genetic algorithms is the theory of evolution by means of natural selection over a spatial environment. + +Organisms live, interact, and compete with each other in a spacial environment. The inspiration for the cellular or diffusion genetic algorithm is spatially mediated interaction between individuals within a population. + +Strategy +: The strategy of the cellular genetic algorithm involves the projection of a population onto a plain which mediates interaction between the individuals for selection, recombination, and competition. + +The Cellular Genetic Algorithm is a distributed strategy for the genetic algorithm. The population of candidate solutions is projected onto a spatial structure (like a lattice) and each position in the structure is governed by an independent process. The extreme localization of the approach (to the individual level) and the interaction in spatially local neighborhoods shows a string resemblance to cellular automata, hence the name 'cellular'. The strategy was designed for use on massively parallel hardware. 
+ +Procedure + +The Cellular Genetic Algorithm is concerned with managing a spatially distributed population of candidate solutions. Solutions behave autonomously, interacting with their local neighborhoods to select mates to reproduce with. The recombination, mutation, and selection genetic operators are typically used meaning that the algorithm is almost always implemented as a variation of the genetic algorithm. + +Selection typically involves an individual and an mate from the local neighborhood such as the best, randomly selected or the winner of a binary (two member) tournament. An offspring is created from the recombination of the parent and mate with low frequency copying errors. + +The distributed population and semi-independence of individuals means that a processor is assigned per individual candidate solution. Spatial population structures and neighborhoods are constrained by the underlying massively parallel hardware such as the MIMD SMID parallel paradigms. Common structures include a grid or lattice as well as ring structures. Interactions between individual candidate solutions are minimized because in a parallel computing environment that require synchronization and message passing. As such neighborhoods are typically small and localized to neighboring cells in the population structure such as 4 or 8 points on the compass in a lattice configuration. + +Heuristics +* The approach may be applied to on parallel hardware where one CPU is assigned per 'individual' (position in the spatial population structure). +* Almost always implemented using a genetic algorithm because it uses selection and recombination, although other algorithms that recombine solutions may be used in the strategy. +* Typically small neighborhood sizes are chosen to minimize interaction between individuals which requires synchronization in parallel processing environments. 
+ +Further Reading +* Chapter 16, Diffusion (cellular) models (EC2) +* Cellular Genetic Algorithms, Operations Research/Computer Science Interfaces Series (2008) + - http://www.springer.com/business/operations+research/book/978-0-387-77609-5 +* Cellular Genetic Algorithms (1993) + - related to a cellular automata \ No newline at end of file diff --git a/deprecated/v0.2/cellulargeneticalgorithm_tutorial.txt b/deprecated/v0.2/cellulargeneticalgorithm_tutorial.txt new file mode 100755 index 00000000..fa7b0928 --- /dev/null +++ b/deprecated/v0.2/cellulargeneticalgorithm_tutorial.txt @@ -0,0 +1,101 @@ +Cellular Genetic Algorithm Tutorial +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/11 JB Created + +Introduction +This tutorial demonstrates an implementation of the cellular genetic algorithm applied to the OneMax test function. + +Problem +The problem is called OneMax and involves counting the number of 1's in a binary string. The optimal result for problem is L, where L is a parameter that defines the length of the strings used by the problem. + +def count_ones(bitstring) + bitstring.inject(0) {|sum, x| sum + ((x=='1') ? 1 : 0)} +end + +Solution +A solution is defined as a binary string and a fitness value. New candidate solutions are created as random binary strings. + +def initialize_random(length) + @genome = Array.new(length) {|i| Random.next_bool ? "1" : "0"} +end + +During the execution of an algorithmic run, new solutions are created by recombining the genetic material of two parents. A recombination scheme from genetic algorithms called one-point crossover is used. This involves selecting a random point along the fixed length chromosome, then reading the genetic material off the one parent, onto the child, half from the first parent, and half from the second. During this coping or transcription process errors are introduced with a low frequency providing an additional level of variation. 
+ +def initialize_recombination(parent1, parent2) + length = parent1.genome.length + # select a cut position + cut = Random.next_int(length - 2) + 1 + # recombine the genomes with copy errors + @genome = Array.new(length) do |i| + (i s2.fitness ? s1 : s2 + end + + def new_solution + s = BinarySolution.new(@min, @max) + s.initialize_random(@length) + return s + end + + def new_solution_recombine(parent1, parent2) + s = BinarySolution.new(@min, @max) + s.initialize_recombination(parent1, parent2) + return s + end + + def num_optima + @optimal_coords.length + end + + def found_all_optima?(population) + num_optima_found(population) == num_optima + end + + def num_optima_found(population) + found = Array.new(num_optima) {|i| false} + # mark all found optima + population.each do |s| + @optimal_coords.each_with_index {|v, i| found[i]=true if (v-s.phenotype).abs<=0.00001} + end + return found.inject(0) {|sum, x| sum + (x ? 1 : 0)} + end + +end + + + +class DeterministicCrowdingGeneticAlgorithm + attr_reader :population + + def evolve problem + # store problem + @problem = problem + @population = Array.new(heuristic_population_size) {|i| @problem.new_solution} + # evaluate the base population + @population.each {|s| @problem.cost(s)} + # evolve until stop condition is triggered + @generation = 0 + next_generation(@population) until stop_triggered? + end + + def stop_triggered? 
+ (@generation==heuristic_total_generations) or @problem.found_all_optima?(@population) + end + + def next_generation(pop) + # shuffle the population + # Random.shuffle_array(pop) + # the entire population participates in reproduction + (pop.length/2).times do |i| + # select parents [[0,1],[2,3],etc] + a = (i*2) + b = (i*2)+1 + p1 = pop[a] + p2 = pop[b] + # create offspring + o1 = @problem.new_solution_recombine(p1, p2) + o2 = @problem.new_solution_recombine(p2, p1) + # evaluate + @problem.cost(o1) + @problem.cost(o2) + # compete for positions in the population based on similarity then fitness + if (p1.dist(o1) + p2.dist(o2)) <= (p1.dist(o2) + p2.dist(o1)) + pop[a] = @problem.choose_better(p1, o1) + pop[b] = @problem.choose_better(p2, o2) + else + pop[a] = @problem.choose_better(p1, o2) + pop[b] = @problem.choose_better(p2, o1) + end + end + # one more generation has completed + @generation += 1 + puts "#{@generation}, avg(#{average_fitness}), found #{@problem.num_optima_found(@population)}/#{@problem.num_optima}" + end + + def heuristic_total_generations + 1000 + end + + def heuristic_population_size + @problem.num_optima * 50 + end + + def average_fitness + @population.inject(0) {|sum, x| sum+x.fitness} / @population.length.to_f + end + +end + + +# run it +Random.seed(Time.now.to_f) +problem = M1.new +algorithm = DeterministicCrowdingGeneticAlgorithm.new +algorithm.evolve problem +puts "Finished, Optima Found: #{problem.num_optima_found(algorithm.population)}/#{problem.num_optima}" + diff --git a/deprecated/v0.2/crowdinggeneticalgorithm_overview.txt b/deprecated/v0.2/crowdinggeneticalgorithm_overview.txt new file mode 100755 index 00000000..eac581c4 --- /dev/null +++ b/deprecated/v0.2/crowdinggeneticalgorithm_overview.txt @@ -0,0 +1,39 @@ +Crowding Genetic Algorithm Overview +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/12 JB Created + +Inspiration +: The inspiration for the crowding genetic algorithm is the theory of evolution in the context of 
ecological niches. + +The theory of evolution describes the diversity of species, and the new synthesis of evolutionary theory integrates a mathematical perspective of genetics with the theory. An aspect of this synthesis from population ecology is the notion of a niche, a habitat to which a species or population is adapted to suit. + +The crowding genetic algorithm is inspired by the theory of evolution, genetics as the mechanism of evolution, the genotypic-phenotypic separation for genetic operators and selection, and finally the specialization of sub-populations of a species to ecological niches in an environment. + +Strategy +: The strategy of the crowding genetic algorithm is for offspring to compete with their parents based on genotypic or phenotypic similarity and phenotypic fitness, which results in the emergence of sub-populations that specialize to + +The crowding genetic algorithm belongs to a class called niching genetic algorithms. The strategy of niching the strategy is used to locate and maintain multiple good solutions to a given problem inspired by the way a population of organisms diverge into sub populations that locate and inhabit multiple ecological niches in a natural environment. The crowding strategy for niching genetic algorithms involve the use genotypic and phenotypic similarity combined with phenotypic fitness to elicit a niching effect within a population of candidate solutions. Divergence is promoted by competitive in-breeding within genotypically or phenotypically similar groups of individuals within the population. + +Procedure +The crowding algorithm is conventionally realizing as an extension to the genetic algorithm. New solutions are created using a recombination operator and coping low frequency errors are introduced during the reproduction process. Competition for survival typically occurs during the replacement (integration) of offspring into the existing population of candidate solutions. 
This means that most or all of the population are given the opportunity to contribute genetic material to the subsequent generation, although similarity and objective fitness during replacement govern actual contributions. + +Similarity measures are representation specific. A genotypic similarity measure for binary strings would be Hamming distance (number of different bits). A phenotypic similarity measure for real-valued vectors would be Euclidean distance (root of the sum of the squared differences). + +Heuristics +* Niching methods are appropriate for problem domains that require the location and maintenance of multiple good solutions (a diverse set of approximate solutions to the problem). Examples of such problem domains in function optimization that have multiple optima are called multi-modal optimization problems. +* The size of the subpopulations maintained at each niche is not proprtional to the objective fitness of the niche in the problem space, but rather is proprtional to the regions basin of attraction (easiness of detecting the broader structure of the niche, like the slopes on a conical optima). +* The more niches (solutions) that are desired, the large the population needs to be set such that subpopulations can be formed and maintained around located optima. +* There are problems related to miss-leading optima, that is good solutions that are attractive, but not as good as the best optima which are harder to find (pre mature convergence - start exploiting before sufficient exploration has occurred) +* It can be difficult to identify and extract the subpopulations (multiple good solutions) from the population at the end of the run. Clustering methods can be used based on genitypic/phenotypic simiularity and objective fitness. 
+ + +Further Reading +* EC, Chapter 13 - Niching Methods +* There are a number of crowding based algorithms + * original crowding algorithm by De Jong proposed as a diversity maintenance method for a GA + * DC by Mahfoud who defined the field + * RTS by Harrik + * Probablistic Crowding +* other niching methods such as fitness sharing, sequencial niching. \ No newline at end of file diff --git a/deprecated/v0.2/crowdinggeneticalgorithm_tutorial.txt b/deprecated/v0.2/crowdinggeneticalgorithm_tutorial.txt new file mode 100755 index 00000000..8c83f28e --- /dev/null +++ b/deprecated/v0.2/crowdinggeneticalgorithm_tutorial.txt @@ -0,0 +1,122 @@ +Crowding Genetic Algorithm Tutorial +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/12 JB Created + +Overview +This tutorial provides an example of the deterministic crowding genetic algorithm, the seminal approach for crowding based genetic algorithms. The algorithm is applied to a multi-modal optimization problem called M1 from "Niching Methods for Genetic Algorithms" (1995). + +Problem +The problem is a non-linear programming problem with 5 optimal solutions [0.1, 0.3, 0.5, 0.7, 0.9] all with the fitness of 1.0. As such this problem (M1) is a one-dimensional maximizing multi-modal, with five peaks. + +def calculate(x) + Math.sin(5.0 * Math::PI * x) ** 6.0 +end + +Solution +The solution is modelled using a bitstring genome which is decoded into a floating point value in the range [0,1]. New solutions are initialized with random strings. + +def initialize_random(length) + @genome = Array.new(length) {|i| Random.next_bool ? "1" : "0"} +end + +During a run new solutions are created by recombining two parents. One point crossover is used with mutation implemented during the coping process. 
+ +def initialize_recombination(parent1, parent2) + length = parent1.genome.length + # select a cut position + cut = Random.next_int(length - 2) + 1 + # recombine the genomes with copy errors + @genome = Array.new(length) do |i| + (i max + return x + end + + def to_s + "f=(#{@fitness})" + end + + def heuristic_crossover_factor + 0.9 + end + + def heuristic_weighting_Factor + 0.8 + end + +end + +class RosenbrocksValleyFunction + attr_reader :dimensions, :min, :max + + def initialize(dimensions=2) + @dimensions = dimensions + @min = -2.048 + @max = +2.048 + end + + def cost(solution) + solution.fitness = calculate(solution.vector) + end + + # f2(x)=sum(100·(x(i+1)-x(i)^2)^2+(1-x(i))^2) i=1:n-1; -2.048<=x(i)<=2.048. + def calculate(v) + sum = 0.0 + v.each_with_index do |x, i| + sum += 100 * (((v[i+1] - (x**2.0)) ** 2.0) + ((1.0 - x) ** 2)) if i < v.length-1 + end + return sum + end + + def is_optimal?(solution) + return false if solution.nil? + return solution.fitness <= known_optimal_fitness + end + + def known_optimal_fitness + # f(x)=0; x(i)=1, i=1:n. + calculate(Array.new(@dimensions){|i| 1.0 }) + end + + def is_maximizing? + return false + end + + def choose_better s1, s2 + return s2 if s1.nil? + return s1 if s2.nil? + # minimizing + return (s1.fitness <= s2.fitness) ? 
s1 : s2 + end + + def new_solution + s = DESolution.new + s.initialize_random(@dimensions, @min, @max) + return s + end + + def new_solution_offspring(p0, p1, p2, p3) + s = DESolution.new + s.initialize_offspring(p0, p1, p2, p3, @dimensions, @min, @max) + return s + end +end + +class DifferentialEvolutionAlgorithm + attr_reader :problem, :population, :generation, :best_solution + + def evolve(problem) + # initialize the system + @problem = problem + @best_solution = nil + @generation = 0 + # prepare the initial population + @population = Array.new(heuristic_population_size) { |i| @problem.new_solution } + # evaluate the initial population + evaluate_population(population) + # evolve until stop condition is triggered + @population = evolve_population(@population) until should_stop? + end + + def evaluate_population(pop) + pop.each do |solution| + @problem.cost(solution) + @best_solution = @problem.choose_better(@best_solution, solution) + end + end + + def evolve_population(population) + # create offspring + offspring = Array.new(population.length) + population.each_with_index do |current, index| + p1 = p2 = p3 = -1 + p1 = rand(population.length) until p1!=index + p2 = rand(population.length) until p2!=index and p2!=p1 + p3 = rand(population.length) until p3!=index and p3!=p1 and p3!=p2 + offspring[index] = @problem.new_solution_offspring(current, population[p1], population[p2], population[p3]) + end + # evaluate + evaluate_population(offspring) + # compete for survival + new_population = Array.new(population.length) {|i| @problem.choose_better(population[i], offspring[i])} + # one more generation has completed + @generation += 1 + puts "generation:#{@generation}, #{@best_solution}" + return new_population + end + + def should_stop? 
+ return true if @problem.is_optimal?(@best_solution) + return true if generation >= heuristic_total_generations + return false + end + + def heuristic_total_generations + 1000 + end + + def heuristic_population_size + @problem.dimensions * 10 + end + +end + + +# problem test +srand(Time.now.to_f) +problem = RosenbrocksValleyFunction.new(5) +algorithm = DifferentialEvolutionAlgorithm.new +algorithm.evolve(problem) +puts "Finished, best solution found: #{algorithm.best_solution}" +puts "Known Optimal Fitness: #{problem.known_optimal_fitness}" \ No newline at end of file diff --git a/deprecated/v0.2/differentialevolution_overview.txt b/deprecated/v0.2/differentialevolution_overview.txt new file mode 100755 index 00000000..22a9bd19 --- /dev/null +++ b/deprecated/v0.2/differentialevolution_overview.txt @@ -0,0 +1,44 @@ +Differential Evolution Overview +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/08 JB Completed first pass +2008/12/07 JB Created + +Inspiration +: Differential Evolution is inspired by the theory of evolution by means of natural selection. + +Strategy +: The strategy for Differential Evolution involves perturbing existing candidate solutions based on the weighted difference between existing candidate solutions. + +The Differential Evolution (DE) algorithm involves maintaining a population of candidate solutions that undergo generations of recombination, evaluation, and selection. The recombination strategy is quite different to similar evolutionary algorithms involving the creation of new candidate solution components based on the weighted difference between two randomly selected population members added to a third population member (so-called classic DE). This approach perturbs population members relative to the spread of the broader population. In conjunction with selection, the perturbation effect self-organizes or bounds the resampling of the problem space to known areas of interest. 
+ +Procedure +The Differential Evolution algorithm involves the maintenance of a population of candidate solutions that are exposed to repeated rounds of recombination, evaluation and selection referred to as generations. + +The algorithm has a specialized nomenclature for each variation of the technique describing the specifics of the configuration used in the variation. This takes the form of DE/x/y/z, where x represents the solution to be perturbed such as random or best (in the population). The y signifies the number of difference vectors used in the perturbation of x, where a difference vector is essentially the difference between two randomly selected although distinct members of the population. Finally, z signifies the recombination operator performed such as bin for binomial and exp for exponential. + +The classical DE is noted as DE/rand/1/bin, whereas a variation of this approach that creates new solutions as perturbations of the best solution in the generation is noted as DE/best/1/bin. 
+ + +Heuristics +* The number of parents (NP) should be 10 times the number of parameters being optimized +* Use a weighting factor (F) of 0.8 +* Use a crossover rate (CR) of 0.9 +* Initialize new vectors anywhere in the valid search space +* Increases to NP should decrease F and vice-versa +* Higher crossover for DE/rand/1/bin than for DE/rand/1/exp +* CR and F almost always in [0.5, 1.0] +* Common approaches: DE/rand/1/ DE/best/2/ + +Further Reading + +* Differential Evolution - In Search of Solutions (2006) +* Differential Evolution - A Practical Approach to Global Optimization (2005) +* Advances in Differential Evolution () +* New Ideas in Optimization (1999), great section on DE +* New Optimization Techniques in Engineering + +Web +* Sample Code: http://www.icsi.berkeley.edu/~storn/code.html +* Bibliography: http://www2.lut.fi/~jlampine/debiblio.htm diff --git a/deprecated/v0.2/differentialevolution_tutorial.txt b/deprecated/v0.2/differentialevolution_tutorial.txt new file mode 100755 index 00000000..e42f4f75 --- /dev/null +++ b/deprecated/v0.2/differentialevolution_tutorial.txt @@ -0,0 +1,94 @@ +Differential Evolution Tutorial +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/08 JB Created + +Introduction +This tutorial demonstrates how to implement the classical differential evolution algorithm also known as DE/rand/1/bin. The algorithm is applied to a non-linear programming problem with continuous parameters called Rosenbrock's Valley. + +Problem +The problem is a non-linear programming problem called Rosenbrock's Valley that defines an equation with a number of continuous-valued parameters. The scope of valid parameters is between -2.048 and 2.048, and parameters are added linearly and as such can be scaled to increase the difficulty of the problem. The optimal solution is located at 1.0 in each dimension and evaluates to 0.0 in this minimization problem. 
+ +def calculate(v) + sum = 0.0 + v.each_with_index do |x, i| + sum += 100 * (((v[i+1] - (x**2.0)) ** 2.0) + ((1.0 - x) ** 2)) if i < v.length-1 + end + return sum +end + +Solution +A candidate solution to this problem is defined as a vector of floating point values, one for each dimension of the problem. A new candidate solution is initialized to a random position within the constraints of the problem definition. + +def initialize_random(length, min, max) + # random objective parameters + @vector = Array.new(length) {|i| Random.next_float_bounded(min, max)} +end + +During recombination, a new candidate solution is created from and competes with each member of the population p0. The recombination operator takes three randomly selected although distinct population members. During the creation of the offspring, at least one crossover event must occur, where as the average event is defined by a crossover rate. When a crossover event occurs, new values are calculated as the value from one of the randomly selected population members (P3) perturbed by a weighted difference vector between two other population members (P1 and P2). + +def initialize_offspring(p0, p1, p2, p3, length, min, max) + @vector = Array.new(length) + forced_cross = rand(length) + @vector.fill do |i| + if (i==forced_cross or rand < heuristic_crossover_factor) + transcribe(p3.vector[i] + heuristic_weighting_Factor * (p1.vector[i] - p2.vector[i]), min, max) + else + p0.vector[i] + end + end +end + +The effect is that points are projected based on the average spread of the population. Selection within the algorithm means that the spread of the population will be reduced over time to areas of interest within the problem space. The weighted difference vector approach therefore self-organizes re-sampling of the problem space, making use of existing information in a way quite different from most other evolutionary search algorithms. 
+ +Newly assigned values pass through a transcribe function that ensures that the values are within the constraints of the problem definition. + +def transcribe(x, min, max) + return min if x < min + return max if x > max + return x +end + +Algorithm +The algorithm involves the initialization and evolution of a population of candidate solutions over a number of generations. + +def evolve(problem) + # initialize the system + @problem = problem + @best_solution = nil + @generation = 0 + # prepare the initial population + @population = Array.new(heuristic_population_size) { |i| @problem.new_solution } + # evaluate the initial population + evaluate_population(population) + # evolve until stop condition is triggered + @population = evolve_population(@population) until should_stop? +end + +A single generation involves creating a new candidate offspring from and to compete with each member of the population. This involves selecting three random although distinct candidate solutions from the population to participate in recombination for each candidate solution in the current population. 
+ +def evolve_population(population) + # create offspring + offspring = Array.new(population.length) + population.each_with_index do |current, index| + p1 = p2 = p3 = -1 + p1 = rand(population.length) until p1!=index + p2 = rand(population.length) until p2!=index and p2!=p1 + p3 = rand(population.length) until p3!=index and p3!=p1 and p3!=p2 + offspring[index] = @problem.new_solution_offspring(current, population[p1], population[p2], population[p3]) + end + # evaluate + evaluate_population(offspring) + # compete for survival + new_population = Array.new(population.length) {|i| @problem.choose_better(population[i], offspring[i])} + # one more generation has completed + @generation += 1 + puts "generation:#{@generation}, #{@best_solution}" + return new_population +end + +Summary +The algorithm is deceptively simple, and the weighted difference vector approach to re-sampling the solution space requires some careful consideration. Frankly it amazes me that the approach works at all, let alone as competitive as it does. + +Natural extensions to this implementation are to implement the other DE approaches. There are some great code examples in 'new ideas in optimization' as well as a good listing in 'New Optimization Techniques in Engineering'. 
diff --git a/deprecated/v0.2/evolutionprogramming_code.rb b/deprecated/v0.2/evolutionprogramming_code.rb new file mode 100755 index 00000000..1a6d6fde --- /dev/null +++ b/deprecated/v0.2/evolutionprogramming_code.rb @@ -0,0 +1,218 @@ +# Evolutionary Programming in Ruby +# Copyright (C) 2008 Jason Brownlee + +# Change History +# 2008/12/05 JB Created + +require 'utils' + + +class EPSolution + attr_reader :objective_params, :strategy_params + attr_accessor :fitness, :wins + + def initialize() + @fitness = Numbers::NAN + @wins = 0 + end + + def initialize_random(length, min, max) + # random objective parameters + @objective_params = Array.new(length) {|i| Random.next_float_bounded(min, max)} + # random strategy parameters + @strategy_params = Array.new(length) {|i| Random.next_float_bounded(0, (max-min).to_f/2.0)} + end + + def initialize_offspring(parent, length, min, max) + # populate strategy parameters + g = Random::next_gaussian + @strategy_params = Array.new(length) do |i| + transcribe_strategy(parent.strategy_params[i], g, length) + end + # populate objective values + @objective_params = Array.new(length) do |i| + transcribe_objective(parent.objective_params[i], @strategy_params[i], min, max) + end + end + + def transcribe_strategy(x, g, dimensions) + x * Math::exp((heuristic_rprime(dimensions) * g) + (heuristic_r(dimensions) * Random::next_gaussian)) + end + + def transcribe_objective(x, stdev, min, max) + o = x + stdev * Random::next_gaussian + o = min if o < min + o = max if o > max + return o + end + + def <=>(solution) + @fitness <=> solution.fitness + end + + def to_s + "f=(#{@fitness})" + end + + def heuristic_r(dimensions) + Math::sqrt(2.0 * Math::sqrt(dimensions)) ** -1.0 + end + + def heuristic_rprime(dimensions) + Math::sqrt(2.0 * dimensions) ** -1.0 + end +end + +class RastriginFunction + attr_reader :dimensions, :min, :max + + def initialize(dimensions=2) + @dimensions = dimensions + @min = -5.12 + @max = +5.12 + end + + def cost(solution) + 
solution.fitness = calculate(solution.objective_params) + end + + # f(x)=10·n+sum(x(i)^2-10·cos(2·pi·x(i))), i=1:n; -5.12<=x(i)<=5.12. + def calculate(v) + v.inject(10.0 * @dimensions.to_f) {|sum, x| sum + (x**2.0) - 10.0 * Math.cos(2.0 * Math::PI * x) } + end + + def is_optimal?(solution) + return false if solution.nil? + return solution.fitness <= known_optimal_fitness + end + + def known_optimal_fitness + # f(x)=0; x(i)=0, i=1:n. + calculate(Array.new(@dimensions){|i| 0.0 }) + end + + def is_maximizing? + return false + end + + def choose_better s1, s2 + return s2 if s1.nil? + return s1 if s2.nil? + # minimizing + return (s1.fitness <= s2.fitness) ? s1 : s2 + end + + def is_better?(original, other) + return true if other.fitness < original.fitness + return false + end + + def new_solution + s = EPSolution.new + s.initialize_random(@dimensions, @min, @max) + return s + end + + def new_solution_offspring(parent) + s = EPSolution.new + s.initialize_offspring(parent, @dimensions, @min, @max) + return s + end +end + +class EvolutionaryProgrammingAlgorithm + attr_reader :problem, :population, :generation, :best_solution + + def evolve(problem) + # store problem + @problem = problem + # prepare the initial population + initialize_population + # evaluate the initial population + evaluate_population(population) + # evolve until stop condition is triggered + while !should_stop? 
do + # create the new population + @population = evolve_population(@population) + end + end + + def initialize_population + @best_solution = nil + @generation = 0 + @population = Array.new(heuristic_population_size) { |i| @problem.new_solution } + end + + def evaluate_population(pop) + pop.each do |solution| + @problem.cost(solution) + @best_solution = @problem.choose_better(@best_solution, solution) + end + end + + def evolve_population(population) + # recombine and mutate + offspring = Array.new(population.length) do |i| + @problem.new_solution_offspring(population[i]) + end + # evaluate the newly created candidate solutions + evaluate_population(offspring) + # combine the existing and new populations + union = population + offspring + # let the solutions compete for survival + competitive_tournaments(union) + # shuffle the union in case few solutions win + Random.shuffle_array(union) + # order the union by wins desc + union.sort! { |a,b| b.wins <=> a.wins } + # select the winners for the new population + winners = union[0...population.length] + # one more generation has completed + @generation += 1 + puts "generation:#{@generation}, #{@best_solution}" + return winners + end + + def competitive_tournaments(pop) + # clear wins + pop.each {|s| s.wins = 0 } + # to get a point, each solution must better than a set of random peers + pop.each do |s| + better = true + heuristic_num_opponents.times do |i| + pos = Random::next_int(pop.length) + better = false if @problem.is_better?(s, pop[pos]) + end + s.wins += 1 if better + end + end + + def should_stop? 
+ return true if @problem.is_optimal?(@best_solution) + return true if generation >= heuristic_total_generations + return false + end + + def heuristic_total_generations + 1000 + end + + def heuristic_population_size + 50 + end + + def heuristic_num_opponents + 5 + end + +end + + +# problem test +Random.seed(Time.now.to_f) +problem = RastriginFunction.new(2) +algorithm = EvolutionaryProgrammingAlgorithm.new +algorithm.evolve(problem) +puts "Finished, best solution found: #{algorithm.best_solution}" +puts "Known Optimal Fitness: #{problem.known_optimal_fitness}" + diff --git a/deprecated/v0.2/evolutionprogramming_overview.txt b/deprecated/v0.2/evolutionprogramming_overview.txt new file mode 100755 index 00000000..1f08abde --- /dev/null +++ b/deprecated/v0.2/evolutionprogramming_overview.txt @@ -0,0 +1,34 @@ +Evolutionary Programming Overview +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/05 JB Created + +Inspiration +: The inspiration for evolutionary programming is the theory of evolution by means of natural selection. + +Strategy +: The evolutionary programming strategy is focused on the product of the evolutionary process for problem solving. + +The Evolutionary Programming (EP) algorithm is focused on evolution at the species level as a method of artificial intelligence, contrasted with heuristic-driven approaches. The strategy is concerned with the macro properties of evolution (adaptive fit in an environment) over micro concerns such as manipulating and concerning sub-symbolic information in artificial genomes. + +Procedure +Traditional realizations of EP make use of a variation operator applied to each member of the population independently (no recombination operator for example) and a competitive selection process between parents and offspring for the limited number of positions in the subsequent generation. The algorithm models problems using domain specific representations. 
+ +Initialize the population +While not stop condition + Evaluate + Select breeding set + Create offspring + Select new population from union of old and new populations +End + +More recently the field has sought to merge with the broader field of evolutionary computation, incorporating conventional genetic operators and application to a broader set of problem domains. As such, modern EP bears a strong resemblance to ES given the now standard use of self-adaptive variance operators. + +Heuristics +* Heuristics to govern the mutation of self-adaptive strategy variables as well as objective variables + +Further Reading +* Evolutionary programming made faster (1999) +* Book by L Fogel: Artificial Intelligence through Simulated Evolution +* EP on Scholarpedia: http://www.scholarpedia.org/article/Evolutionary_programming \ No newline at end of file diff --git a/deprecated/v0.2/evolutionprogramming_tutorial.txt b/deprecated/v0.2/evolutionprogramming_tutorial.txt new file mode 100755 index 00000000..ec49e962 --- /dev/null +++ b/deprecated/v0.2/evolutionprogramming_tutorial.txt @@ -0,0 +1,110 @@ +Evolutionary Programming Tutorial +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/05 JB Created + +Introduction +This tutorial provides a guide for implementing the Evolutionary Programming algorithm applied to the non-linear programming problem with continuous parameters called Rastrigin's Function. + +Problem +The problem is a non-linear programming problem that defines a function with a set of unknown parameters. Such problems are common for testing optimization problems because the number of parameters can be varied, to vary the difficulty of the problem. + +def calculate(v) + (10.0 * @dimensions.to_f) + v.inject(0) {|sum, x| (x**2.0) - 10.0 * Math.cos(2.0 * Math::PI * x) } +end + +The problem space is a hypercube with the boundaries of -5.12 and +5.12. The solution to the problem is known where each objective parameter has the value '0'. 
+ +Solution +A solution to the problem is represented by an array of floating point numbers (vector). Associated with the solution and its fitness scoring is an array of strategy parameters, one for each objective parameter. These strategy parameters control the variance genetic operator applied to each objective parameter. + +The population is seeded with candidate solutions initialized with random objective values from within the search space. During the execution of the search, each candidate solution creates a progeny candidate solution from the parent's objective and strategy vectors. The variation operator is introduced per-objective parameter and implemented in the transcription of the vectors. + +def initialize_offspring(parent, length, min, max) + # populate strategy parameters + g = Random::next_gaussian + @strategy_params = Array.new(length) do |i| + transcribe_strategy(parent.strategy_params[i], g, length) + end + # populate objective values + @objective_params = Array.new(length) do |i| + transcribe_objective(parent.objective_params[i], @strategy_params[i], min, max) + end +end + +The mutation of the strategy occurs based on some heuristics proportional to the number of parameters in the problem. + +def transcribe_strategy(x, g, dimensions) + x * Math::exp((heuristic_rprime(dimensions) * g) + (heuristic_r(dimensions) * Random::next_gaussian)) +end + +Given that a solution is represented by floating point numbers, a Gaussian-based mutation operator is applied. This operator involves re-sampling an objective parameter using a Normal distribution where the current value represents the mean, and the standard deviation is determined by the corresponding strategy parameter. New objective values are bounds tested to ensure they are valid in the context of the problem (within the constraints of the problem's parameter space). 
+ +def transcribe_objective(x, stdev, min, max) + o = x + stdev * Random::next_gaussian + o = min if o < min + o = max if o > max + return o +end + +Algorithm +The Evolutionary Programming algorithm is implemented as a reusable system for executing the strategy on a problem and producing an approximation of the optimal solution. The strategy is executed by calling the evolve function that initializes the population and executes each generation until the stop condition is met. In this case, the stop condition is defined as a set number of generations or whether the known optimal solution has been discovered. + +def evolve(problem) + # store problem + @problem = problem + # prepare the initial population + initialize_population + # evaluate the initial population + evaluate_population(population) + # evolve until stop condition is triggered + while !should_stop? do + # create the new population + @population = evolve_population(@population) + end +end + +A single generation involves the creation of a new population of offspring (one per parent), and selecting the candidate solutions from the parent and child populations to comprise the next generation. + +def evolve_population(population) + # recombine and mutate + offspring = Array.new(population.length) do |i| + @problem.new_solution_offspring(population[i]) + end + # evaluate the newly created candidate solutions + evaluate_population(offspring) + # combine the existing and new populations + union = population + offspring + # let the solutions compete for survival + competitive_tournaments(union) + # shuffle the union in case few solutions win + Random.shuffle_array(union) + # order the union by wins desc + union.sort! 
{ |a,b| b.wins <=> a.wins } + # select the winners for the new population + winners = union[0...population.length] + # one more generation has completed + @generation += 1 + puts "generation:#{@generation}, #{@best_solution}" + return winners +end + +Competition involves combining the parent and offspring populations and for each member of this new set, comparing its fitness to a set number of opponents. If the candidate solution is better than all opponents which it faces then it is awarded a point. After all bouts are completed the next generation of candidate solutions is selected based on the number of wins each solution was awarded during the competitive process. + +def competitive_tournaments(pop) + # clear wins + pop.each {|s| s.wins = 0 } + # to get a point, each solution must be better than a set of random peers + pop.each do |s| + better = true + heuristic_num_opponents.times do |i| + pos = Random::next_int(pop.length) + better = false if @problem.is_better?(s, pop[pos]) + end + s.wins += 1 if better + end +end + +Summary +The Evolutionary Programming algorithm is very similar to ES, and in this case was demonstrated without a recombination operator and with a 'soft' selection mechanism designed to ensure the search is not too greedy. Natural extensions involve alternative heuristics for adapting the strategy parameters, the use of a recombination operator and varied selection mechanisms. 
\ No newline at end of file diff --git a/deprecated/v0.2/evolutionstrategies_code.rb b/deprecated/v0.2/evolutionstrategies_code.rb new file mode 100755 index 00000000..d2e30817 --- /dev/null +++ b/deprecated/v0.2/evolutionstrategies_code.rb @@ -0,0 +1,215 @@ +# Evolution Strategies in Ruby +# Copyright (C) 2008 Jason Brownlee + +# Change History +# 2008/12/05 JB Solutions bounds checking parameterized +# 2008/12/04 JB Created + +require 'utils' + +class ESSolution + attr_reader :objective_params, :strategy_params + attr_accessor :fitness + + def initialize() + @fitness = Numbers::NAN + end + + def initialize_random(length, min, max) + # random objective parameters + @objective_params = Array.new(length) {|i| Random.next_float_bounded(min, max)} + # random strategy parameters + @strategy_params = Array.new(length) {|i| Random.next_float_bounded(0, (max-min).to_f/2.0)} + end + + def initialize_recombine(parent1, parent2, length, min, max) + # populate strategy parameters + @strategy_params = Array.new(length) do |i| + transcribe_strategy(Random.next_bool ? parent1.strategy_params[i] : parent2.strategy_params[i], length) + end + # populate objective values + @objective_params = Array.new(length) do |i| + transcribe_objective(Random.next_bool ? 
parent1.objective_params[i] : parent2.objective_params[i], @strategy_params[i], min, max) + end + end + + def transcribe_strategy(x, length) + x * Math::exp(heuristic_tau(length) * Random::next_gaussian) + end + + def transcribe_objective(x, stdev, min, max) + o = x + stdev * Random::next_gaussian + o = min if o < min + o = max if o > max + return o + end + + def <=>(solution) + @fitness <=> solution.fitness + end + + def to_s + "f=(#{@fitness})" + end + + def heuristic_tau(length) + length.to_f ** (-1.0/2.0) + end + +end + +class SchwefelsFunction + + attr_reader :dimensions, :min, :max + + def initialize(dimensions=2) + @dimensions = dimensions + @min = -500 + @max = 500 + end + + def cost(es_solution) + es_solution.fitness = calculate(es_solution.objective_params) + end + + def calculate(real_vector) + real_vector.inject(0) {|sum, x| sum + -x * Math::sin(Math::sqrt(x.abs)) } + end + + def is_optimal?(es_solution) + return false if es_solution.nil? + return es_solution.fitness <= known_optimal_fitness + end + + def known_optimal_fitness + # really: 418.9829, reduced for rounding issues + (-(@dimensions).to_f * 418.982) + end + + def is_maximizing? + return false + end + + def choose_better s1, s2 + return s2 if s1.nil? + return s1 if s2.nil? + # minimizing + return (s1.fitness <= s2.fitness) ? 
s1 : s2 + end + + def new_solution + s = ESSolution.new + s.initialize_random(@dimensions, @min, @max) + return s + end + + def new_solution_recombine(parent1, parent2) + s = ESSolution.new + s.initialize_recombine(parent1, parent2, @dimensions, @min, @max) + return s + end + +end + +class EvolutionStrategiesAlgorithm + attr_reader :problem, :population, :generation, :best_solution, :plus_mode + + def initialize(plus_mode=true) + @plus_mode = plus_mode + end + + def evolve(problem) + # store problem + @problem = problem + # prepare the initial population + initialize_population + # evaluate the initial population + evaluate_population(population) + # evolve until stop condition is triggered + while !should_stop? do + if @plus_mode + plus_es + else + comma_es + end + end + end + + # (mu,lambda)-ES + def comma_es + # direct replacement + @population = evolve_population(@population) + # evaluate the population + evaluate_population(@population) + end + + # (mu+lambda)-ES + def plus_es + # create the new population + new_population = evolve_population(@population) + # evaluate the newly created candidate solutions + evaluate_population(new_population) + # combine the existing and new populations + union = @population + new_population + # rank by fitness evaluation (ascending numeric order) + union.sort! + # select the best of all available solutions + @population.fill {|i| @problem.is_maximizing? ? 
union[union.length-1-i] : union[i] } + end + + def initialize_population + @best_solution = nil + @generation = 0 + @population = Array.new(heuristic_population_size) + @population.fill {|index| @problem.new_solution} + end + + def evaluate_population(pop) + pop.each do |solution| + @problem.cost solution + @best_solution = @problem.choose_better @best_solution, solution + end + end + + def evolve_population(population) + # shuffle the array to promote random pairings + Random.shuffle_array(population) + # recombine and mutate + new_population = Array.new(population.length) + population.each_with_index do |solution, index| + if index.modulo(2)==0 + new_population[index] = @problem.new_solution_recombine(solution, population[index+1]) + else + new_population[index] = @problem.new_solution_recombine(solution, population[index-1]) + end + end + # one more generation has completed + @generation += 1 + puts "generation:#{@generation}, #{@best_solution}" + return new_population + end + + def should_stop? 
+ return true if @problem.is_optimal?(@best_solution) + return true if generation >= heuristic_total_generations + return false + end + + def heuristic_total_generations + 1000 + end + + def heuristic_population_size + 60 + end + +end + + +# problem test +Random.seed(Time.now.to_f) +problem = SchwefelsFunction.new(3) +algorithm = EvolutionStrategiesAlgorithm.new(true) +algorithm.evolve(problem) +puts "Finished, best solution found: #{algorithm.best_solution}" +puts "Known Optimal Fitness: #{problem.known_optimal_fitness}" \ No newline at end of file diff --git a/deprecated/v0.2/evolutionstrategies_overview.txt b/deprecated/v0.2/evolutionstrategies_overview.txt new file mode 100755 index 00000000..0957ce13 --- /dev/null +++ b/deprecated/v0.2/evolutionstrategies_overview.txt @@ -0,0 +1,49 @@ +Evolution Strategies Overview +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/04 JB Created + +Inspiration +: Evolution Strategies are inspired by the general theory of evolution by means of natural selection. + +Strategy +: The strategy of Evolutionary Strategies is evolution with self-adaptive mutation. + +Evolutionary Strategies are based on a general evolutionary system where heuristics govern the application of mutation. Traditionally this strategy involved increasing and decreasing a global mutation rate based on the progress of the evolutionary search (such as multiple or divide by 1/5 respectively). The intention of this adaptive control is search efficiency, for example the system may take large steps in across neural regions of the search space or along gradients, and smaller steps around optima. + +More recently this strategy has been realized through the co-evolution of parameters that control the amount of evolution that occurs either globally or per problem specific parameter. The strategy parameters control the scope of the mutation operator, influencing the step size (in problem space) of changes that may be introduced during a search. 
This introduces a fine level of control over the self-adaptation of candidate solutions governed by the same evolutionary process that governs the broader search. + + +Procedure +: The procedure for Evolution Strategies involves a classical generational model of selection, recombination and mutation. + +The Evolution Strategy algorithm models problem domains in the problem space, requiring problem specific recombination and variation operators. A popular class of problem for the algorithm is non-linear optimization problems with real-valued function parameters. In this domain recombination may involve a traditional operator such as one-point crossover or a domain specific approach such as calculating the central tendency. Gaussian re-sampling around existing candidate solutions is used as the variation operator where strategy parameters influence the standard deviation of the distribution from which the random numbers are drawn. + +The field has its own nomenclature to describe the setup of a given strategy. For example a population has mu individuals which create lambda offspring each generation. The offspring may directly replace the parent population which is denoted as (mu,lambda)-ES. Alternatively, members from the two populations may compete with each other for positions in the next generation, denoted as (mu+lambda)-ES. The numbers can be varied promoting selective pressure or generational gap, for example (1+5)-ES denotes a 1-member population that creates 5 offspring each generation the best of which competes with the parent for survival. 
+ +Randomly initialize the population +While not Stop Condition + Evaluate + Select Parents + For each + Recombine to create offspring + Mutate strategy parameters + Mutate problem parameters (using mutated strategy parameters) + End + Replace Current Population +End + +Heuristics +* Maintain a modest population size (1lambda{|a,b| a*b}, + "/"=>lambda{|a,b| a/b}, + "+"=>lambda{|a,b| a+b}, + "-"=>lambda{|a,b| a-b} + } + @terminals = { + "x"=>lambda{current_point} + } + end + + def head_symbols + @functions.keys + @terminals.keys + end + + def tail_symbols + @terminals.keys + end + + def lookup(symbol) + @functions[symbol] || @terminals[symbol] + end + + # y = a^4 + a^3 + a^2 + a^1 + def source(x) + (x**4.0) + (x**3.0) + (x**2.0) + x + end + + def next_point + @point_index = (@point_index+1>=heuristic_num_points) ? 0 : (@point_index + 1) + end + + def current_point + @sample_points[@point_index] + end + + def cost(solution) + # parse expression + # genome is reversed because stack behavior operates on the end of an array in ruby + solution.expression = breadth_first_mapping(solution.genome.reverse) + # sum errors in the model + sum_errors = 0.0 + heuristic_num_points.times do |i| + score = solution.expression.eval + score = 10.0 if (score.nan? or !score.finite?) + sum_errors += (score - source(current_point)).abs + next_point + end + solution.fitness = sum_errors + end + + # breadth first + def breadth_first_mapping(symbols) + queue = Array.new + # create the root + root = GEPNode.new(lookup(symbols.pop)) + # push root onto the queue + queue.push(root) + # process the queue until empty + while !queue.empty? 
do + # dequeue (start) + curr = queue.shift + # process children + if curr.value.arity == 2 + # create and enqueue (end) left + curr.left = GEPNode.new(lookup(symbols.pop)) + queue.push(curr.left) + # create and enqueue (end) right + curr.right = GEPNode.new(lookup(symbols.pop)) + queue.push(curr.right) + end + end + return root + end + + def choose_better(s1, s2) + return s2 if s1.nil? + return s1 if s2.nil? + return (s1.fitness <= s2.fitness) ? s1 : s2 + end + + def new_solution + s = GEPSolution.new(head_symbols, tail_symbols) + s.initialize_random + return s + end + + def new_solution_recombine(parent1, parent2) + s = GEPSolution.new(head_symbols, tail_symbols) + s.initialize_recombination(parent1, parent2) + return s + end + + def is_optimal?(solution) + !solution.nil? and (solution.fitness == 0.0) + end + + def heuristic_num_points + 10 + end + +end + + +class GeneExpressionProgrammingAlgorithm + attr_reader :problem, :population, :generation, :best_solution + + def evolve problem + # store problem + @problem = problem + # prepare the population and state + @best_solution = nil + @generation = 0 + @population = Array.new(heuristic_population_size) {|i| @problem.new_solution} + # evolve until stop condition is triggered + @population = evolve_population(@population) until should_stop? 
+ end + + def evaluate(pop) + pop.each do |solution| + @problem.cost(solution) + @best_solution = @problem.choose_better(@best_solution, solution) + end + end + + def evolve_population(population) + # evaluate + evaluate(population) + # select + selected = population.collect {|solution| tournament_select solution, population} + # recombine and mutate + new_population = Array.new(population.length) + selected.each_with_index do |solution, index| + # probabilistic crossover or promotion + if Random.next_float < heuristic_crossover_rate + if index.modulo(2)==0 + new_population[index] = @problem.new_solution_recombine(solution, selected[index+1]) + else + new_population[index] = @problem.new_solution_recombine(solution, selected[index-1]) + end + else + new_population[index] = solution + end + end + # one more generation has completed + @generation += 1 + puts "#{@generation}, best: #{@best_solution}" + return new_population + end + + # tournament selection with reselection + def tournament_select(base, population) + bouts = 1 + while bouts <= heuristic_selection_num_bouts do + pos = Random.next_int(population.length) + base = @problem.choose_better(base, population[pos]) + bouts += 1 + end + return base + end + + def should_stop? 
+ @problem.is_optimal?(@best_solution) or (@generation >= heuristic_total_generations) + end + + def heuristic_total_generations + return 100 + end + + def heuristic_population_size + 60 + end + + def heuristic_crossover_rate + 0.7 + end + + def heuristic_selection_num_bouts + 3 + end + +end + + +# run it +seed = Time.now.to_f +puts "Random number seed: #{seed}" +Random.seed(seed) +problem = SymbolicRegressionProblem.new +algorithm = GeneExpressionProgrammingAlgorithm.new +algorithm.evolve(problem) +puts "Best of Run: #{algorithm.best_solution}" diff --git a/deprecated/v0.2/geneexpressionprogramming_overview.txt b/deprecated/v0.2/geneexpressionprogramming_overview.txt new file mode 100755 index 00000000..512a50c4 --- /dev/null +++ b/deprecated/v0.2/geneexpressionprogramming_overview.txt @@ -0,0 +1,45 @@ +Gene Expression Programming Overview +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/10 JB Created + +Inspiration +: Gene Expression Programming is inspired by the theory of evolution by means of natural selection. + +Gene Expression Programming (GEP) is inspired by a computational perspective of evolution. The algorithm is inspired by the genotype-phenotype model where DNA provides a low level definition of an organism the genes of which are read to produce proteins and ultimately the phenotype of the organism. + +Strategy +: The strategy for Gene Expression Programming is an evolutionary process that operates on a sub-symbolic representation of candidate solutions which is translated (expressed) as a solution to a given problem to be evaluated. + +Gene Expression Programming uses a linear genome as the basis for genetic operators such as mutation, recombination, inversion, and transposition. The genome is comprised of chromosomes (typically one) and each chromosome is comprised of genes (one or more) that are translated into an expression tree to address a given problem. 
The strategy attempts to solve problems by creating programs represented as expression trees (like GP) although it applies the genetic operators to a sub-symbolic representation of the expression trees (unlike a GP, more like a GA). As such it is similar in strategy to GE. + +Procedure +A candidate solution is represented as a linear representation of an expression tree where each symbol maps to a function or terminal node. The linear representation is mapped to an expression tree in a breadth-first manner. The compressed representation is referred to as Karva notation or a K-expression within the context of the GEP field. + +A genome or K-expression is fixed length and is comprised of one or more sub-expressions (genes), which are also defined with a fixed length. A gene is comprised of two sections, a head which may contain any function or terminal symbols, and a tail section that may only contain terminal symbols. The size of the head is a system parameter, whereas the tail length is calculated via a heuristic method that ensures that each gene will always translate (express) syntactically correct expression trees. The tail portion of the gene provides a genetic buffer which ensures closure of the gene's expression. As such some or none of the information in the tail may be expressed, providing a flexible non-coding region in candidate solutions engendering the evolution of evolvability. + +The robust gene definition means that genetic operators can be applied to the sub-symbolic representation without concern for the resultant gene expression, providing separation of genotype and phenotype. + +For difficult problems, multiple genes (sub-expressions) are used and combined to address the problem. The sub-expressions are linked using link expressions which are function nodes that are either statically defined (such as a conjunction) or evolved on the genome with the genes.
+ +The mutation operator substitutes expressions along the genome, although it must respect the gene rules such that function and terminal nodes are mutated in the head of genes, whereas only terminal nodes are substituted in the tail of genes. Crossover occurs between two selected parents from the population and can occur based on a one-point cross, two point cross, or finally a gene-based approach where genes are selected from the parents with uniform probability. + +An inversion operator may be used with a low probability that reverses a small sequence of symbols (1-3) within a section of a gene (tail or head). A transposition operator may be used that has a number of different modes, including: duplicate small sequences (1-3) from somewhere on a gene to the head, small sequences on a gene to the root of the gene, and moving of entire genes on the chromosome. In the case of intra-gene transpositions, the sequence in the head of the gene is moved down to accommodate the copied sequence and the length of the head is truncated to maintain consistent gene sizes. + +Numeric constants can be included in the terminal node set, although GEP defines an alternative method. A '?' is included in the terminal set that represents a numeric constant from an array that is evolved on the end of the genome. The constants are read from the end of the genome and are substituted for '?' as the expression tree is created (in breadth first order). Finally the numeric constants are used as array indices in yet another chromosome of numerical values which are substituted into the expression tree. + +Heuristics +* Use of parsimony pressure to create expressions that solve the problem that are also small (concise and easier to read). For example, the objective fitness function can be scaled based on the number of nodes in the expression tree, perhaps a ratio of genome length to resultant expression tree.
+* The length of a chromosome is defined by the number of genes, where a gene length is defined by h + t. h is a user defined parameter (such as 10), where as t is defined as t = h (n-1) + 1. n represents the maximum arity of functional nodes in the expression (such as 2 if the arithmetic function */-+ are used) +* Mutation is typically 1/L, selection can be any of the classical approaches (such as roulette wheel or tournament), and crossover rates are typically high (0.7 of offspring) +* Use multiple sub-expressions linked together on hard problems when one gene does not get much progress. Provides modularity in the solution for both evolvability and ultimate readability. + + +Further Reading +* Gene Expression programming: mathematical modeling by an artificial intelligence, 2nd edition (2006) +* Gene Expression Programming: Mathematical Modeling by an Artificial Intelligence, 1st edition (2002) + - online: http://www.gene-expression-programming.com/GepBook/Introduction.htm +* Gene Expression Programming: A New Adaptive Algorithm for Solving Problems, Complex Systems, 13 (2): 87-129, 2001 + +Home: http://www.gene-expression-programming.com/ diff --git a/deprecated/v0.2/geneexpressionprogramming_tutorial.txt b/deprecated/v0.2/geneexpressionprogramming_tutorial.txt new file mode 100755 index 00000000..ae3dcff0 --- /dev/null +++ b/deprecated/v0.2/geneexpressionprogramming_tutorial.txt @@ -0,0 +1,143 @@ +Gene Expression Programming Tutorial +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/10 JB Created + +Introduction +This tutorial demonstrates an implementation of Gene Expression Programming applied to a symbolic regression problem. It is a reproduction of the experiment in the seminal paper: Gene Expression Programming: A New Adaptive Algorithm for Solving Problems (2001). + +Problem +The problem is a symbolic regression problem that involves finding a function that can map a set of inputs to a set of outputs. 
The function used to generate the sample data is: f(x) = x^4 + x^3 + x^2 + x^1. The range of inputs is bounded to [1,20]. + +def source(x) + (x**4.0) + (x**3.0) + (x**2.0) + x +end + +The set of function nodes available is limited to [+, -, *, /] and the terminal nodes is limited to the input value [x]. These are modeled as lambda's such that an expression tree can be modeled as a tree structure that can be executed recursively. + +@functions = { + "*"=>lambda{|a,b| a*b}, + "/"=>lambda{|a,b| a/b}, + "+"=>lambda{|a,b| a+b}, + "-"=>lambda{|a,b| a-b} + } +@terminals = { + "x"=>lambda{current_point} +} + +A candidate solution is comprised of a sequence of symbols that are mapped onto lambdas. The mapping occurs in a breadth first manner without recurision. This is achieved by using a queue, the classical method for processing a graph in a breadth first manner. For each node, the symbol is read from the chromosome, it is translated to a lambda and stored within the graph structure. + +def breadth_first_mapping(symbols) + queue = Array.new + # create the root + root = GEPNode.new(lookup(symbols.pop)) + # push root onto the queue + queue.push(root) + # process the queue until empty + while !queue.empty? do + # dequeue (start) + curr = queue.shift + # process children + if curr.value.arity == 2 + # create and enqueue (end) left + curr.left = GEPNode.new(lookup(symbols.pop)) + queue.push(curr.left) + # create and enqueue (end) right + curr.right = GEPNode.new(lookup(symbols.pop)) + queue.push(curr.right) + end + end + return root +end + +The evaluation of a candidate solution involves mapping the sequence to an expression tree. The expression tree is then evaluated 10 times with 10 different input parameters. The values produced by the candidate solution are compared to ground truth and the sum of the absolute differences are assigned to the solution as the fitness score. 
A large (10) problem specific penalty error is assigned each time a candidate solution generates a value that is NaN or infinite. This may happen if a divide by zero or overflow occurs. + +def cost(solution) + # parse expression + # genome is reversed because stack behavior operates on the end of an array in ruby + solution.expression = breadth_first_mapping(solution.genome.reverse) + # sum errors in the model + sum_errors = 0.0 + heuristic_num_points.times do |i| + score = solution.expression.eval + score = 10.0 if (score.nan? or !score.finite?) + sum_errors += (score - source(current_point)).abs + next_point + end + solution.fitness = sum_errors +end + +Solution +The solution is defined as a string of symbols that are mapped into functions in the problem. New candidate solutions are initialized as a random sequences of symbols. + +def initialize_random + @genome = Array.new(heuristic_length) do |i| + (iother.fitness : @fitness=@config[:max_generations] or solution.fitness==problem.optimal_score + return solution + end + + def evolve_population(pop, problem) + selected = pop.collect {|sol| tournament_select(sol, pop, problem)} + selected.each_with_index do |sol, i| + if (rand < @config[:crossover]) + other = (i.modulo(2)==0) ? 
selected[i+1] : selected[i-1] + pop[i] = sol.recombine(@config[:mutation], other) + else + pop[i] = sol.recombine(@config[:mutation]) + end + end + end + + def tournament_select(base, pop, problem) + @config[:num_bouts].times do + other = pop[rand(pop.length)] + base = other if other.is_better?(problem, base) + end + return base + end +end + + +# run it +seed = Time.now.to_f +srand(seed) +puts "Random seed: #{seed}" +problem = Sphere.new(3) +algorithm = GeneticAlgorithm.new +algorithm.configure(problem, 1000) +solution = algorithm.evolve(problem) +puts "Finished, solution:#{solution}, optimal:#{problem.optimal_score}" + diff --git a/deprecated/v0.2/geneticalgorithm_overview.txt b/deprecated/v0.2/geneticalgorithm_overview.txt new file mode 100755 index 00000000..8a94fcd7 --- /dev/null +++ b/deprecated/v0.2/geneticalgorithm_overview.txt @@ -0,0 +1,48 @@ +Genetic Algorithm Overview +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/03 JB Created + +Inspiration +: The inspiration for the Genetic Algorithm is a genetic perspective of the theory of evolution. + +The theory of evolution by means of natural selection accounts for the diversity of life of the planet. It describes a situation where organisms reproduce in quantities greater than can be supported by the environment, the uniqueness of individuals in the population given low frequency genetic mutations during cell division, and the competition for survival in an environment biased by individual variance. In this situation, the theory describes a process where beneficial differences between individuals accumulate in the population over time. The theory describes a natural process where organisms improve their adaptive fit for their environment. + +Strategy +: The strategy for the Genetic Algorithm is to improve the fitness of a population of candidate solutions in the context of an objective function using repeated rounds of selection and variation. 
+ +The Genetic Algorithm is inspired by a genetic perspective of the theory of evolution by means of natural selection. The inspiration focuses on a model genetic system of an organism defined by a genome (typically one chromosome and a number of genes). The genome is assessed using an objective function (fitness function) which typically involves translating or mapping the genetic code (genotype) to a problem specific representation (phenotype). + +Procedure +: The procedure for the Genetic Algorithm involves a large number of iterations called generations where the population is evaluated, the fitter members of which are selected to contribute their genetic material in the creation of the subsequent generation with minor copying errors. + +The Genetic Algorithm starts with a randomly initialized population of candidate solutions defined as binary strings. Each algorithm iteration, the population is exposed to a series of genetic operator functions that manipulate the population. The candidate solutions are first evaluated against a cost function. A subset of candidate solutions are selected proportional to their relative fitness and recombined to produce new candidate solutions that displace the current population. The creation of new child candidate solutions are random mixes the binary strings of two parents. Low frequency random copying errors are introduced during the recombination process providing a background level of diversity. + +Initialization +Loop + Evaluation + Selection + Recombination + Mutation + Replacement +End + +Heuristics + +: The heuristics for the Genetic Algorithm are moderate population sizes, not too greedy, not too random, and try to preserve patterns of genetic material across generations. 
+ +* Binary strings are the standard genetic representation when implementing the algorithm and its genetic operators, although domain specific representations may be used and the operators modified to support domain specific constraints +* The selection process must be balanced between random selection and greedy selection to be biased towards fitter candidate solutions (exploitation), whilst promoting useful diversity into the population (exploration). +* Recombination is intended to create new candidate solutions with the favorable attributes of both parents, and as such should attempt to preserve patterns (sub-sequences and/or patterns) of both parents. +* Mutation is a low frequency event determined probabilistically for each position in the genome (locus) where the optimal probability is proposed to be 1.0/L where L is the length of the binary string. +* Population sizes must be large enough to provide a sufficiently diverse pool of building blocks for the process to be effective, typically equal to the number of bits in a candidate solution or some function thereof. + +Summary +The Genetic Algorithm is a general optimization algorithm that models problem domains as binary strings. The lack of problem specific information incorporated into the technique means that it can be rapidly applied against a given problem. This same benefit also means that the technique is fragile to the mapping of binary strings to the domain and the amount of problem specific information encoded into the fitness evaluation.
+ + +Further Reading + +* Goldberg +* EC1 and EC2 diff --git a/deprecated/v0.2/geneticalgorithm_tutorial.txt b/deprecated/v0.2/geneticalgorithm_tutorial.txt new file mode 100755 index 00000000..b139d585 --- /dev/null +++ b/deprecated/v0.2/geneticalgorithm_tutorial.txt @@ -0,0 +1,119 @@ +Genetic Algorithm Tutorial +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/03 JB Created + +Introduction +This tutorial covers how to implement a genetic algorithm that operates on candidate solutions with binary genomes to solve the OneMax problem. + +Problem +The problem instance is called OneMax which scores binary strings based on the number of 1's in the string. It is a popular objective function for demonstrating the Genetic Algorithm as it does not require the binary string to be mapped to another domain, allowing the direct evaluation. + +def count_ones(bitstring) + bitstring.inject(0) {|sum, x| sum + ((x=='1') ? 1 : 0)} +end + +The problem specific mapping and evaluation of solutions by an oracle objective function is computationally the most time consuming aspect of the genetic algorithm. + +The OneMax function demonstrates an important principle for the genetic algorithm, and that is the relationship of an oracle objective function used for the atomic assessment of candidate solutions. The objective function (or fitness function) is a black box to the algorithm, providing numerical scorings of candidate solutions that only have meaning in the context of other assessed candidate solutions to which the algorithm has been exposed. + +In the case of the OneMax problem, the algorithm will receive the same scoring for two very different strings (for example 111000 and 100101). This highlights the generality of the algorithm, where the algorithm solves through a process of induction using only fitness information.
For example, if 111000 becomes 111001, the algorithm conserves the new string without the explicit knowledge that the improvement in fitness was caused by making a '0' become a '1'. + +Solution +The solution is represented as a binary string genome, in this case an array of characters where each position in the array can have the value of '0' or '1'. Each BinarySolution has a fitness value associated with it, evaluated and assigned by the objective function of the OneMaxProblem. + +New solutions are seeded with random bit strings and are used at the beginning of an algorithm run. + +def initialize_random(length) + @genome = Array.new(length) + @genome.fill {|index| Random.next_bool ? "1" : "0"} +end + +The typical case during the run involves the creation of new candidate solutions (offspring) from candidate solutions already selected and evaluated (parents). This recombination process involves copying sequences of bits from each parent, where the copying process (or transcription) of each value occurs with some probability of error. + +def initialize_recombination(parent1, parent2) + # ensure first and last positions are never selected (always cross) + pos = Random.next_int(parent1.genome.length - 2) + 1 + # create a new genome + @genome = Array.new(parent1.genome.length) + # recombine the genomes with copy errors + @genome.fill do |index| + if index < pos + @genome[index] = transcribe parent1.genome[index] + else + @genome[index] = transcribe parent2.genome[index] + end + end +end + +def transcribe(value) + if Random.next_float < heuristic_mutation_rate(@genome.length) + return ((value == "1") ? "0" : "1" ) + end + return value +end + +Importantly, the solutions are specific to the problem, in this case binary strings with floating point fitness scorings. The genetic algorithm logic relevant to manipulating solutions (the crossover and mutation procedures) are encapsulated within the solution class and specialized for the character-based binary strings.
+ +Algorithm +The algorithm is implemented as a generic (not problem specific) and reusable system that manages a population of candidate solutions over a set of generations in the context of a provided problem definition. The problem provides interfaces for the algorithm to acquire new and recombined candidate solutions, solution evaluation and solution comparisons - encapsulating notions of relative improvement (maximization or minimization). + +As such, the algorithm initializes some internal state at the beginning of each evolve request and executes generations, each of which creates a new population for the existing old population of candidate solutions. The algorithm keeps track of the best solution found to date (as defined by the problem) and stops executing generations after a stop condition is triggered. In this case a set number of generations or an optimal solution is located as defined by the problem (in this case the optimal solution is equal to the number of bits in the problem). + +def evolve(problem) + # store problem + @problem = problem + # prepare the population and state + initialize_population + # evolve until stop condition is triggered + while not should_stop? 
do + @population = evolve_population population + end +end + +def evolve_population(population) + # evaluate + population.each do |solution| + @problem.cost solution + @best_solution = @problem.choose_better @best_solution, solution + end + # select + selected = population.collect {|solution| tournament_select solution, population} + # recombine and mutate + new_population = Array.new(population.length) + population.each_with_index do |solution, index| + # probabilistic crossover or promotion + if Random.next_float < heuristic_crossover_rate + if index.modulo(2)==0 + new_population[index] = @problem.new_solution_recombine(solution, population[index+1]) + else + new_population[index] = @problem.new_solution_recombine(solution, population[index-1]) + end + else + new_population[index] = solution + end + end + # one more generation has completed + @generation += 1 + puts "#{@generation}, best: #{@best_solution}" + return new_population +end + +The evolve_population function does all the hard work, first evaluating the population, selecting the breeding set using tournament-based fitness proportionate-selection, and creating the new population from the selected set. Recombination occurs probabilistically with a high probability. When recombination is not used, the candidate solution is directly promoted to the new population. + +The fitness tournament is a simple and efficient implementation of selection that involves a set number of bouts (fights) for each position in the breeding set. The bouts a decided based on the candidate solutions assigned fitness, the relative comparisons of which are assessed using the problem definition. 
+ +def tournament_select(base, population) + bouts = 1 + while bouts <= heuristic_selection_num_bouts do + pos = Random.next_int(population.length) + candidate = population[pos] + base = @problem.choose_better(base, candidate) + bouts += 1 + end + return base +end + +Summary +The Genetic Algorithm has a number of parameters which influence the greediness and randomness of the algorithm. Given the number of probabilistic decisions made during each iteration of the algorithm, the same inputs to the algorithm can provide an array of varied output results, such as the best solution found. As such, it is important that the system is executed a number of times for any given configuration. This will provide a more meaningful understanding of the algorithms capability for a configuration on a problem. diff --git a/deprecated/v0.2/geneticprogramming_code.rb b/deprecated/v0.2/geneticprogramming_code.rb new file mode 100755 index 00000000..2b11ed84 --- /dev/null +++ b/deprecated/v0.2/geneticprogramming_code.rb @@ -0,0 +1,276 @@ +# Genetic Programming in Ruby +# Copyright (C) 2008 Jason Brownlee + +# Change History +# 2008/12/06 JB Created + +# TODO +# - penalize functions that produce an NAN +# - working crossover operator + + +require 'utils' + + +class GPNode + attr_accessor :value, :left, :right + + def initialize(value, left=nil, right=nil) + @value, @left, @right = value, left, right + end + + def is_leaf? + @left.nil? and @right.nil? + end + + def to_s + (is_leaf? ? "#{@value}" : "(#{@value}, #{@left}, #{@right})") + end + + def size + 1 + (is_leaf? ? 0 : (@left.size + @right.size)) + end + + def eval + is_leaf? ? @value : @value.call(@left.eval, @right.eval) + end + + def copy + is_leaf? ? GPNode.new(@value) : GPNode.new(@value, @left.copy, @right.copy) + end + + def to_array(array) + # self + array << self + # children + if !is_leaf? 
+ @left.to_array(array) + @right.to_array(array) + end + end + + def get_node(index) + arr = Array.new + to_array(arr) + return arr[index] + end + + def replace(index, replacement, count=1) + + end + +end + + +class GPSolution + attr_accessor :fitness, :expression + + def initialize() + @fitness = Numbers::NAN + end + + def initialize_random(problem) + @expression = random_expression(problem, problem.heuristic_max_depth) + end + + def initialize_recombination(problem, parent1, parent2) + pos1 = (Random::next_int(parent1.expression.size - 2) + 1) + pos2 = (Random::next_int(parent2.expression.size - 2 ) + 1) + # copy all of first parent + @expression = parent1.expression.copy + # replace crossover point with copy of second parent cross point + # @expression.get_node(pos1-1).right = parent2.expression.get_node(pos2).copy + # mutation the expression + mutate_expression(problem, @expression) + end + + def mutate_expression(problem, node) + if Random.next_float < heuristic_mutation_rate + if node.is_leaf? + node.value = problem.terminal_set[Random::next_int(problem.terminal_set.length)].call + else + node.value = problem.function_set[Random::next_int(problem.function_set.length)] + end + end + if !node.is_leaf? 
+ mutate_expression(problem, node.left) + mutate_expression(problem, node.right) + end + end + + def random_expression(problem, max_depth, curr_depth=1) + if (curr_depth.to_f/max_depth.to_f) < Random::next_float + func = problem.function_set[Random::next_int(problem.function_set.length)] + return GPNode.new(func, random_expression(problem,max_depth,curr_depth+1), random_expression(problem,max_depth,curr_depth+1)) + else + term = problem.terminal_set[Random::next_int(problem.terminal_set.length)] + val = term.call + return GPNode.new(val) + end + end + + def to_s + "size=(#{@expression.size}), eval=(#{@expression.eval}) f=(#{@fitness})" + end + + def heuristic_mutation_rate + 1.0 / @expression.size.to_f + end + +end + +class ApproximatePI + attr_reader :function_set, :terminal_set, :goal + + def initialize() + @function_set = [ lambda{|a,b| a*b}, lambda{|a,b| a/b}, lambda{|a,b| a+b}, lambda{|a,b| a-b}] + @terminal_set = [ lambda{Random::next_float} ] + @goal = Math::PI #rounded to 3.14159 + end + + def cost(solution) + # evaluate the expression + value = solution.expression.eval + # absolute difference from goal value + solution.fitness = (round(@goal) - round(value)).abs + end + + def is_optimal?(solution) + solution.fitness == 0.0 + end + + def round(v) + ((v * 100000.0).floor).to_f / 100000.0 + end + + def is_maximizing? + return false + end + + def choose_better s1, s2 + return s2 if s1.nil? + return s1 if s2.nil? + # minimizing + return (s1.fitness <= s2.fitness) ? 
s1 : s2 + end + + def new_solution + s = GPSolution.new + s.initialize_random(self) + return s + end + + def new_solution_recombine(parent1, parent2) + s = GPSolution.new + s.initialize_recombination(self, parent1, parent2) + return s + end + + def heuristic_max_depth + 6 + end + +end + + +class GeneticProgrammingAlgorithm + attr_reader :problem, :population, :generation, :best_solution + + def initialize() + end + + def evolve(problem) + # store problem + @problem = problem + # prepare the initial population + initialize_population + # evaluate the initial population + evaluate_population(population) + # evolve until stop condition is triggered + while !should_stop? do + # create the new population + @population = evolve_population(@population) + end + end + + def initialize_population + @best_solution = nil + @generation = 0 + @population = Array.new(heuristic_population_size) { |i| @problem.new_solution } + end + + def evaluate_population(pop) + pop.each do |solution| + @problem.cost(solution) + @best_solution = @problem.choose_better(@best_solution, solution) + end + end + + def evolve_population(population) + # evaluate + evaluate_population(population) + # select + selected = population.collect {|solution| tournament_select(solution, population)} + # recombine and mutate + new_population = Array.new(population.length) + selected.each_with_index do |solution, index| + # probabilistic crossover or promotion + if Random.next_float < heuristic_crossover_rate + if index.modulo(2)==0 + new_population[index] = @problem.new_solution_recombine(solution, selected[index+1]) + else + new_population[index] = @problem.new_solution_recombine(solution, selected[index-1]) + end + else + new_population[index] = solution + end + end + # one more generation has completed + @generation += 1 + puts "#{@generation}, best: #{@best_solution}" + return new_population + end + + def should_stop? 
+ return true if @problem.is_optimal?(@best_solution) + return true if generation >= heuristic_total_generations + return false + end + + # tournament selection with reselection + def tournament_select(base, population) + bouts = 1 + while bouts <= heuristic_selection_num_bouts do + pos = Random.next_int(population.length) + candidate = population[pos] + base = @problem.choose_better(base, candidate) + bouts += 1 + end + return base + end + + def heuristic_total_generations + return 50 + end + + def heuristic_population_size + 200 + end + + def heuristic_crossover_rate + 0.90 + end + + def heuristic_selection_num_bouts + 7 + end +end + +# problem test +Random.seed(Time.now.to_f) +problem = ApproximatePI.new +puts "goal=(#{problem.goal}), rounded=#{problem.round(problem.goal)}" +algorithm = GeneticProgrammingAlgorithm.new +algorithm.evolve(problem) +puts "Best Solution: #{algorithm.best_solution}" + diff --git a/deprecated/v0.2/geneticprogramming_overview.txt b/deprecated/v0.2/geneticprogramming_overview.txt new file mode 100755 index 00000000..50e86ffc --- /dev/null +++ b/deprecated/v0.2/geneticprogramming_overview.txt @@ -0,0 +1,46 @@ +Genetic Programming Overview +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/06 JB Created + +Inspiration +: Genetic Programming is inspired by the biological theory of evolution by means of natural selection + +Strategy +: The strategy for Genetic Programming is to exploit evolution to develop a program to complete a user defined task. + +Genetic Programming (GP) is concerned with exploiting an evolutionary process to develop programs to solve a given problem. Candidate solutions are represented as executable programs, typically as expression trees in a scope limited functional language. Programs may or may not take input parameters and are assessed based upon their execution in the context of a problem domain. 
GP represent a different approach to evolutionary-based problem solving where instead of searching for a set of parameters to satisfy a model, a program-based model is evolved to satisfy the problem. + +Procedure +A typical Genetic Programming algorithm involves the evolution of a population of candidate solutions of a number of generations of selection, recombination, and mutation. The expression tree representation requires specialized genetic operators for recombination and variation. The crossover operator involves pruning a part of the expression tree from one parent, and grafting on a sub-tree from a second parent. The mutation operator involves exchanging values or functions within the expression tree. + +For example a program may use a set of arithmetic functions, each of which take two parameters, such as *\+-. A candidate solution may be be comprised of a mixture of the functions and numerical constants, arranged in a binary expression tree that is executed in a depth-first manner. + + + / \ + 2 4 + +A common way to represent such solutions is to use a LISP-like syntax that represents programs as binary-expression trees using the prefix notation. The above example would be presented as: (+ 2,4). Nodes in the tree that accept one or more parameters are referred to as 'function nodes', whereas those that do not accept a parameter such as numeric constant are called 'terminal nodes'. + +Candidate solutions may be comprised of mathematic functions as well as domain specific functions, such a movement directions for a maze controller. More elaborate realizations of the algorithm allow for the evolution the functions used by the candidate solutions called automatically defined functions (ADF) or the evolution of the genetic operators applied to the candidate solutions each generation. 
+ +Initialize Population +While not Stop Condition + Evaluation population (execute programs) + Selection + Crossover, Mutation + Replacement +End + +Heuristics +* Fitness assessments typically take the structure of the program into account (rewarding parsimony) as well as the problem specific evaluation +* Crossover points are biased towards selecting function nodes in the expression tree over terminals (90/10 split) +* The probability of a crossover event is typically high (90% of members) and the probability of a point mutation is typically low (1% of nodes) +* Typical execution involve large population sizes (100-500, sometimes much larger) over a relatively small number of generations (10 to 50) +* The function and terminal sets are domain dependent, and may direct interact with the problem (such as mathematic function or controller) or may construct a solution for evaluation (such as design problems). + +Further Reading + +- http://www.genetic-programming.com/ +- Genetic Programming IV: Routine Human-Competitive Machine Intelligence (2003) +- A Field Guide to Genetic Programming (2008) http://www.gp-field-guide.org.uk/ \ No newline at end of file diff --git a/deprecated/v0.2/geneticprogramming_tutorial.txt b/deprecated/v0.2/geneticprogramming_tutorial.txt new file mode 100755 index 00000000..6fef64da --- /dev/null +++ b/deprecated/v0.2/geneticprogramming_tutorial.txt @@ -0,0 +1,66 @@ +Genetic Programming Tutorial +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/06 JB Created + +Introduction +This tutorial demonstrates an implementation of the Genetic Programming algorithm applied to the approximation of Pi using arithmetic mathematical functions. + +Problem +The mathematical constant PI represents the ratio of a circles circumference to its diameter. There are many known approximations of PI, for example: 22/7, 355/113. 
+ +This problem provides a good demonstration of the GP as the problem is simple in that it doesn't have any parameters or require multiple executions of the candidate solution against the problem definition, although requires the development of a mathematic expression (program) to approximate the value of Pi. The cost function is defined as the absolute difference from the value produced by a candidate solution to PI as defined by the ruby constant Math::PI (3.14159265358979). Engineers typically use Pi to 5 or 6 significant figures, so the problem is simplified to approximating PI rounded to 5 places: 3.14159. + +The function set is limited to the four arithmetic functions: *, \, +, and - implemented as lambda's with an arity of 2. The terminal set is limited to constant floating point values between 0 and 1. The cost function involves evaluating a given solution's expression and calculating the absolute difference between the resulting value an Pi bout rounded to 6 significant places. + +def cost(solution) + # evaluate the expression + value = solution.expression.eval + # absolute difference from goal value + solution.fitness = (round(@goal) - round(value)).abs +end + +def round(v) + ((v * 100000.0).floor).to_f / 100000.0 +end + +Solution +A candidate solution is comprised of an expression tree and a fitness value. Each solution manages it's own expression tree, so the recombination and mutation operators belong to the solution class. + +Expressions are managed as a binary tree of nodes, where a given node has a value and a left and right children. Function nodes are assigned one of the four arithmetic lambda's for their value and a node for each child (terminal or function). Terminal nodes are assigned a floating point constant value. The tree structure is held together by the object references and can be traversed in a traditional manner. 
+ +New candidate solutions are prepared with a random expression, created recursively and bounded bounded to a maximum tree depth. + +def random_expression(problem, max_depth, curr_depth=1) + if (curr_depth.to_f/max_depth.to_f) < Random::next_float + func = problem.function_set[Random::next_int(problem.function_set.length)] + return GPNode.new(func, random_expression(problem,max_depth,curr_depth+1), random_expression(problem,max_depth,curr_depth+1)) + else + term = problem.terminal_set[Random::next_int(problem.terminal_set.length)] + val = term.call + return GPNode.new(val) + end +end + +Recombined candidate solutions are created via a process of one point crossover between two parent solutions, which is then mutated recursively. + +def mutate_expression(problem, node) + if Random.next_float < heuristic_mutation_rate + if node.is_leaf? + node.value = problem.terminal_set[Random::next_int(problem.terminal_set.length)].call + else + node.value = problem.function_set[Random::next_int(problem.function_set.length)] + end + end + if !node.is_leaf? + mutate_expression(problem, node.left) + mutate_expression(problem, node.right) + end +end + +Algorithm +The Genetic Programming algorithm is a reusable system that evolves a solution in the context of a problem. The problem contains sufficient information for creating new solutions. The algorithm is executed by calling evolve that initializes the base population can rapidly calls the next_generation method. A breeding set is selected from the population each generation using tournament selection. + +Summary +Natural extensions involve varied genetic operators, and more intestinally different problem domains with new functions from which to encode candidate solutions. 
\ No newline at end of file diff --git a/deprecated/v0.2/grammaticalevolution_code.rb b/deprecated/v0.2/grammaticalevolution_code.rb new file mode 100755 index 00000000..91ba413c --- /dev/null +++ b/deprecated/v0.2/grammaticalevolution_code.rb @@ -0,0 +1,280 @@ +# Grammatical Evolution in Ruby +# Copyright (C) 2008 Jason Brownlee + +# Change History +# 2008/12/09 JB Created + +# TODO +# - penality function for functions that produce an NAN (currently crap evolution) +# - addition of duplication and pruning functions + + +require 'utils' + +# a generic binary string solution +class BinarySolution + + attr_reader :genome + attr_accessor :fitness, :expression + + def initialize() + @fitness = Numbers::NAN + end + + def initialize_random(length) + @genome = Array.new(length) {|i| Random.next_bool ? "1" : "0"} + end + + def initialize_from_parent(parent) + # transcribe with coping errors + length = parent.genome.length + @genome = Array.new(length) {|i| transcribe(parent.genome[i], length)} + # duplicate + + # prune + end + + def initialize_recombination(parent1, parent2) + # handle variable length genomes + lengths = [parent1.genome.length, parent2.genome.length] + min, max = lengths.min, lengths.max + cut = Random.next_int(min - 2) + 1 + # recombine the genomes with copy errors + @genome = Array.new(max) {|i| (i= @genome.length + end + return decode_value(bitstring) + end + + def reset_read + @index = 0 + end + + # decodes to a number between 0 and 2**8 (256) + def decode_value(bitstring) + sum = 0 + bitstring.each_with_index do |x, i| + sum += ((x=='1') ? 
1 : 0) * (2 ** i) + end + return sum + end + + def to_s + "expression=#{@expression} fitness=(#{@fitness})" + # "fitness=(#{@fitness})" + end + +end + + +class SymbolicRegressionProblem + + @@exp = [" EXP BINARY EXP ", " (EXP BINARY EXP) ", " UNIARY(EXP) ", " VAR "] + @@op = ["+", "-", "/", "*" ] + @@preop = ["Math.sin", "Math.cos", "Math.exp", "Math.log"] + @@var = ["INPUT", "1.0"] + @@all = {"EXP"=>@@exp, "BINARY"=>@@op, "UNIARY"=>@@preop, "VAR"=>@@var} + @@start = "EXP" + + def initialize + @min = -1.0 + @max = +1.0 + end + + def source(x) + (x**4.0) + (x**3.0) + (x**2.0) + x + end + + # count the number of '1' bits in a string + def cost(solution) + # reset the solution + solution.reset_read + # parse expression + solution.expression = map(solution, @@start) + # sum errors in the model + errors = 0.0 + heuristic_num_exposures.times do |i| + x = Random::next_float_bounded(@min, @max) + exp = solution.expression.gsub(@@var[0], x.to_s) + begin + score = eval(exp) + rescue + score = Numbers::NAN + end + errors += ((score.nan? ? 1.0 : score) - source(x)).abs + end + solution.fitness = errors + end + + # depth first + def map(solution, str, depth=0) + @@all.keys.each do |key| + str = str.gsub(key) do |k| + set = @@all[k] + if key=="EXP" and depth>=heuristic_max_depth + map(solution, set[set.length - 1], depth+1) + else + i = solution.read_next_int.modulo(set.length) + map(solution, set[i], depth+1) + end + end + end + return str + end + + def choose_better(s1, s2) + return s2 if s1.nil? + return s1 if s2.nil? + return (s1.fitness <= s2.fitness) ? s1 : s2 + end + + def new_solution + s = BinarySolution.new + s.initialize_random(80) + return s + end + + def new_solution_copy(parent) + s = BinarySolution.new + s.initialize_from_parent(parent) + return s + end + + def new_solution_recombine(parent1, parent2) + s = BinarySolution.new + s.initialize_recombination(parent1, parent2) + return s + end + + def is_optimal?(solution) + !solution.nil? 
and (solution.fitness == 0.0) + end + + def heuristic_max_depth + 10 + end + + def heuristic_num_exposures + 10 + end + +end + + +class GrammaticalEvolutionAlgorithm + attr_reader :problem, :population, :generation, :best_solution + + def evolve problem + # store problem + @problem = problem + # prepare the population and state + @best_solution = nil + @generation = 0 + @population = Array.new(heuristic_population_size) {|i| @problem.new_solution} + # evolve until stop condition is triggered + @population = evolve_population(@population) until should_stop? + end + + def evaluate(pop) + pop.each do |solution| + @problem.cost(solution) + @best_solution = @problem.choose_better(@best_solution, solution) + end + end + + def evolve_population(population) + # evaluate + evaluate(population) + # select + selected = population.collect {|solution| tournament_select solution, population} + # recombine and mutate + new_population = Array.new(population.length) + selected.each_with_index do |solution, index| + # probabilistic crossover or promotion + if Random.next_float < heuristic_crossover_rate + if index.modulo(2)==0 + new_population[index] = @problem.new_solution_recombine(solution, selected[index+1]) + else + new_population[index] = @problem.new_solution_recombine(solution, selected[index-1]) + end + else + new_population[index] = @problem.new_solution_copy(solution) + end + end + # one more generation has completed + @generation += 1 + puts "#{@generation}, best: #{@best_solution}" + return new_population + end + + # tournament selection with reselection + def tournament_select(base, population) + bouts = 1 + while bouts <= heuristic_selection_num_bouts do + pos = Random.next_int(population.length) + base = @problem.choose_better(base, population[pos]) + bouts += 1 + end + return base + end + + def should_stop? 
+ @problem.is_optimal?(@best_solution) or (@generation >= heuristic_total_generations) + end + + def heuristic_total_generations + return 200 + end + + def heuristic_population_size + 50 + end + + def heuristic_crossover_rate + 0.2 + end + + def heuristic_selection_num_bouts + 3 + end + +end + + +# run it +Random.seed(Time.now.to_f) +problem = SymbolicRegressionProblem.new +algorithm = GrammaticalEvolutionAlgorithm.new +algorithm.evolve(problem) +puts "Finished, best solution: #{ algorithm.best_solution}" +puts "bitstring: #{algorithm.best_solution.genome}" +puts "expression: #{algorithm.best_solution.expression}" diff --git a/deprecated/v0.2/grammaticalevolution_overview.txt b/deprecated/v0.2/grammaticalevolution_overview.txt new file mode 100755 index 00000000..c08ef1d6 --- /dev/null +++ b/deprecated/v0.2/grammaticalevolution_overview.txt @@ -0,0 +1,39 @@ +Grammatical Evolution Overview +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/09 JB Created + +Inspiration +: Grammatical Evolution is inspired by the theory of evolution by means of natural selection. + +Strategy +: The strategy of Grammatical Evolution is to evolve programs represented by variable length binary strings that are mapped onto an expression tree defined by a context free grammar. + +Grammatical Evolution (GE) is focused on the evolution of expression trees with a well defined grammatical structure. The grammar is problem specific and defined by a final method called Backus Normal Form (BNF). A differentiating feature of GE from other program evolving algorithms is that the base representation used for candidate solutions are binary strings which are mapped into integers and eventually onto a domain specific BNF. 
+ +As such GE exploits the adaptive potential of the evolutionary process applied to a sub-symbolic representation, coupled with the power of mapping the sub-symbolic representation onto a context free grammar that can formally define a natural language, a programming language, or domain specific language. + +Procedure +The GE procedure is an evolutionary process where an initial randomly generated population of binary strings is adapted over a series of generations. A generation involves the mapping and evaluation of candidate solutions, the selection of a breeding set that replicate, and the integration of offspring into the population. + +New candidate solutions are created by crossing over two existing solutions and coping their representation with low frequency copying errors. GE uses variable length binary strings to represent candidate solutions, so the crossover operator must take this into consideration when selecting a cross point. Two additional genetic operators are used, prune operator that deletes sequences of bits from the genome, and a duplication operator that copies sequences of bits and adds them to the end of the genome. Both of the structure modifying genetic operators typically operate on 'gene' (parameter scope), that is sets of contiguous bits that map into a discrete parameter in the problem space duration candidate solution evaluation. + +The mapping process involves first decoding the binary string to a sequences of integers, and then consistently mapping the sequences of integers onto a syntactically correct expression using a BNF definition. + +Bits are read from the a candidate solutions genome in blocks of 8 and decoded to an integer (in the range between 0 and 2^8-1). If the end of the bitstring is reached when reading integers, the reading process loops back to the start of the string, effectively treating creating a circular genome. 
The integers are mapped to expressions from the BNF until a complete syntactically correct expression is formed. This may not decode and use a solutions entire genome or may decode the genome more than once given it's circular nature. + +A problem with this technique are genome failures. These are valid expression trees that produce invalid results, such as infinity, NaN or divide by zero errors. Such candidate solutions may be removed from the population or penalized for the errors. As such, a conservative approach is needed to manage the population with low frequency crossover, large generational gap, and low frequency mutation. + +Heuristics +* Use of a large generational gap, for example replace about half the population per generation (least fit individuals). +* A low crossover rate, such as 20% of new solutions +* A low mutation rate, such as 1/L (length of binary string) +* Penalization of expression trees that generate invalid results. + +Further Reading +* Grammatical Evolution: Evolutionary Automatic Programming in an Arbitrary Language (2003) +* Grammatical Evolution : Evolving Programs for an Arbitrary Language (1998) + http://www.grammatical-evolution.org/papers/eurogp98/index.html + (provides introduction to the technique and a simple symbolic regression example) +* Official website: http://www.grammatical-evolution.org diff --git a/deprecated/v0.2/grammaticalevolution_tutorial.txt b/deprecated/v0.2/grammaticalevolution_tutorial.txt new file mode 100755 index 00000000..cf595cdf --- /dev/null +++ b/deprecated/v0.2/grammaticalevolution_tutorial.txt @@ -0,0 +1,150 @@ +Grammatical Evolution Tutorial +Copyright (C) 2008 Jason Brownlee + +Change History +2008/12/09 JB Created + +Introduction +This tutorial demonstrates the Grammatical Evolution algorithm applied to a symbolic regression problem. 
+Based on the paper: http://www.grammatical-evolution.org/papers/eurogp98/index.html +Modified to include 1.0 in the terminal set and replace tan with exp in the pre-op set, as defined in Grammatical Evolution, IEEE Transactions in Evolutionary Computation (2001). + +Problem +The problem is involves composing an expression that sufficiently maps a set of input values onto a set of output values, minimizing error in the mapping. The problem is called symbolic regression. In this case the source of the mapping is the expression: + +def source(x) + (x**4.0) + (x**3.0) + (x**2.0) + x +end + +Input values are generated randomly from the range [-1, +1]. The set of valid symbols is defined in BNF as follows: + +(1) ::= (A) + | ( ) (B) + | ( ) (C) + | (D) + +(2) ::= + (A) + | - (B) + | / (C) + | * (D) + +(3) ::= Sin (A) + | Cos (B) + | Tan (C) + | Log (D) + +(4) ::= X + + +Where the state expression is equal to . A valid solution under these constraints is as follows: sin(X) + X + X * X. + +A simple string-replacement approach is taken to managing the BNF and constructing expressions from sequences of integers. The problem class defines the expressions as follows: + +@@exp = [" EXP BINARY EXP ", " (EXP BINARY EXP) ", " UNIARY(EXP) ", " VAR "] +@@op = ["+", "-", "/", "*" ] +@@preop = ["Math.sin", "Math.cos", "Math.exp", "Math.log"] +@@var = ["INPUT", "1.0"] +@@all = {"EXP"=>@@exp, "BINARY"=>@@op, "UNIARY"=>@@preop, "VAR"=>@@var} +@@start = "EXP" + +A candidate solution is mapped into these expression recursively, using decoded integers sequentially in the creation of the expression tree in a depth first manner. The size of the expression tree is bounded at heuristic_max_depth to avoid potential of blowing the stack given that there are only two terminal nodes in the BNF: the INPUT value and 1.0 in the @@var class variable. 
+ +def map(solution, str, depth=0) + @@all.keys.each do |key| + str = str.gsub(key) do |k| + set = @@all[k] + if key=="EXP" and depth>=heuristic_max_depth + map(solution, set[set.length - 1], depth+1) + else + i = solution.read_next_int.modulo(set.length) + map(solution, set[i], depth+1) + end + end + end + return str +end + +A candidate solution is evaluated by first mapping it onto a generic expression, then specializing the expression for a given input value to calculate its output value and finally its error from the source function. This is repeated a number of times, and the candidate solutions fitness value is assigned as the sum of the absolute errors. + +def cost(solution) + # reset the solution + solution.reset_read + # parse expression + solution.expression = map(solution, @@start) + # sum errors in the model + errors = 0.0 + heuristic_num_exposures.times do |i| + x = Random::next_float_bounded(@min, @max) + exp = solution.expression.gsub(@@var[0], x.to_s) + begin + score = eval(exp) + rescue + score = Numbers::NAN + end + errors += ((score.nan? ? 1.0 : score) - source(x)).abs + end + solution.fitness = errors +end + +Solution +A solution is defined by a bit string and a fitness evaluation. The circular reading of bits and decoding to integers is managed by a read_next_int function called by the problem when creating the problems expression. The consistent use of this approach requires a call to reset_read before a cost evaluation that sets the index to zero. + +def read_next_int + bitstring = Array.new(8) + 8.times do |i| + bitstring[i] = @genome[@index] + @index += 1 + reset_read if @index >= @genome.length + end + return decode_value(bitstring) +end + +The creation of new candidate solutions via crossover requires the careful selection of the cross point, and the size of the offsprings binary string. There are also some prune and duplicate genetic operators (not written yet). 
+ +def initialize_recombination(parent1, parent2) + # handle variable length genomes + lengths = [parent1.genome.length, parent2.genome.length] + min, max = lengths.min, lengths.max + cut = Random.next_int(min - 2) + 1 + # recombine the genomes with copy errors + @genome = Array.new(max) {|i| (i