
Commit 1191169

Updated directory structure.
1 parent c003c8b commit 1191169

3 files changed: 276 additions, 0 deletions
Lines changed: 121 additions & 0 deletions
@@ -0,0 +1,121 @@
#!/usr/bin/env python3

"""
2.3

Adaptive Random Search was designed to address the limitations
of the fixed step size in the Localized Random Search algorithm.
The strategy of Adaptive Random Search is to continually approximate
the optimal step size required to reach the global optimum in the
search space. This is achieved by trialling smaller or larger step
sizes and adopting them only if they improve the search performance.

Each iteration trials a larger step and adopts it if it yields an
improved result. Very large step sizes are trialled in the same
manner, although with a much lower frequency. This preference for
large moves is intended to allow the technique to escape local
optima. Smaller step sizes are adopted if no improvement is made
for an extended period.

The example problem below is similar to the one solved by Random Search [2.2].

@author Chad Estioco
"""

import random

def objective_function(vector):
    """
    Similar to the one in [2.2].
    """
    total = 0

    for val in vector:
        total += val ** 2

    return total

def rand_in_bounds(minimum, maximum):
    return minimum + ((maximum - minimum) * random.random())

def random_vector(minmax):
    """
    _Essentially_ similar to the one in [2.2].
    """
    limit = len(minmax)
    vector = [0 for _ in range(limit)]

    for i in range(limit):
        vector[i] = rand_in_bounds(minmax[i][0], minmax[i][1])

    return vector

def take_step(minmax, current, step_size):
    limit = len(current)
    position = [0 for _ in range(limit)]

    for i in range(limit):
        # Clamp the step so the new position stays inside the bounds.
        minimum = max(minmax[i][0], current[i] - step_size)
        maximum = min(minmax[i][1], current[i] + step_size)
        position[i] = rand_in_bounds(minimum, maximum)

    return position

def large_step_size(iter_count, step_size, s_factor, l_factor, iter_mult):
    # Every iter_mult iterations, trial a much larger step.
    if iter_count > 0 and iter_count % iter_mult == 0:
        return step_size * l_factor
    else:
        return step_size * s_factor

def take_steps(bounds, current, step_size, big_stepsize):
    step, big_step = {}, {}
    step["vector"] = take_step(bounds, current["vector"], step_size)
    step["cost"] = objective_function(step["vector"])
    big_step["vector"] = take_step(bounds, current["vector"], big_stepsize)
    big_step["cost"] = objective_function(big_step["vector"])
    return step, big_step

def search(max_iter, bounds, init_factor, s_factor, l_factor, iter_mult, max_no_impr):
    step_size = (bounds[0][1] - bounds[0][0]) * init_factor
    current, count = {}, 0
    current["vector"] = random_vector(bounds)
    current["cost"] = objective_function(current["vector"])

    for i in range(max_iter):
        big_stepsize = large_step_size(i, step_size, s_factor, l_factor, iter_mult)
        step, big_step = take_steps(bounds, current, step_size, big_stepsize)

        if step["cost"] <= current["cost"] or big_step["cost"] <= current["cost"]:
            if big_step["cost"] <= step["cost"]:
                step_size, current = big_stepsize, big_step
            else:
                current = step

            count = 0
        else:
            count += 1

            # Shrink the step size after an extended period without improvement.
            if count >= max_no_impr:
                count, step_size = 0, step_size / s_factor

        print("Iteration " + str(i) + ": best = " + str(current["cost"]))

    return current

if __name__ == "__main__":
    # problem configuration
    problem_size = 2
    bounds = [[-5, 5] for i in range(problem_size)]

    # algorithm configuration
    max_iter = 1000
    init_factor = 0.05
    s_factor = 1.3
    l_factor = 3.0
    iter_mult = 10
    max_no_impr = 30

    # execute the algorithm
    best = search(max_iter, bounds, init_factor, s_factor, l_factor, iter_mult, max_no_impr)
    print("Done. Best Solution: cost = " + str(best["cost"]) + ", v = " + str(best["vector"]))
Lines changed: 63 additions & 0 deletions
@@ -0,0 +1,63 @@
#!/usr/bin/env python3

"""
2.2

Random Search samples solutions from across the entire search space
using a uniform probability distribution. Each future sample is
independent of the samples that came before it.

The example problem (solved by the code below) is an instance of
continuous function optimization that seeks min f(x) where
f = \sum_{i=1}^{n} x_i^2, -5 <= x_i <= 5 and n = 2.

@author Chad Estioco
"""

import random

def objective_function(vector):
    total = 0

    for val in vector:
        total += val ** 2

    return total

def random_vector(minmax):
    limit = len(minmax)
    vector = [0 for _ in range(limit)]

    for i in range(limit):
        # Draw each component uniformly from its [min, max] interval.
        minimum = minmax[i][0]
        vector[i] = minimum + ((minmax[i][1] - minimum) * random.random())

    return vector

def search(search_space, max_iter):
    best = None

    for i in range(max_iter):
        candidate = {}
        candidate['vector'] = random_vector(search_space)
        candidate['cost'] = objective_function(candidate['vector'])

        if best is None or candidate['cost'] < best['cost']:
            best = candidate

        print("Iteration " + str(i) + ": best = " + str(best['cost']))

    return best

if __name__ == "__main__":
    # problem configuration
    problem_size = 2
    search_space = [[-5, 5] for i in range(problem_size)]

    # algorithm configuration
    max_iter = 100

    # execute the algorithm
    best = search(search_space, max_iter)
    print("Done. Best Solution: cost = " + str(best['cost']) + ", v = " + str(best['vector']))
Lines changed: 92 additions & 0 deletions
@@ -0,0 +1,92 @@
#!/usr/bin/env python3

"""
2.4

Stochastic Hill Climbing iterates the process of randomly selecting
a neighbor of a candidate solution and accepting it only if it
results in an improvement. The strategy was proposed to address the
limitations of deterministic hill climbing techniques that were
likely to get stuck in local optima due to their greedy acceptance
of neighboring moves.

This code implements the Random Mutation Hill Climbing algorithm, a
specific instance of Stochastic Hill Climbing. It is applied to a
binary string optimization problem called "One Max": produce a
string of all '1' bits, where the cost function reports the number
of '1' bits in a given string.

Implementation notes:
The reference implementation uses a list of (one-character) strings.
I opted to use a string object directly; see the list-based sketch
after the code.

@author Chad Estioco
"""

import random

def onemax(vector):
    limit = len(vector)
    one_count = 0

    for i in range(limit):
        if vector[i] == "1":
            one_count += 1

    return one_count

def random_bitstring(num_bits):
    def generator():
        # Each bit is '1' or '0' with equal probability.
        if random.random() < 0.5:
            return "1"
        else:
            return "0"

    return "".join(generator() for i in range(num_bits))

def random_neighbor(bitstring):
    limit = len(bitstring)
    pos = random.randint(0, limit - 1)

    # Flip a single randomly chosen bit. Strings are immutable, so
    # build a new string from the unchanged prefix and suffix.
    if bitstring[pos] == "1":
        return "".join((bitstring[0:pos], "0", bitstring[pos + 1:limit]))
    else:
        return "".join((bitstring[0:pos], "1", bitstring[pos + 1:limit]))

def search(max_iterations, num_bits):
    candidate = {}
    candidate["vector"] = random_bitstring(num_bits)
    candidate["cost"] = onemax(candidate["vector"])

    for i in range(max_iterations):
        neighbor = {}
        neighbor["vector"] = random_neighbor(candidate["vector"])
        neighbor["cost"] = onemax(neighbor["vector"])

        if neighbor["cost"] >= candidate["cost"]:
            candidate = neighbor

        print("Iteration " + str(i) + ": best = " + str(candidate["cost"]))

        if candidate["cost"] == num_bits:
            break

    return candidate

if __name__ == "__main__":
    # problem configuration
    num_bits = 64

    # algorithm configuration
    max_iterations = 1000

    # execute the algorithm
    best = search(max_iterations, num_bits)
    print("Done. Best Solution: cost = " + str(best["cost"]) + ", v = " + str(best["vector"]))
