
Commit b28a440

Merge pull request #1719 from Shaikh-Ubaid/perceptron_pkg
PKG: Add package lnn
2 parents dd2413b + fb669dd commit b28a440

8 files changed, +291 -49 lines changed

integration_tests/CMakeLists.txt (+1)

@@ -419,6 +419,7 @@ RUN(NAME str_to_list_cast LABELS cpython llvm c)

 RUN(NAME test_package_01 LABELS cpython llvm)
 RUN(NAME test_pkg_lpdraw LABELS cpython llvm wasm)
+RUN(NAME test_pkg_lnn LABELS cpython llvm)

 RUN(NAME generics_01 LABELS cpython llvm c)
 RUN(NAME generics_02 LABELS cpython llvm c)

integration_tests/lnn/__init__.py

Whitespace-only changes.

integration_tests/lnn/perceptron/__init__.py (new file, +1)

from .perceptron_main import init_perceptron, train_dataset, test_perceptron, normalize_input_vectors, print_perceptron, Perceptron
integration_tests/lnn/perceptron/perceptron_main.py (new file, +127)

from lpython import dataclass, i32, f64
from sys import exit

@dataclass
class Perceptron:
    no_of_inputs: i32
    weights: list[f64]
    learn_rate: f64
    iterations_limit: i32
    des_accuracy: f64
    cur_accuracy: f64
    epochs_cnt: i32

def normalize(value: f64, leftMin: f64, leftMax: f64, rightMin: f64, rightMax: f64) -> f64:
    # Figure out how 'wide' each range is
    leftSpan: f64 = leftMax - leftMin
    rightSpan: f64 = rightMax - rightMin

    # Convert the left range into a 0-1 range (float)
    valueScaled: f64 = (value - leftMin) / leftSpan

    # Convert the 0-1 range into a value in the right range.
    return rightMin + (valueScaled * rightSpan)

def normalize_input_vectors(input_vectors: list[list[f64]]):
    rows: i32 = len(input_vectors)
    cols: i32 = len(input_vectors[0])

    j: i32
    for j in range(cols):
        colMinVal: f64 = input_vectors[0][j]
        colMaxVal: f64 = input_vectors[0][j]
        i: i32
        for i in range(rows):
            if input_vectors[i][j] > colMaxVal:
                colMaxVal = input_vectors[i][j]
            if input_vectors[i][j] < colMinVal:
                colMinVal = input_vectors[i][j]

        for i in range(rows):
            input_vectors[i][j] = normalize(input_vectors[i][j], colMinVal, colMaxVal, -1.0, 1.0)


def get_inp_vec_with_bias(a: list[f64]) -> list[f64]:
    b: list[f64] = []
    i: i32
    for i in range(len(a)):
        b.append(a[i])
    b.append(1.0)
    return b

def init_weights(size: i32) -> list[f64]:
    weights: list[f64] = []
    i: i32
    for i in range(size):
        weights.append(0.0)
    weights.append(0.0) # append bias
    return weights

def init_perceptron(p: Perceptron, n: i32, rate: f64, iterations_limit: i32, des_accuracy: f64):
    if (n < 1 or n > 1000):
        print("no_of_inputs must be between [1, 1000]")
        exit(1)
    p.no_of_inputs = n
    p.weights = init_weights(n)
    p.learn_rate = rate
    p.iterations_limit = iterations_limit
    p.des_accuracy = des_accuracy
    p.cur_accuracy = 0.0
    p.epochs_cnt = 0

def train_perceptron(p: Perceptron, input_vector: list[f64], actual_output: i32):
    predicted_output: i32 = predict_perceptron(p, input_vector)
    error: i32 = actual_output - predicted_output
    i: i32
    for i in range(len(input_vector)):
        p.weights[i] += p.learn_rate * f64(error) * f64(input_vector[i])

def predict_perceptron(p: Perceptron, input_vector: list[f64]) -> i32:
    weighted_sum: f64 = 0.0
    i: i32 = 0
    for i in range(len(input_vector)):
        weighted_sum = weighted_sum + p.weights[i] * f64(input_vector[i])
    return activation_function(weighted_sum)

def activation_function(value: f64) -> i32:
    if value >= 0.0:
        return 1
    return -1

def train_epoch(p: Perceptron, input_vectors: list[list[f64]], outputs: list[i32]):
    i: i32
    for i in range(len(input_vectors)):
        input_vector: list[f64] = get_inp_vec_with_bias(input_vectors[i])
        if predict_perceptron(p, input_vector) != outputs[i]:
            train_perceptron(p, input_vector, outputs[i])

def train_dataset(p: Perceptron, input_vectors: list[list[f64]], outputs: list[i32]):
    p.cur_accuracy = 0.0
    p.epochs_cnt = 0
    while p.cur_accuracy < p.des_accuracy and p.epochs_cnt < p.iterations_limit:
        p.epochs_cnt += 1
        train_epoch(p, input_vectors, outputs)
        p.cur_accuracy = test_perceptron(p, input_vectors, outputs)

def test_perceptron(p: Perceptron, input_vectors: list[list[f64]], outputs: list[i32]) -> f64:
    correctly_classified_cnt: i32 = 0
    i: i32
    for i in range(len(input_vectors)):
        input_vector: list[f64] = get_inp_vec_with_bias(input_vectors[i])
        if predict_perceptron(p, input_vector) == outputs[i]:
            correctly_classified_cnt += 1
    return (correctly_classified_cnt / len(input_vectors)) * 100.0

def print_perceptron(p: Perceptron):
    print("weights = [", end = "")
    i: i32
    for i in range(p.no_of_inputs):
        print(p.weights[i], end = ", ")
    print(p.weights[p.no_of_inputs], end = "(bias)]\n")
    print("learn_rate = ", end = "")
    print(p.learn_rate)
    print("accuracy = ", end = "")
    print(p.cur_accuracy)
    print("epochs_cnt = ", end = "")
    print(p.epochs_cnt)
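
For readers unfamiliar with the algorithm: train_perceptron applies the classic perceptron update w_i <- w_i + learn_rate * (actual - predicted) * x_i, with a constant 1.0 appended to every input vector (get_inp_vec_with_bias) so the bias is learned as one extra weight. A minimal standalone sketch of the same rule in plain Python (hypothetical illustration with made-up names and data, not part of this commit):

# Plain-Python sketch of the update rule used by train_perceptron above.
# Each input already carries a trailing 1.0 as the bias term, mirroring get_inp_vec_with_bias.
def predict(weights, x):
    s = sum(w * xi for w, xi in zip(weights, x))
    return 1 if s >= 0.0 else -1

def update(weights, x, target, learn_rate):
    error = target - predict(weights, x)      # 0 when correct, otherwise +2 or -2
    for i in range(len(weights)):
        weights[i] += learn_rate * error * x[i]

w = [0.0, 0.0, 0.0]                           # two inputs plus the bias weight
data = [([-1.0, -1.0, 1.0], 1), ([-1.0, 1.0, 1.0], 1),
        ([1.0, -1.0, 1.0], 1), ([1.0, 1.0, 1.0], -1)]
for _ in range(100):                          # same linearly separable data as main0 below
    for x, y in data:
        update(w, x, y, 0.05)
assert all(predict(w, x) == y for x, y in data)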

integration_tests/lpdraw/draw.py (+2 -1)

@@ -5,7 +5,8 @@
 W = TypeVar("W")

 def Pixel(H: i32, W: i32, Screen: i32[H, W], x: i32, y: i32) -> None:
-    Screen[y, x] = 255
+    if x >= 0 and y >= 0 and x < W and y < H:
+        Screen[i32(int(H - 1 - y)), i32(int(x))] = 255

 def Clear(H: i32, W: i32, Screen: i32[H, W]):
     i: i32
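
The rewritten Pixel ignores coordinates outside the screen and flips the y axis so that y = 0 addresses the bottom row of the buffer rather than the top. A rough plain-Python illustration of that mapping (hypothetical sketch using a nested list instead of the i32[H, W] array, not part of this commit):

# Hypothetical sketch of the clipping and y-flip performed by the new Pixel.
H, W = 4, 4
screen = [[0] * W for _ in range(H)]

def pixel(x, y):
    if x >= 0 and y >= 0 and x < W and y < H:   # clip instead of writing out of bounds
        screen[H - 1 - y][x] = 255              # y = 0 maps to the bottom row

pixel(0, 0)        # bottom-left corner
pixel(10, 10)      # outside the 4x4 screen: silently ignored
assert screen[H - 1][0] == 255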

integration_tests/test_pkg_lnn.py (new file, +89)

from lnn.perceptron import init_perceptron, print_perceptron, normalize_input_vectors, Perceptron, train_dataset
from lpdraw import Line, Circle, Display, Clear
from lpython import i32, f64, Const
from numpy import empty, int32


def compute_decision_boundary(p: Perceptron, x: f64) -> f64:
    bias: f64 = p.weights[-1]
    slope: f64 = (-p.weights[0] / p.weights[1])
    intercept: f64 = (-bias / p.weights[1])
    return slope * x + intercept

def plot_graph(p: Perceptron, input_vectors: list[list[f64]], outputs: list[i32]):
    Width: Const[i32] = 500 # x-axis limits [0, 499]
    Height: Const[i32] = 500 # y-axis limits [0, 499]
    Screen: i32[Height, Width] = empty((Height, Width), dtype=int32)
    Clear(Height, Width, Screen)

    x1: f64 = 2.0
    y1: f64 = compute_decision_boundary(p, x1)
    x2: f64 = -2.0
    y2: f64 = compute_decision_boundary(p, x2)

    # center the graph using the following offset
    scale_offset: f64 = Width / 4
    shift_offset: f64 = Width / 2
    x1 *= scale_offset
    y1 *= scale_offset
    x2 *= scale_offset
    y2 *= scale_offset

    # print (x1, y1, x2, y2)
    Line(Height, Width, Screen, i32(x1 + shift_offset), i32(y1 + shift_offset), i32(x2 + shift_offset), i32(y2 + shift_offset))

    i: i32
    point_size: i32 = 5
    for i in range(len(input_vectors)):
        input_vectors[i][0] *= scale_offset
        input_vectors[i][1] *= scale_offset
        input_vectors[i][0] += shift_offset
        input_vectors[i][1] += shift_offset
        if outputs[i] == 1:
            x: i32 = i32(input_vectors[i][0])
            y: i32 = i32(input_vectors[i][1])
            Line(Height, Width, Screen, x - point_size, y, x + point_size, y)
            Line(Height, Width, Screen, x, y - point_size, x, y + point_size)
        else:
            Circle(Height, Width, Screen, i32(input_vectors[i][0]), i32(input_vectors[i][1]), f64(point_size))

    Display(Height, Width, Screen)

def main0():
    p: Perceptron = Perceptron(0, [0.0], 0.0, 0, 0.0, 0.0, 0)
    init_perceptron(p, 2, 0.05, 10000, 90.0)
    print_perceptron(p)
    print("=================================")

    input_vectors: list[list[f64]] = [[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0]]
    outputs: list[i32] = [1, 1, 1, -1]

    normalize_input_vectors(input_vectors)
    train_dataset(p, input_vectors, outputs)
    print_perceptron(p)

    assert p.cur_accuracy > 50.0
    assert p.epochs_cnt > 1

    plot_graph(p, input_vectors, outputs)

def main1():
    p: Perceptron = Perceptron(0, [0.0], 0.0, 0, 0.0, 0.0, 0)
    init_perceptron(p, 2, 0.05, 10000, 90.0)
    print_perceptron(p)
    print("=================================")

    input_vectors: list[list[f64]] = [[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0], [1.5, 1.0]]
    outputs: list[i32] = [1, 1, -1, 1, -1]

    normalize_input_vectors(input_vectors)
    train_dataset(p, input_vectors, outputs)
    print_perceptron(p)

    assert p.cur_accuracy > 50.0
    assert p.epochs_cnt > 1

    plot_graph(p, input_vectors, outputs)

main0()
main1()
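
For reference, compute_decision_boundary follows from setting the perceptron's weighted sum to zero: with the bias stored as the last weight, w0*x + w1*y + bias = 0 gives y = -(w0/w1)*x - bias/w1, which is exactly the slope and intercept computed in the test. A small plain-Python check (hypothetical weight values, not part of the test):

# Check that a point from the boundary formula satisfies w0*x + w1*y + bias = 0.
w0, w1, bias = 0.1, 0.2, -0.05     # hypothetical trained weights
def boundary_y(x):
    return (-w0 / w1) * x + (-bias / w1)

x = 2.0
y = boundary_y(x)
assert abs(w0 * x + w1 * y + bias) < 1e-12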

src/libasr/pass/pass_array_by_data.cpp (+51 -44)

@@ -359,68 +359,75 @@ class EditProcedureCallsVisitor : public ASR::ASRPassBaseWalkVisitor<EditProcedu
         al(al_), v(v_) {}

     template <typename T>
-    void visit_Call(const T& x) {
-        ASR::symbol_t* subrout_sym = x.m_name;
-        bool is_external = ASR::is_a<ASR::ExternalSymbol_t>(*subrout_sym);
-        subrout_sym = ASRUtils::symbol_get_past_external(subrout_sym);
-        if( v.proc2newproc.find(subrout_sym) == v.proc2newproc.end() ) {
-            bool args_updated = false;
-            Vec<ASR::call_arg_t> new_args;
-            new_args.reserve(al, x.n_args);
-            for ( size_t i = 0; i < x.n_args; i++ ) {
-                ASR::call_arg_t arg = x.m_args[i];
-                ASR::expr_t* expr = arg.m_value;
-                bool use_original_arg = true;
-                if (expr) {
-                    if (ASR::is_a<ASR::Var_t>(*expr)) {
-                        ASR::Var_t* var = ASR::down_cast<ASR::Var_t>(expr);
-                        ASR::symbol_t* sym = var->m_v;
-                        if ( v.proc2newproc.find(sym) != v.proc2newproc.end() ) {
-                            ASR::symbol_t* new_var_sym = v.proc2newproc[sym].first;
-                            ASR::expr_t* new_var = ASRUtils::EXPR(ASR::make_Var_t(al, var->base.base.loc, new_var_sym));
-                            ASR::call_arg_t new_arg;
-                            new_arg.m_value = new_var;
-                            new_arg.loc = arg.loc;
-                            new_args.push_back(al, new_arg);
-                            args_updated = true;
-                            use_original_arg = false;
+    void update_args_for_pass_arr_by_data_funcs_passed_as_callback(const T& x) {
+        bool args_updated = false;
+        Vec<ASR::call_arg_t> new_args;
+        new_args.reserve(al, x.n_args);
+        for ( size_t i = 0; i < x.n_args; i++ ) {
+            ASR::call_arg_t arg = x.m_args[i];
+            ASR::expr_t* expr = arg.m_value;
+            if (expr) {
+                if (ASR::is_a<ASR::Var_t>(*expr)) {
+                    ASR::Var_t* var = ASR::down_cast<ASR::Var_t>(expr);
+                    ASR::symbol_t* sym = var->m_v;
+                    if ( v.proc2newproc.find(sym) != v.proc2newproc.end() ) {
+                        ASR::symbol_t* new_var_sym = v.proc2newproc[sym].first;
+                        ASR::expr_t* new_var = ASRUtils::EXPR(ASR::make_Var_t(al, var->base.base.loc, new_var_sym));
+                        {
+                            // update exisiting arg
+                            arg.m_value = new_var;
+                            arg.loc = arg.loc;
                         }
+                        args_updated = true;
                     }
                 }
-                if( use_original_arg ) {
-                    new_args.push_back(al, arg);
-                }
             }
-            if (args_updated) {
-                T&xx = const_cast<T&>(x);
-                xx.m_args = new_args.p;
-                xx.n_args = new_args.size();
-            }
-            return ;
+            new_args.push_back(al, arg);
         }
+        if (args_updated) {
+            T&xx = const_cast<T&>(x);
+            xx.m_args = new_args.p;
+            xx.n_args = new_args.size();
+        }
+    }

-        ASR::symbol_t* new_func_sym = v.proc2newproc[subrout_sym].first;
-        std::vector<size_t>& indices = v.proc2newproc[subrout_sym].second;
-
+    Vec<ASR::call_arg_t> construct_new_args(size_t n_args, ASR::call_arg_t* orig_args, std::vector<size_t>& indices) {
         Vec<ASR::call_arg_t> new_args;
-        new_args.reserve(al, x.n_args);
-        for( size_t i = 0; i < x.n_args; i++ ) {
-            new_args.push_back(al, x.m_args[i]);
-            if( std::find(indices.begin(), indices.end(), i) == indices.end() ||
-                x.m_args[i].m_value == nullptr ) {
-                continue ;
+        new_args.reserve(al, n_args);
+        for( size_t i = 0; i < n_args; i++ ) {
+            new_args.push_back(al, orig_args[i]);
+            if (orig_args[i].m_value == nullptr ||
+                std::find(indices.begin(), indices.end(), i) == indices.end()) {
+                continue;
             }

             Vec<ASR::expr_t*> dim_vars;
             dim_vars.reserve(al, 2);
-            ASRUtils::get_dimensions(x.m_args[i].m_value, dim_vars, al);
+            ASRUtils::get_dimensions(orig_args[i].m_value, dim_vars, al);
             for( size_t j = 0; j < dim_vars.size(); j++ ) {
                 ASR::call_arg_t dim_var;
                 dim_var.loc = dim_vars[j]->base.loc;
                 dim_var.m_value = dim_vars[j];
                 new_args.push_back(al, dim_var);
             }
         }
+        return new_args;
+    }
+
+    template <typename T>
+    void visit_Call(const T& x) {
+        ASR::symbol_t* subrout_sym = x.m_name;
+        bool is_external = ASR::is_a<ASR::ExternalSymbol_t>(*subrout_sym);
+        subrout_sym = ASRUtils::symbol_get_past_external(subrout_sym);
+        if( v.proc2newproc.find(subrout_sym) == v.proc2newproc.end() ) {
+            update_args_for_pass_arr_by_data_funcs_passed_as_callback(x);
+            return;
+        }
+
+        ASR::symbol_t* new_func_sym = v.proc2newproc[subrout_sym].first;
+        std::vector<size_t>& indices = v.proc2newproc[subrout_sym].second;
+
+        Vec<ASR::call_arg_t> new_args = construct_new_args(x.n_args, x.m_args, indices);

         {
             ASR::Function_t* new_func_ = ASR::down_cast<ASR::Function_t>(new_func_sym);
0 commit comments
