Merge branch 'master' of https://github.com/HugoGranstrom/numericalnim
Showing 7 changed files with 339 additions and 2 deletions.
@@ -2,3 +2,6 @@ nimcache/
*.exe
bin/
.vscode/
*.code-workspace
*.html
!*.*
@@ -0,0 +1,108 @@
import strformat
import arraymancer
import sequtils
import math

proc steepest_descent*(deriv: proc(x: float64): float64, start: float64, gamma: float64 = 0.01, precision: float64 = 1e-5, max_iters: Natural = 1000): float64 {.inline.} =
  ## Gradient descent optimization algorithm for finding local minima of a function with derivative 'deriv'.
  ##
  ## Assuming that a multivariable function F is defined and differentiable near a minimum, F(x) decreases fastest
  ## when going in the direction negative to the gradient of F(a), similar to how water might traverse down a hill
  ## following the path of least resistance.
  ## The method can benefit from preconditioning when the coefficient matrix of the problem is ill-conditioned.
  ## Input:
  ##   - deriv: derivative of a multivariable function F
  ##   - start: starting point near F's minimum
  ##   - gamma: step size multiplier, used to control the step size between iterations
  ##   - precision: numerical precision
  ##   - max_iters: maximum iterations
  ##
  ## Returns:
  ##   - float64.
  var
    current = 0.0
    x = start

  for i in 0 .. max_iters:
    # calculate the next direction to propagate
    current = x
    x = current - gamma * deriv(current)

    # if we haven't moved much since the last iteration, break
    if abs(x - current) <= precision:
      break

    if i == max_iters:
      raise newException(ArithmeticError, "Maximum iterations for Steepest descent method exceeded")

  return x

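In equation form, each pass through the loop above applies the standard gradient descent update, where f' corresponds to the deriv argument:

\[ x_{n+1} = x_n - \gamma \, f'(x_n) \]

The iteration stops as soon as |x_{n+1} - x_n| <= precision, or raises ArithmeticError if max_iters is exceeded without converging.
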
proc conjugate_gradient*[T](A, b, x_0: Tensor[T], tolerance: float64): Tensor[T] =
  ## Conjugate Gradient method.
  ## Given a symmetric and positive-definite matrix A, solve the linear system Ax = b
  ## Symmetric matrix: square matrix that is equal to its transpose, transpose(A) == A
  ## Positive definite: square matrix such that transpose(x)Ax > 0 for all x in R^n
  ##
  ## Input:
  ##   - A: NxN square matrix
  ##   - b: vector on the right side of Ax=b
  ##   - x_0: initial guess vector
  ##
  ## Returns:
  ##   - Tensor.

  var r = b - (A * x_0)
  var p = r
  var rsold = (r.transpose() * r)[0, 0] # multiplication returns a Tensor, so we need the first element

  result = x_0

  var
    Ap = A
    alpha = 0.0
    rsnew = 0.0
    Ap_p = 0.0

  for i in 1 .. b.shape[0]:
    Ap = A * p
    Ap_p = (p.transpose() * Ap)[0, 0]
    alpha = rsold / Ap_p
    result = result + alpha * p
    r = r - alpha * Ap
    rsnew = (r.transpose() * r)[0, 0]
    if sqrt(rsnew) < tolerance:
      break
    p = r + (rsnew / rsold) * p
    rsold = rsnew

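The loop above is a direct transcription of the textbook conjugate gradient recurrences for a symmetric positive-definite A: starting from r_0 = b - A x_0 and p_0 = r_0,

\[ \alpha_k = \frac{r_k^T r_k}{p_k^T A p_k}, \qquad x_{k+1} = x_k + \alpha_k p_k, \qquad r_{k+1} = r_k - \alpha_k A p_k, \qquad p_{k+1} = r_{k+1} + \frac{r_{k+1}^T r_{k+1}}{r_k^T r_k} \, p_k \]

run for at most b.shape[0] iterations and stopped early once \(\lVert r \rVert\) < tolerance.
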
proc newtons*(f: proc(x: float64): float64, deriv: proc(x: float64): float64, start: float64, precision: float64 = 1e-5, max_iters: Natural = 1000): float64 {.raises: [ArithmeticError].} =
  ## Newton-Raphson implementation for 1-dimensional functions
  ##
  ## Given a single-variable function f and its derivative, calculate an approximation to f(x) = 0
  ## Input:
  ##   - f: "well behaved" function of a single variable with a known root
  ##   - deriv: derivative of f with respect to x
  ##   - start: starting x
  ##   - precision: numerical precision
  ##   - max_iters: maximum number of iterations
  ##
  ## Returns:
  ##   - float64.
  var
    x_iter = start
    i = 0
    current_f = f(start)

  while abs(current_f) >= precision and i <= max_iters:
    current_f = f(x_iter)
    x_iter = x_iter - (current_f / deriv(x_iter))
    i += 1
    if i == max_iters:
      raise newException(ArithmeticError, "Maximum iterations for Newtons method exceeded")

  return x_iter - (current_f / deriv(x_iter))

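For reference, the body of the while loop is the classic Newton-Raphson step, with deriv playing the role of f':

\[ x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)} \]

repeated until |f(x_n)| < precision, or until max_iters is hit and an ArithmeticError is raised.
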
@@ -0,0 +1,53 @@
import unittest, math, sequtils, arraymancer
import numericalnim

test "steepest_descent func":
  proc df(x: float): float = 4 * x^3 - 9.0 * x^2
  let start = 6.0
  let gamma = 0.01
  let precision = 0.00001
  let max_iters = 10000
  let correct = 2.24996
  let value = steepest_descent(df, start, gamma, precision, max_iters)
  check isClose(value, correct, tol = 1e-5)

test "steepest_descent func starting at zero":
  proc df(x: float): float = 4 * x^3 - 9.0 * x^2 + 4
  let start = 0.0
  let correct = -0.59301
  let value = steepest_descent(df, start)
  check isClose(value, correct, tol = 1e-5)

test "conjugate_gradient func":
  var A = toSeq([4.0, 1.0, 1.0, 3.0]).toTensor.reshape(2, 2).astype(float64)
  var x = toSeq([2.0, 1.0]).toTensor.reshape(2, 1)
  var b = toSeq([1.0, 2.0]).toTensor.reshape(2, 1)
  let tol = 0.001
  let correct = toSeq([0.090909, 0.636363]).toTensor.reshape(2, 1).astype(float64)

  let value = conjugate_gradient(A, b, x, tol)
  check isClose(value, correct, tol = 1e-5)

test "Newtons 1 dimension func":
  proc f(x: float64): float64 = (1.0 / 3.0) * x ^ 3 - 2 * x ^ 2 + 3 * x
  proc df(x: float64): float64 = x ^ 2 - 4 * x + 3
  let x = 0.5
  let correct = 0.0
  let value = newtons(f, df, x, 0.000001, 1000)
  check isClose(value, correct, tol = 1e-5)

test "Newtons 1 dimension func default args":
  proc f(x: float64): float64 = (1.0 / 3.0) * x ^ 3 - 2 * x ^ 2 + 3 * x
  proc df(x: float64): float64 = x ^ 2 - 4 * x + 3
  let x = 0.5
  let correct = 0.0
  let value = newtons(f, df, x)
  check isClose(value, correct, tol = 1e-5)

test "Newtons unable to find a root":
  proc bad_f(x: float64): float64 = pow(E, x) + 1
  proc bad_df(x: float64): float64 = pow(E, x)
  expect(ArithmeticError):
    discard newtons(bad_f, bad_df, 0, 0.000001, 1000)
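Assuming the usual Nimble package layout (the test file's path is not shown in this view), these tests would typically be run through the package's test setup, for example with nimble test if the package uses Nimble's default test task, or by compiling and running the test module directly with nim c -r.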