struct ExplicitMPC{NT<:Real, SE<:StateEstimator} <: PredictiveController{NT}
estim::SE
transcription::SingleShooting
Z̃::Vector{NT}
ŷ::Vector{NT}
Hp::Int
Hc::Int
nϵ::Int
weights::ControllerWeights{NT}
R̂u::Vector{NT}
R̂y::Vector{NT}
P̃Δu::Matrix{NT}
P̃u ::Matrix{NT}
Tu ::Matrix{NT}
Tu_lastu0::Vector{NT}
Ẽ::Matrix{NT}
F::Vector{NT}
G::Matrix{NT}
J::Matrix{NT}
K::Matrix{NT}
V::Matrix{NT}
B::Vector{NT}
H̃::Hermitian{NT, Matrix{NT}}
q̃::Vector{NT}
r::Vector{NT}
H̃_chol::Cholesky{NT, Matrix{NT}}
Ks::Matrix{NT}
Ps::Matrix{NT}
d0::Vector{NT}
D̂0::Vector{NT}
D̂e::Vector{NT}
Uop::Vector{NT}
Yop::Vector{NT}
Dop::Vector{NT}
buffer::PredictiveControllerBuffer{NT}
function ExplicitMPC{NT}(
estim::SE, Hp, Hc, M_Hp, N_Hc, L_Hp
) where {NT<:Real, SE<:StateEstimator}
model = estim.model
nu, ny, nd, nx̂ = model.nu, model.ny, model.nd, estim.nx̂
ŷ = copy(model.yop) # dummy vals (updated just before optimization)
nϵ = 0 # no slack variable ϵ for ExplicitMPC
weights = ControllerWeights{NT}(model, Hp, Hc, M_Hp, N_Hc, L_Hp)
# convert `Diagonal` to normal `Matrix` if required:
M_Hp = Hermitian(convert(Matrix{NT}, M_Hp), :L)
N_Hc = Hermitian(convert(Matrix{NT}, N_Hc), :L)
L_Hp = Hermitian(convert(Matrix{NT}, L_Hp), :L)
# dummy vals (updated just before optimization):
R̂y, R̂u, Tu_lastu0 = zeros(NT, ny*Hp), zeros(NT, nu*Hp), zeros(NT, nu*Hp)
transcription = SingleShooting() # explicit MPC only supports SingleShooting
PΔu = init_ZtoΔU(estim, transcription, Hp, Hc)
Pu, Tu = init_ZtoU(estim, transcription, Hp, Hc)
E, G, J, K, V, B = init_predmat(model, estim, transcription, Hp, Hc)
# dummy val (updated just before optimization):
F, fx̂ = zeros(NT, ny*Hp), zeros(NT, nx̂)
P̃Δu, P̃u, Ñ_Hc, Ẽ = PΔu, Pu, N_Hc, E # no slack variable ϵ for ExplicitMPC
H̃ = init_quadprog(model, weights, Ẽ, P̃Δu, P̃u)
# dummy vals (updated just before optimization):
q̃, r = zeros(NT, size(H̃, 1)), zeros(NT, 1)
H̃_chol = cholesky(H̃)
Ks, Ps = init_stochpred(estim, Hp)
# dummy vals (updated just before optimization):
d0, D̂0, D̂e = zeros(NT, nd), zeros(NT, nd*Hp), zeros(NT, nd + nd*Hp)
Uop, Yop, Dop = repeat(model.uop, Hp), repeat(model.yop, Hp), repeat(model.dop, Hp)
nZ̃ = get_nZ(estim, transcription, Hp, Hc) + nϵ
Z̃ = zeros(NT, nZ̃)
buffer = PredictiveControllerBuffer(estim, transcription, Hp, Hc, nϵ)
mpc = new{NT, SE}(
estim,
transcription,
Z̃, ŷ,
Hp, Hc, nϵ,
weights,
R̂u, R̂y,
P̃Δu, P̃u, Tu, Tu_lastu0,
Ẽ, F, G, J, K, V, B,
H̃, q̃, r,
H̃_chol,
Ks, Ps,
d0, D̂0, D̂e,
Uop, Yop, Dop,
buffer
)
return mpc
end
end
@doc raw"""
    ExplicitMPC(model::LinModel; <keyword arguments>)

Construct an explicit linear predictive controller based on [`LinModel`](@ref) `model`.

The controller minimizes the following objective function at each discrete time ``k``:
```math
\begin{aligned}
\min_{\mathbf{ΔU}}\ &\mathbf{(R̂_y - Ŷ)}' \mathbf{M}_{H_p} \mathbf{(R̂_y - Ŷ)}   \\
                  + &\mathbf{(ΔU)}'      \mathbf{N}_{H_c} \mathbf{(ΔU)}        \\
                  + &\mathbf{(R̂_u - U)}' \mathbf{L}_{H_p} \mathbf{(R̂_u - U)}
\end{aligned}
```

See [`LinMPC`](@ref) for the variable definitions. This controller does not support
constraints, but its computational cost is extremely low (a single array division), making
it suitable for applications that require small sample times. The keyword arguments are
identical to [`LinMPC`](@ref), except for `Cwt` and `optim` which are not supported. This
controller uses a [`SingleShooting`](@ref) transcription method.

This method uses the default state estimator, a [`SteadyKalmanFilter`](@ref) with default
arguments. This controller is allocation-free.

# Examples
```jldoctest
julia> model = LinModel([tf(3, [30, 1]); tf(-2, [5, 1])], 4);

julia> mpc = ExplicitMPC(model, Mwt=[0, 1], Nwt=[0.5], Hp=30, Hc=1)
ExplicitMPC controller with a sample time Ts = 4.0 s, SteadyKalmanFilter estimator and:
30 prediction steps Hp
1 control steps Hc
1 manipulated inputs u (0 integrating states)
4 estimated states x̂
2 measured outputs ym (2 integrating states)
0 unmeasured outputs yu
0 measured disturbances d
```
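
A minimal closed-loop sketch is shown below. It is illustrative only (not a doctest): it
assumes the package's `initstate!`, `preparestate!`, `moveinput!`, `updatestate!` and
`evaloutput` functions, whose exact call sequence can differ between package versions, and
the built-in `sim!` function automates the same loop:
```julia
model = LinModel([tf(3, [30, 1]); tf(-2, [5, 1])], 4)
mpc   = ExplicitMPC(model, Mwt=[0, 1], Nwt=[0.5], Hp=30, Hc=1)
plant = deepcopy(model)                       # plant simulator, here identical to `model`
initstate!(mpc, model.uop, evaloutput(plant)) # start at the operating point
ry = [0, 1]                                   # output setpoints
for k = 0:25
    ym = evaloutput(plant)                    # all outputs are measured here
    preparestate!(mpc, ym)                    # correct the state estimate with ym
    u  = moveinput!(mpc, ry)                  # analytic solution of the unconstrained QP
    updatestate!(mpc, u, ym)                  # propagate the controller state estimate
    updatestate!(plant, u)                    # propagate the plant simulator
end
```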
"""
function ExplicitMPC(
model::LinModel;
Hp::Int = default_Hp(model),
Hc::Int = DEFAULT_HC,
Mwt = fill(DEFAULT_MWT, model.ny),
Nwt = fill(DEFAULT_NWT, model.nu),
Lwt = fill(DEFAULT_LWT, model.nu),
M_Hp = diagm(repeat(Mwt, Hp)),
N_Hc = diagm(repeat(Nwt, Hc)),
L_Hp = diagm(repeat(Lwt, Hp)),
kwargs...
)
estim = SteadyKalmanFilter(model; kwargs...)
return ExplicitMPC(estim; Hp, Hc, Mwt, Nwt, Lwt, M_Hp, N_Hc, L_Hp)
end
"""
    ExplicitMPC(estim::StateEstimator; <keyword arguments>)

Use custom state estimator `estim` to construct `ExplicitMPC`.

`estim.model` must be a [`LinModel`](@ref). Otherwise, a [`NonLinMPC`](@ref) is required.
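
For instance, a sketch with a [`KalmanFilter`](@ref) where only the second output is
measured (an illustrative choice, assuming the estimator constructors' `i_ym` keyword
argument):
```julia
model = LinModel([tf(3, [30, 1]); tf(-2, [5, 1])], 4)
estim = KalmanFilter(model, i_ym=[2])                  # the 1st output is unmeasured
mpc   = ExplicitMPC(estim, Mwt=[0, 1], Nwt=[0.5], Hp=30, Hc=1)
```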
"""
function ExplicitMPC(
estim::SE;
Hp::Int = default_Hp(estim.model),
Hc::Int = DEFAULT_HC,
Mwt = fill(DEFAULT_MWT, estim.model.ny),
Nwt = fill(DEFAULT_NWT, estim.model.nu),
Lwt = fill(DEFAULT_LWT, estim.model.nu),
M_Hp = diagm(repeat(Mwt, Hp)),
N_Hc = diagm(repeat(Nwt, Hc)),
L_Hp = diagm(repeat(Lwt, Hp)),
) where {NT<:Real, SE<:StateEstimator{NT}}
isa(estim.model, LinModel) || error("estim.model type must be a LinModel")
nk = estimate_delays(estim.model)
if Hp ≤ nk
@warn("prediction horizon Hp ($Hp) ≤ estimated number of delays in model "*
"($nk), the closed-loop system may be unstable or zero-gain (unresponsive)")
end
return ExplicitMPC{NT}(estim, Hp, Hc, M_Hp, N_Hc, L_Hp)
end
setconstraint!(::ExplicitMPC; kwargs...) = error("ExplicitMPC does not support constraints.")
function Base.show(io::IO, mpc::ExplicitMPC)
Hp, Hc = mpc.Hp, mpc.Hc
nu, nd = mpc.estim.model.nu, mpc.estim.model.nd
nx̂, nym, nyu = mpc.estim.nx̂, mpc.estim.nym, mpc.estim.nyu
n = maximum(ndigits.((Hp, Hc, nu, nx̂, nym, nyu, nd))) + 1
println(io, "$(typeof(mpc).name.name) controller with a sample time Ts = "*
"$(mpc.estim.model.Ts) s, "*
"$(typeof(mpc.estim).name.name) estimator and:")
println(io, "$(lpad(Hp, n)) prediction steps Hp")
println(io, "$(lpad(Hc, n)) control steps Hc")
print_estim_dim(io, mpc.estim, n)
end
linconstraint!(::ExplicitMPC, ::LinModel, ::TranscriptionMethod) = nothing
@doc raw"""
    optim_objective!(mpc::ExplicitMPC) -> Z̃

Analytically solve the optimization problem for [`ExplicitMPC`](@ref).

The solution is ``\mathbf{Z̃ = - H̃^{-1} q̃}``, see [`init_quadprog`](@ref).
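
For reference, a standalone sketch (with a random positive-definite `H̃` and a random `q̃`,
not the `mpc` fields) showing that the in-place Cholesky solve used by this method matches
the dense solution ``\mathbf{-H̃^{-1} q̃}``:
```julia
using LinearAlgebra
H̃ = let A = randn(4, 4); Hermitian(A'A + I, :L) end   # random positive-definite Hessian
q̃ = randn(4)
Z̃_dense = -(H̃ \ q̃)                                    # dense solve of H̃*Z̃ = -q̃
Z̃_chol  = lmul!(-1, ldiv!(zeros(4), cholesky(H̃), q̃))  # mirrors the lmul!/ldiv! call below
Z̃_dense ≈ Z̃_chol                                      # true (up to rounding)
```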
"""
optim_objective!(mpc::ExplicitMPC) = lmul!(-1, ldiv!(mpc.Z̃, mpc.H̃_chol, mpc.q̃))
"Compute the predictions but not the terminal states if `mpc` is an [`ExplicitMPC`](@ref)."
function predict!(
Ŷ0, x̂0end, _ , _ , _ , mpc::ExplicitMPC, ::LinModel, ::TranscriptionMethod, _ , Z̃
)
# in-place operations to reduce allocations :
Ŷ0 .= mul!(Ŷ0, mpc.Ẽ, Z̃) .+ mpc.F
x̂0end .= NaN
return Ŷ0, x̂0end
end
"`ExplicitMPC` does not support custom nonlinear constraint, return `true`."
iszero_nc(mpc::ExplicitMPC) = true
"""
    addinfo!(info, mpc::ExplicitMPC) -> info

For [`ExplicitMPC`](@ref), add nothing to `info`.
"""
addinfo!(info, mpc::ExplicitMPC) = info
"Update the prediction matrices and Cholesky factorization."
function setmodel_controller!(mpc::ExplicitMPC, _ )
model, estim, transcription = mpc.estim.model, mpc.estim, mpc.transcription
nu, ny, nd, Hp, Hc = model.nu, model.ny, model.nd, mpc.Hp, mpc.Hc
# --- predictions matrices ---
E, G, J, K, V, B = init_predmat(model, estim, transcription, Hp, Hc)
Ẽ = E # no slack variable ϵ for ExplicitMPC
mpc.Ẽ .= Ẽ
mpc.G .= G
mpc.J .= J
mpc.K .= K
mpc.V .= V
mpc.B .= B
# --- quadratic programming Hessian matrix ---
H̃ = init_quadprog(model, mpc.weights, mpc.Ẽ, mpc.P̃Δu, mpc.P̃u)
mpc.H̃ .= H̃
set_objective_hessian!(mpc)
# --- operating points ---
for i in 0:Hp-1
mpc.Uop[(1+nu*i):(nu+nu*i)] .= model.uop
mpc.Yop[(1+ny*i):(ny+ny*i)] .= model.yop
mpc.Dop[(1+nd*i):(nd+nd*i)] .= model.dop
end
return nothing
end
"Update the Cholesky factorization of the Hessian matrix."
function set_objective_hessian!(mpc::ExplicitMPC)
H̃_chol = cholesky(mpc.H̃)
mpc.H̃_chol.factors .= H̃_chol.factors
return nothing
end
"Called by plots recipes for manipulated inputs constraints."
getUcon(mpc::ExplicitMPC, nu) = fill(-Inf, mpc.Hp*nu), fill(+Inf, mpc.Hp*nu)
"Called by plots recipes for predicted output constraints."
getYcon(mpc::ExplicitMPC, ny) = fill(-Inf, mpc.Hp*ny), fill(+Inf, mpc.Hp*ny)