update mpc class

parent f7fadeca56
commit eaf64ffb24

@@ -63,6 +63,8 @@ class MPC:
         self.action_len = M
         self.state_cost = Q
         self.action_cost = R
+        self.u_bounds = np.array([P.MAX_ACC, P.MAX_STEER])
+        self.du_bounds = np.array([P.MAX_D_ACC, P.MAX_D_STEER])
 
     def optimize_linearized_model(
         self,
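The new bound attributes (and several constraints in the final hunk) read their limits from a parameters object imported as P, which is defined outside the hunks shown here. A minimal module-style stand-in, with attribute names taken from the diff but placeholder values chosen only for illustration, could look like:

    # params stand-in for the object imported as P (hypothetical; the
    # repository may define it differently, and these values are assumptions)
    MAX_ACC = 1.0        # max acceleration command
    MAX_STEER = 0.5      # max steering command [rad]
    MAX_D_ACC = 1.0      # max acceleration rate of change per second
    MAX_D_STEER = 0.5    # max steering rate of change per second [rad/s]
    DT = 0.2             # discretization step [s]
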
@@ -71,7 +73,7 @@ class MPC:
         C,
         initial_state,
         target,
-        time_horizon=10,
+        control_horizon=10,
         Q=None,
         R=None,
         verbose=False,
@@ -85,7 +87,7 @@ class MPC:
         :param Q:
         :param R:
         :param target:
-        :param time_horizon:
+        :param control_horizon:
         :param verbose:
         :return:
         """
@@ -97,45 +99,37 @@ class MPC:
             R = self.action_cost
 
         # Create variables
-        x = opt.Variable((self.state_len, time_horizon + 1), name="states")
-        u = opt.Variable((self.action_len, time_horizon), name="actions")
-
-        # Loop through the entire time_horizon and append costs
-        cost_function = []
-
-        for t in range(time_horizon):
-
-            _cost = opt.quad_form(target[:, t + 1] - x[:, t + 1], Q) + opt.quad_form(
-                u[:, t], R
-            )
-
-            _constraints = [
-                x[:, t + 1] == A @ x[:, t] + B @ u[:, t] + C,
-                u[0, t] >= -P.MAX_ACC,
-                u[0, t] <= P.MAX_ACC,
-                u[1, t] >= -P.MAX_STEER,
-                u[1, t] <= P.MAX_STEER,
-            ]
-            # opt.norm(target[:, t + 1] - x[:, t + 1], 1) <= 0.1]
-
-            # Actuation rate of change
-            if t < (time_horizon - 1):
-                _cost += opt.quad_form(u[:, t + 1] - u[:, t], R * 1)
-                _constraints += [opt.abs(u[0, t + 1] - u[0, t]) / P.DT <= P.MAX_D_ACC]
-                _constraints += [opt.abs(u[1, t + 1] - u[1, t]) / P.DT <= P.MAX_D_STEER]
-
-            if t == 0:
-                # _constraints += [opt.norm(target[:, time_horizon] - x[:, time_horizon], 1) <= 0.01,
-                #                  x[:, 0] == initial_state]
-                _constraints += [x[:, 0] == initial_state]
-
-            cost_function.append(
-                opt.Problem(opt.Minimize(_cost), constraints=_constraints)
-            )
-
-        # Add final cost
-        problem = sum(cost_function)
-
-        # Minimize Problem
-        problem.solve(verbose=verbose, solver=opt.OSQP)
+        x = opt.Variable((self.state_len, control_horizon + 1), name="states")
+        u = opt.Variable((self.action_len, control_horizon), name="actions")
+        cost = 0
+        constr = []
+
+        for k in range(control_horizon):
+            cost += opt.quad_form(target[:, k] - x[:, k], Q)
+            cost += opt.quad_form(u[:, k], R)
+
+            # Actuation rate of change
+            if k < (control_horizon - 1):
+                cost += opt.quad_form(u[:, k + 1] - u[:, k], self.P)
+
+            # Kinematics Constrains
+            constr += [x[:, k + 1] == A @ x[:, k] + B @ u[:, k] + C]
+
+            # Actuation rate of change limit
+            if t < (control_horizon - 1):
+                constr += [opt.abs(u[0, k + 1] - u[0, k]) / P.DT <= self.du_bounds[0]]
+                constr += [opt.abs(u[1, k + 1] - u[1, k]) / P.DT <= self.du_bounds[1]]
+
+        # Final Point tracking
+        # cost += opt.quad_form(x[:, -1] - target[:,-1], Qf)
+
+        # initial state
+        constr += [x[:, 0] == initial_state]
+
+        # actuation magnitude
+        constr += [opt.abs(u[:, 0]) <= self.u_bounds[0]]
+        constr += [opt.abs(u[:, 1]) <= self.u_bounds[1]]
+
+        prob = opt.Problem(opt.Minimize(cost), constr)
+        solution = prob.solve(solver=opt.OSQP, warm_start=True, verbose=False)
         return x, u
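The final hunk replaces the per-step list of opt.Problem objects (previously summed and solved once) with a single accumulated quadratic cost and a shared constraint list handed to one cvxpy problem solved by OSQP. The sketch below is a condensed, self-contained rendering of that formulation, not the repository code: it assumes cvxpy is imported as opt, substitutes an explicit weight R_rate where the hunk references self.P, writes the rate-limit check with k where the hunk still uses t, and bounds each actuator row over the whole horizon where the hunk indexes u[:, 0] and u[:, 1]. All dimensions, weights, and limits are illustrative values.

    import numpy as np
    import cvxpy as opt

    # Illustrative dimensions and limits (assumptions, not repository values).
    N, M = 4, 2                  # state and action dimensions
    control_horizon = 10
    DT = 0.2
    MAX_ACC, MAX_STEER = 1.0, 0.5
    MAX_D_ACC, MAX_D_STEER = 1.0, 0.5

    # Stand-in linearized kinematics x_{k+1} = A x_k + B u_k + C and weights.
    A = np.eye(N)
    B = np.zeros((N, M))
    B[0, 0] = DT
    B[2, 1] = DT
    C = np.zeros(N)
    Q = np.diag([10.0, 10.0, 1.0, 1.0])   # state tracking weight
    R = np.diag([1.0, 1.0])               # actuation weight
    R_rate = np.diag([1.0, 1.0])          # rate-of-change weight (self.P in the hunk)

    initial_state = np.zeros(N)
    target = np.tile(np.array([1.0, 0.0, 0.0, 0.0])[:, None], (1, control_horizon))

    x = opt.Variable((N, control_horizon + 1), name="states")
    u = opt.Variable((M, control_horizon), name="actions")
    cost = 0
    constr = []

    for k in range(control_horizon):
        # Tracking and actuation costs accumulate into a single objective.
        cost += opt.quad_form(target[:, k] - x[:, k], Q)
        cost += opt.quad_form(u[:, k], R)

        # Penalize and bound the actuation rate of change between steps.
        if k < control_horizon - 1:
            cost += opt.quad_form(u[:, k + 1] - u[:, k], R_rate)
            constr += [opt.abs(u[0, k + 1] - u[0, k]) / DT <= MAX_D_ACC]
            constr += [opt.abs(u[1, k + 1] - u[1, k]) / DT <= MAX_D_STEER]

        # Linearized kinematics constraint.
        constr += [x[:, k + 1] == A @ x[:, k] + B @ u[:, k] + C]

    # Initial state and actuation magnitude constraints.
    constr += [x[:, 0] == initial_state]
    constr += [opt.abs(u[0, :]) <= MAX_ACC]
    constr += [opt.abs(u[1, :]) <= MAX_STEER]

    prob = opt.Problem(opt.Minimize(cost), constr)
    prob.solve(solver=opt.OSQP, warm_start=True)

    # In a receding-horizon loop only the first action would be applied.
    print("first action:", u.value[:, 0])

In the class itself these pieces would come from the constructor attributes added in the first hunk (state_len, action_len, state_cost, action_cost, u_bounds, du_bounds) and from the A, B, C matrices passed into optimize_linearized_model.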