tidy up cost matrices
parent 5476ded961
commit 0abc666052
@@ -214,16 +214,15 @@ def run_sim():
     action[1] = 0.0 # delta

-    # Cost Matrices
-    Q = np.diag([20, 20, 10, 20]) # state error cost
-    Qf = np.diag([30, 30, 30, 30]) # state final error cost
-    R = np.diag([10, 10]) # input cost
-    R_ = np.diag([10, 10]) # input rate of change cost
+    Q = [20, 20, 10, 20] # state error cost [x,y,v,yaw]
+    Qf = [30, 30, 30, 30] # state error cost at final timestep [x,y,v,yaw]
+    R = [10, 10] # input cost [acc ,steer]
+    P = [10, 10] # input rate of change cost [acc ,steer]

-    mpc = mpcpy.MPC(P.N, P.M, Q, R)
+    mpc = mpcpy.MPC(Q, Qf, R, P)
     x_history = []
     y_history = []

     time.sleep(0.5)
     input("\033[92m Press Enter to continue... \033[0m")

     while 1:
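The demo now passes the cost weights as plain Python lists and leaves building the actual matrices to the MPC class (see the mpcpy hunk further down). A minimal standalone sketch of that convention, assuming only numpy; the `*_mat` names are illustrative and not part of the diff:

```python
import numpy as np

# Weights exactly as in the hunk above: plain lists in the driver script.
Q = [20, 20, 10, 20]   # state error cost [x, y, v, yaw]
Qf = [30, 30, 30, 30]  # state error cost at the final timestep
R = [10, 10]           # input cost [acc, steer]
P = [10, 10]           # input rate of change cost [acc, steer]

# Inside the controller the lists become diagonal weight matrices,
# which is what the np.diag(...) calls in MPC.__init__ do.
Q_mat, Qf_mat, R_mat, P_mat = (np.diag(w) for w in (Q, Qf, R, P))
print(Q_mat.shape, R_mat.shape)  # (4, 4) (2, 2)
```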
@@ -244,27 +243,29 @@ def run_sim():
             p.disconnect()
             return

-        # for MPC car ref frame is used
-        state[0:2] = 0.0
-        state[3] = 0.0
+        # for MPC base link frame is used:
+        # so x, y, yaw are 0.0, but speed is the same
+        ego_state = [0.0, 0.0, state[2], 0.0]

-        # add 1 timestep delay to input
-        state[0] = state[0] + state[2] * np.cos(state[3]) * P.DT
-        state[1] = state[1] + state[2] * np.sin(state[3]) * P.DT
-        state[2] = state[2] + action[0] * P.DT
-        state[3] = state[3] + action[0] * np.tan(action[1]) / P.L * P.DT
+        # to account for MPC latency
+        # we simulate one timestep delay
+        ego_state[0] = ego_state[0] + ego_state[2] * np.cos(ego_state[3]) * P.DT
+        ego_state[1] = ego_state[1] + ego_state[2] * np.sin(ego_state[3]) * P.DT
+        ego_state[2] = ego_state[2] + action[0] * P.DT
+        ego_state[3] = ego_state[3] + action[0] * np.tan(action[1]) / P.L * P.DT

        # optimization loop
        start = time.time()

        # State Matrices
-        A, B, C = mpcpy.get_linear_model_matrices(state, action)
+        A, B, C = mpcpy.get_linear_model_matrices(ego_state, action)

-        # Get Reference_traj -> inputs are in worldframe
-        target, _ = mpcpy.get_ref_trajectory(get_state(car), path, 1.0)
+        # Get Reference_traj
+        # NOTE: inputs are in world frame
+        target, _ = mpcpy.get_ref_trajectory(ego_state, path, P.TARGET_SPEED)

        # MPC step
-        acc, steer = mpc.optimize_linearized_model(
+        action = mpc.optimize_linearized_model(
             A, B, C, state, target, time_horizon=P.T, verbose=False
         )

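The block above compensates for controller latency: the base-link state is pushed one control period into the future before the model is linearized, so the plan starts from where the car will actually be when the command takes effect. A minimal sketch of that one-step propagation as a standalone helper; `dt` and `wheelbase` stand in for P.DT and P.L, and the update equations are copied from the hunk above:

```python
import numpy as np

def propagate_one_step(ego_state, action, dt=0.2, wheelbase=0.3):
    """Forward-simulate [x, y, v, yaw] by one step under [acc, steer].

    Mirrors the delay-compensation update in the hunk above; the defaults
    correspond to P.DT and P.L from the Params class.
    """
    x, y, v, yaw = ego_state
    acc, steer = action
    return [
        x + v * np.cos(yaw) * dt,
        y + v * np.sin(yaw) * dt,
        v + acc * dt,
        yaw + acc * np.tan(steer) / wheelbase * dt,
    ]

# e.g. base-link state (x = y = yaw = 0) at 1 m/s, accelerating and steering slightly
print(propagate_one_step([0.0, 0.0, 1.0, 0.0], [0.5, 0.1]))
```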
@@ -57,12 +57,29 @@ def get_linear_model_matrices(x_bar, u_bar):


 class MPC:
-    def __init__(self, N, M, Q, R):
+    def __init__(self, state_cost, final_state_cost, input_cost, input_rate_cost):
         """ """
-        self.state_len = N
-        self.action_len = M
-        self.state_cost = Q
-        self.action_cost = R

+        nx = P.N # number of state vars
+        nu = P.M # number of input/control vars
+
+        if len(state_cost) != nx:
+            raise ValueError(f"State Error cost matrix should be of size {nx}")
+        if len(final_state_cost) != nx:
+            raise ValueError(f"End State Error cost matrix should be of size {nx}")
+        if len(input_cost) != nu:
+            raise ValueError(f"Control Effort cost matrix should be of size {nu}")
+        if len(input_rate_cost) != nu:
+            raise ValueError(
+                f"Control Effort Difference cost matrix should be of size {nu}"
+            )
+
+        self.dt = P.DT
+        self.control_horizon = P.T
+        self.Q = np.diag(state_cost)
+        self.Qf = np.diag(final_state_cost)
+        self.R = np.diag(input_cost)
+        self.P = np.diag(input_rate_cost)
         self.u_bounds = np.array([P.MAX_ACC, P.MAX_STEER])
         self.du_bounds = np.array([P.MAX_D_ACC, P.MAX_D_STEER])
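With the cost vectors validated in the constructor, a wrong-sized list now fails immediately instead of surfacing later as a misshapen np.diag matrix. A quick usage sketch of the new contract, assuming P.N == 4 and P.M == 2 as in this four-state, two-input model:

```python
import mpcpy

try:
    # state cost has 3 entries instead of the required P.N == 4
    mpc = mpcpy.MPC([20, 20, 10], [30, 30, 30, 30], [10, 10], [10, 10])
except ValueError as err:
    print(err)  # -> State Error cost matrix should be of size 4
```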
@@ -73,9 +90,6 @@ class MPC:
         C,
         initial_state,
         target,
-        control_horizon=10,
-        Q=None,
-        R=None,
         verbose=False,
     ):
         """
@@ -84,29 +98,22 @@ class MPC:
         :param B:
         :param C:
         :param initial_state:
-        :param Q:
-        :param R:
         :param target:
-        :param control_horizon:
         :param verbose:
         :return:
         """

-        assert len(initial_state) == self.state_len

-        if Q == None or R == None:
-            Q = self.state_cost
-            R = self.action_cost

         # Create variables
         x = opt.Variable((self.state_len, control_horizon + 1), name="states")
         u = opt.Variable((self.action_len, control_horizon), name="actions")
         cost = 0
         constr = []

-        for k in range(control_horizon):
-            cost += opt.quad_form(target[:, k] - x[:, k], Q)
-            cost += opt.quad_form(u[:, k], R)
+        for k in range(self.control_horizon):
+            cost += opt.quad_form(target[:, k] - x[:, k], self.Q)
+            cost += opt.quad_form(u[:, k], self.R)

             # Actuation rate of change
             if k < (control_horizon - 1):
@@ -117,11 +124,15 @@ class MPC:

             # Actuation rate of change limit
             if t < (control_horizon - 1):
-                constr += [opt.abs(u[0, k + 1] - u[0, k]) / P.DT <= self.du_bounds[0]]
-                constr += [opt.abs(u[1, k + 1] - u[1, k]) / P.DT <= self.du_bounds[1]]
+                constr += [
+                    opt.abs(u[0, k + 1] - u[0, k]) / self.dt <= self.du_bounds[0]
+                ]
+                constr += [
+                    opt.abs(u[1, k + 1] - u[1, k]) / self.dt <= self.du_bounds[1]
+                ]

         # Final Point tracking
-        # cost += opt.quad_form(x[:, -1] - target[:,-1], Qf)
+        cost += opt.quad_form(x[:, -1] - target[:, -1], self.Qf)

         # initial state
         constr += [x[:, 0] == initial_state]
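Taken together with the stage loop from the previous hunk, the objective assembled for the solver is roughly the following (leaving aside the actuation-rate terms and the model constraints handled elsewhere in the function), with Q, R, Qf the diagonal matrices built from the cost lists and T the control horizon P.T:

```latex
J = \sum_{k=0}^{T-1} \left[ (x^{\mathrm{ref}}_{k} - x_{k})^{\top} Q \, (x^{\mathrm{ref}}_{k} - x_{k})
      + u_{k}^{\top} R \, u_{k} \right]
    + (x_{T} - x^{\mathrm{ref}}_{T})^{\top} Q_{f} \, (x_{T} - x^{\mathrm{ref}}_{T})
\quad \text{s.t.} \quad x_{0} = x_{\mathrm{init}}, \qquad
\left| \frac{u_{k+1} - u_{k}}{\Delta t} \right| \le \Delta u_{\max}.
```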
@@ -9,6 +9,7 @@ class Params:
         self.DT = 0.2 # discretization step
         self.path_tick = 0.05
         self.L = 0.3 # vehicle wheelbase
+        self.TARGET_SPEED = 1.0 # m/s
         self.MAX_SPEED = 1.5 # m/s
         self.MAX_ACC = 1.0 # m/ss
         self.MAX_D_ACC = 1.0 # m/sss
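The new TARGET_SPEED entry is what the run_sim loop above now passes to get_ref_trajectory in place of the hard-coded 1.0. A tiny access sketch, assuming Params is exposed through mpcpy and instantiated as the module-level P used throughout the demo:

```python
import mpcpy

P = mpcpy.Params()             # assumed module-level pattern, as used via P.DT, P.L, ...
print(P.TARGET_SPEED)          # 1.0  -> reference speed fed to get_ref_trajectory
print(P.MAX_SPEED, P.MAX_ACC)  # 1.5 m/s speed cap, 1.0 m/s^2 acceleration limit
```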