tidy up cost matrices

master
mcarfagno 2023-04-23 16:44:43 +01:00
parent 5476ded961
commit 0abc666052
3 changed files with 52 additions and 39 deletions

View File

@@ -214,16 +214,15 @@ def run_sim():
     action[1] = 0.0  # delta
 
     # Cost Matrices
-    Q = np.diag([20, 20, 10, 20])  # state error cost
-    Qf = np.diag([30, 30, 30, 30])  # state final error cost
-    R = np.diag([10, 10])  # input cost
-    R_ = np.diag([10, 10])  # input rate of change cost
+    Q = [20, 20, 10, 20]  # state error cost [x, y, v, yaw]
+    Qf = [30, 30, 30, 30]  # state error cost at final timestep [x, y, v, yaw]
+    R = [10, 10]  # input cost [acc, steer]
+    P = [10, 10]  # input rate of change cost [acc, steer]
 
-    mpc = mpcpy.MPC(P.N, P.M, Q, R)
+    mpc = mpcpy.MPC(Q, Qf, R, P)
     x_history = []
     y_history = []
 
-    time.sleep(0.5)
     input("\033[92m Press Enter to continue... \033[0m")
 
     while 1:
@@ -244,27 +243,29 @@ def run_sim():
             p.disconnect()
             return
 
-        # for MPC car ref frame is used
-        state[0:2] = 0.0
-        state[3] = 0.0
+        # for MPC the base-link frame is used:
+        # x, y and yaw are 0.0, but the speed is the same
+        ego_state = [0.0, 0.0, state[2], 0.0]
 
-        # add 1 timestep delay to input
-        state[0] = state[0] + state[2] * np.cos(state[3]) * P.DT
-        state[1] = state[1] + state[2] * np.sin(state[3]) * P.DT
-        state[2] = state[2] + action[0] * P.DT
-        state[3] = state[3] + action[0] * np.tan(action[1]) / P.L * P.DT
+        # to account for MPC latency
+        # we simulate one timestep of delay
+        ego_state[0] = ego_state[0] + ego_state[2] * np.cos(ego_state[3]) * P.DT
+        ego_state[1] = ego_state[1] + ego_state[2] * np.sin(ego_state[3]) * P.DT
+        ego_state[2] = ego_state[2] + action[0] * P.DT
+        ego_state[3] = ego_state[3] + action[0] * np.tan(action[1]) / P.L * P.DT
 
         # optimization loop
         start = time.time()
 
         # State Matrices
-        A, B, C = mpcpy.get_linear_model_matrices(state, action)
+        A, B, C = mpcpy.get_linear_model_matrices(ego_state, action)
 
-        # Get Reference_traj -> inputs are in worldframe
-        target, _ = mpcpy.get_ref_trajectory(get_state(car), path, 1.0)
+        # Get Reference_traj
+        # NOTE: inputs are in world frame
+        target, _ = mpcpy.get_ref_trajectory(ego_state, path, P.TARGET_SPEED)
 
         # MPC step
-        acc, steer = mpc.optimize_linearized_model(
+        action = mpc.optimize_linearized_model(
             A, B, C, state, target, time_horizon=P.T, verbose=False
         )
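For context, the controller is now constructed from plain per-variable weight lists, which it converts to diagonal cost matrices internally; each loop iteration then linearizes around the delay-compensated ego state, samples the reference at P.TARGET_SPEED, and solves for the next action, as shown in the diff above. A minimal construction sketch (illustrative only; `params` and `P_rate` are placeholder names, the latter chosen to avoid clashing with a `Params` instance named `P`):

    import mpcpy

    # assumption: Params is importable from the mpcpy package, as the demo's P appears to be
    params = mpcpy.Params()

    Q = [20, 20, 10, 20]   # state error weights [x, y, v, yaw]
    Qf = [30, 30, 30, 30]  # terminal state error weights [x, y, v, yaw]
    R = [10, 10]           # input effort weights [acc, steer]
    P_rate = [10, 10]      # input rate-of-change weights [acc, steer]

    mpc = mpcpy.MPC(Q, Qf, R, P_rate)  # replaces the old MPC(P.N, P.M, Q, R) call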

View File

@@ -57,12 +57,29 @@ def get_linear_model_matrices(x_bar, u_bar):
 class MPC:
-    def __init__(self, N, M, Q, R):
+    def __init__(self, state_cost, final_state_cost, input_cost, input_rate_cost):
         """ """
-        self.state_len = N
-        self.action_len = M
-        self.state_cost = Q
-        self.action_cost = R
+
+        nx = P.N  # number of state vars
+        nu = P.M  # number of input/control vars
+
+        if len(state_cost) != nx:
+            raise ValueError(f"State Error cost matrix should be of size {nx}")
+        if len(final_state_cost) != nx:
+            raise ValueError(f"End State Error cost matrix should be of size {nx}")
+        if len(input_cost) != nu:
+            raise ValueError(f"Control Effort cost matrix should be of size {nu}")
+        if len(input_rate_cost) != nu:
+            raise ValueError(
+                f"Control Effort Difference cost matrix should be of size {nu}"
+            )
+
+        self.dt = P.DT
+        self.control_horizon = P.T
+
+        self.Q = np.diag(state_cost)
+        self.Qf = np.diag(final_state_cost)
+        self.R = np.diag(input_cost)
+        self.P = np.diag(input_rate_cost)
+
         self.u_bounds = np.array([P.MAX_ACC, P.MAX_STEER])
         self.du_bounds = np.array([P.MAX_D_ACC, P.MAX_D_STEER])
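The new constructor validates the length of each weight list against the model dimensions before building the diagonal matrices. A quick illustration, assuming the repo's defaults of N=4 state variables and M=2 inputs:

    import mpcpy

    mpcpy.MPC([20, 20, 10, 20], [30, 30, 30, 30], [10, 10], [10, 10])  # sizes match, fine

    mpcpy.MPC([20, 20, 10], [30, 30, 30, 30], [10, 10], [10, 10])
    # raises ValueError: State Error cost matrix should be of size 4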
@@ -73,9 +90,6 @@ class MPC:
         C,
         initial_state,
         target,
-        control_horizon=10,
-        Q=None,
-        R=None,
         verbose=False,
     ):
         """
@@ -84,29 +98,22 @@ class MPC:
         :param B:
         :param C:
         :param initial_state:
-        :param Q:
-        :param R:
         :param target:
-        :param control_horizon:
         :param verbose:
         :return:
         """
         assert len(initial_state) == self.state_len
 
-        if Q == None or R == None:
-            Q = self.state_cost
-            R = self.action_cost
-
         # Create variables
         x = opt.Variable((self.state_len, control_horizon + 1), name="states")
         u = opt.Variable((self.action_len, control_horizon), name="actions")
 
         cost = 0
         constr = []
 
-        for k in range(control_horizon):
-            cost += opt.quad_form(target[:, k] - x[:, k], Q)
-            cost += opt.quad_form(u[:, k], R)
+        for k in range(self.control_horizon):
+            cost += opt.quad_form(target[:, k] - x[:, k], self.Q)
+            cost += opt.quad_form(u[:, k], self.R)
 
             # Actuation rate of change
             if k < (control_horizon - 1):
@@ -117,11 +124,15 @@ class MPC:
             # Actuation rate of change limit
             if t < (control_horizon - 1):
-                constr += [opt.abs(u[0, k + 1] - u[0, k]) / P.DT <= self.du_bounds[0]]
-                constr += [opt.abs(u[1, k + 1] - u[1, k]) / P.DT <= self.du_bounds[1]]
+                constr += [
+                    opt.abs(u[0, k + 1] - u[0, k]) / self.dt <= self.du_bounds[0]
+                ]
+                constr += [
+                    opt.abs(u[1, k + 1] - u[1, k]) / self.dt <= self.du_bounds[1]
+                ]
 
         # Final Point tracking
-        # cost += opt.quad_form(x[:, -1] - target[:,-1], Qf)
+        cost += opt.quad_form(x[:, -1] - target[:, -1], self.Qf)
 
         # initial state
         constr += [x[:, 0] == initial_state]
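Taken together, the class changes build the finite-horizon quadratic program from the stored matrices: a stage cost with self.Q and self.R, a now-active terminal cost with self.Qf, and actuation-rate bounds scaled by self.dt. Below is a condensed cvxpy sketch of that objective; the dynamics constraints are omitted, the self.P rate-penalty term is inferred from the new attribute rather than shown in this diff, and the dimensions, reference and bounds are illustrative placeholders, so treat it as an approximation rather than the class's exact code.

    import cvxpy as opt
    import numpy as np

    T, nx, nu, dt = 10, 4, 2, 0.2                  # horizon, state/input dims, timestep (illustrative)
    Q, Qf = np.diag([20, 20, 10, 20]), np.diag([30, 30, 30, 30])
    R, P_rate = np.diag([10, 10]), np.diag([10, 10])
    target = np.zeros((nx, T))                     # placeholder reference trajectory
    du_max = np.array([1.0, 0.5])                  # placeholder actuation-rate bounds

    x = opt.Variable((nx, T + 1), name="states")
    u = opt.Variable((nu, T), name="actions")

    cost, constr = 0, []
    for k in range(T):
        cost += opt.quad_form(target[:, k] - x[:, k], Q)   # stage tracking error
        cost += opt.quad_form(u[:, k], R)                   # control effort
        if k < T - 1:
            # actuation rate of change: assumed penalty with the P weights, plus hard limit
            cost += opt.quad_form(u[:, k + 1] - u[:, k], P_rate)
            constr += [opt.abs(u[:, k + 1] - u[:, k]) / dt <= du_max]
    cost += opt.quad_form(x[:, -1] - target[:, -1], Qf)     # terminal tracking term
    problem = opt.Problem(opt.Minimize(cost), constr)       # dynamics and initial-state constraints omitted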

View File

@@ -9,6 +9,7 @@ class Params:
         self.DT = 0.2  # discretization step
         self.path_tick = 0.05
         self.L = 0.3  # vehicle wheelbase
+        self.TARGET_SPEED = 1.0  # m/s
         self.MAX_SPEED = 1.5  # m/s
         self.MAX_ACC = 1.0  # m/ss
         self.MAX_D_ACC = 1.0  # m/sss
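The new TARGET_SPEED is the cruise reference the demo now passes to get_ref_trajectory in place of the hard-coded 1.0 m/s (see the first file above), and it sits below the 1.5 m/s MAX_SPEED limit. A tiny sketch of reading it, assuming the same Params import as in the earlier sketches:

    import mpcpy

    params = mpcpy.Params()
    # reference speed used when sampling the target trajectory; stays within the speed limit
    assert 0.0 < params.TARGET_SPEED <= params.MAX_SPEED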