Merge pull request #5 from mcarfagno/change-notation-to-borrelli

Change notation to Borrelli
master
mcarfagno 2023-10-12 22:06:37 +01:00 committed by GitHub
commit 147670b5de
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 131 additions and 127 deletions

View File

@@ -18,7 +18,7 @@ SIM_START_V = 0.0
SIM_START_H = 0.0
L = 0.3
P = mpcpy.Params()
params = mpcpy.Params()
# Params
VEL = 1.0 # m/s
@@ -31,25 +31,26 @@ class MPCSim:
self.state = np.array([SIM_START_X, SIM_START_Y, SIM_START_V, SIM_START_H])
# starting guess
self.action = np.zeros(P.M)
self.action[0] = P.MAX_ACC / 2 # a
self.action = np.zeros(params.M)
self.action[0] = params.MAX_ACC / 2 # a
self.action[1] = 0.0 # delta
self.opt_u = np.zeros((P.M, P.T))
self.K = int(params.T / params.DT)
self.opt_u = np.zeros((params.M, self.K))
# Cost Matrices
Q = np.diag([20, 20, 10, 20]) # state error cost
Qf = np.diag([30, 30, 30, 30]) # state final error cost
R = np.diag([10, 10]) # input cost
R_ = np.diag([10, 10]) # input rate of change cost
# Weights for Cost Matrices
Q = [20, 20, 10, 20] # state error cost
Qf = [30, 30, 30, 30] # state final error cost
R = [10, 10] # input cost
P = [10, 10] # input rate of change cost
self.mpc = mpcpy.MPC(P.N, P.M, Q, R)
self.mpc = mpcpy.MPC(Q, Qf, R, P)
# Interpolated Path to follow given waypoints
self.path = compute_path_from_wp(
[0, 3, 4, 6, 10, 12, 13, 13, 6, 1, 0],
[0, 0, 2, 4, 3, 3, -1, -2, -6, -2, -2],
P.path_tick,
0.05,
)
# Sim help vars
@@ -113,9 +114,7 @@ class MPCSim:
# State Matrices
A, B, C = mpcpy.get_linear_model_matrices(curr_state, self.action)
# Get Reference_traj -> inputs are in worldframe
target, _ = mpcpy.get_ref_trajectory(
self.state, self.path, VEL, dl=P.path_tick
)
target, _ = mpcpy.get_ref_trajectory(self.state, self.path, VEL)
x_mpc, u_mpc = self.mpc.optimize_linearized_model(
A,
@@ -123,13 +122,13 @@ class MPCSim:
C,
curr_state,
target,
time_horizon=P.T,
verbose=False,
)
# NOTE: used only for preview purposes
self.opt_u = np.vstack(
(
np.array(u_mpc.value[0, :]).flatten(),
(np.array(u_mpc.value[1, :]).flatten()),
np.array(u_mpc.value[1, :]).flatten(),
)
)
self.action[:] = [u_mpc.value[0, 0], u_mpc.value[1, 0]]
@@ -143,12 +142,12 @@ class MPCSim:
dxdt = x[2] * np.cos(x[3])
dydt = x[2] * np.sin(x[3])
dvdt = u[0]
dtheta0dt = x[2] * np.tan(u[1]) / P.L
dtheta0dt = x[2] * np.tan(u[1]) / params.L
dqdt = [dxdt, dydt, dvdt, dtheta0dt]
return dqdt
# solve ODE
tspan = [0, P.DT]
tspan = [0, params.DT]
self.state = odeint(kinematics_model, self.state, tspan, args=(u[:],))[1]
def plot_sim(self):
@@ -157,7 +156,7 @@ class MPCSim:
[TODO:description]
"""
self.sim_time = self.sim_time + P.DT
self.sim_time = self.sim_time + params.DT
self.x_history.append(self.state[0])
self.y_history.append(self.state[1])
self.v_history.append(self.state[2])
@@ -231,7 +230,7 @@ class MPCSim:
# plt.title("Linear Velocity {} m/s".format(self.v_history[-1]))
plt.plot(self.a_history, c="tab:orange")
locs, _ = plt.xticks()
plt.xticks(locs[1:], locs[1:] * P.DT)
plt.xticks(locs[1:], locs[1:] * params.DT)
plt.ylabel("a(t) [m/ss]")
plt.xlabel("t [s]")
@@ -240,7 +239,7 @@ class MPCSim:
plt.plot(np.degrees(self.d_history), c="tab:orange")
plt.ylabel("gamma(t) [deg]")
locs, _ = plt.xticks()
plt.xticks(locs[1:], locs[1:] * P.DT)
plt.xticks(locs[1:], locs[1:] * params.DT)
plt.xlabel("t [s]")
plt.tight_layout()
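Read together, the changes in this file reduce to the following minimal usage sketch of the new constructor signature (it assumes the mpcpy package from this repo is importable; the weight values are the demo's own, not required by the library):

import mpcpy

params = mpcpy.Params()
Q = [20, 20, 10, 20]   # state error cost [x, y, v, yaw]
Qf = [30, 30, 30, 30]  # state error cost at the final timestep
R = [10, 10]           # input cost [acc, steer]
P = [10, 10]           # input rate of change cost [acc, steer]
mpc = mpcpy.MPC(Q, Qf, R, P)  # horizon is taken from Params (T, DT), not passed at the call site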

View File

@@ -5,11 +5,11 @@ from matplotlib import animation
from mpcpy.utils import compute_path_from_wp
import mpcpy
P = mpcpy.Params()
params = mpcpy.Params()
import sys
import time
import pathlib
import pybullet as p
import time
@@ -30,15 +30,12 @@ def get_state(robotId):
def set_ctrl(robotId, currVel, acceleration, steeringAngle):
gearRatio = 1.0 / 21
steering = [0, 2]
wheels = [8, 15]
maxForce = 50
targetVelocity = currVel + acceleration * P.DT
# targetVelocity=lastVel
# print(targetVelocity)
targetVelocity = currVel + acceleration * params.DT
for wheel in wheels:
p.setJointMotorControl2(
@@ -95,10 +92,12 @@ def run_sim():
p.setTimeStep(1.0 / 120.0)
p.setRealTimeSimulation(useRealTimeSim) # either this
plane = p.loadURDF("racecar/plane.urdf")
# track = p.loadSDF("racecar/f10_racecar/meshes/barca_track.sdf", globalScaling=1)
file_path = pathlib.Path(__file__).parent.resolve()
plane = p.loadURDF(str(file_path) + "/racecar/plane.urdf")
car = p.loadURDF(
str(file_path) + "/racecar/f10_racecar/racecar_differential.urdf", [0, 0.3, 0.3]
)
car = p.loadURDF("racecar/f10_racecar/racecar_differential.urdf", [0, 0.3, 0.3])
for wheel in range(p.getNumJoints(car)):
# print("joint[",wheel,"]=", p.getJointInfo(car,wheel))
p.setJointMotorControl2(
@@ -205,32 +204,30 @@ def run_sim():
path = compute_path_from_wp(
[0, 3, 4, 6, 10, 11, 12, 6, 1, 0],
[0, 0, 2, 4, 3, 3, -1, -6, -2, -2],
P.path_tick,
0.05,
)
for x_, y_ in zip(path[0, :], path[1, :]):
p.addUserDebugLine([x_, y_, 0], [x_, y_, 0.33], [0, 0, 1])
# starting guess
action = np.zeros(P.M)
action[0] = P.MAX_ACC / 2 # a
action = np.zeros(params.M)
action[0] = params.MAX_ACC / 2 # a
action[1] = 0.0 # delta
# Cost Matrices
Q = np.diag([20, 20, 10, 20]) # state error cost
Qf = np.diag([30, 30, 30, 30]) # state final error cost
R = np.diag([10, 10]) # input cost
R_ = np.diag([10, 10]) # input rate of change cost
Q = [20, 20, 10, 20] # state error cost [x,y,v,yaw]
Qf = [30, 30, 30, 30] # state error cost at final timestep [x,y,v,yaw]
R = [10, 10] # input cost [acc ,steer]
P = [10, 10] # input rate of change cost [acc ,steer]
mpc = mpcpy.MPC(P.N, P.M, Q, R)
mpc = mpcpy.MPC(Q, Qf, R, P)
x_history = []
y_history = []
time.sleep(0.5)
input("\033[92m Press Enter to continue... \033[0m")
while 1:
state = get_state(car)
x_history.append(state[0])
y_history.append(state[1])
@@ -248,41 +245,43 @@ def run_sim():
p.disconnect()
return
# for MPC car ref frame is used
state[0:2] = 0.0
state[3] = 0.0
# Get Reference_traj
# NOTE: inputs are in world frame
target, _ = mpcpy.get_ref_trajectory(state, path, params.TARGET_SPEED)
# add 1 timestep delay to input
state[0] = state[0] + state[2] * np.cos(state[3]) * P.DT
state[1] = state[1] + state[2] * np.sin(state[3]) * P.DT
state[2] = state[2] + action[0] * P.DT
state[3] = state[3] + action[0] * np.tan(action[1]) / P.L * P.DT
# for MPC base link frame is used:
# so x, y, yaw are 0.0, but speed is the same
ego_state = np.array([0.0, 0.0, state[2], 0.0])
# to account for MPC latency
# simulate one timestep actuation delay
ego_state[0] = ego_state[0] + ego_state[2] * np.cos(ego_state[3]) * params.DT
ego_state[1] = ego_state[1] + ego_state[2] * np.sin(ego_state[3]) * params.DT
ego_state[2] = ego_state[2] + action[0] * params.DT
ego_state[3] = (
ego_state[3] + action[0] * np.tan(action[1]) / params.L * params.DT
)
# State Matrices
A, B, C = mpcpy.get_linear_model_matrices(ego_state, action)
# optimization loop
start = time.time()
# State Matrices
A, B, C = mpcpy.get_linear_model_matrices(state, action)
# Get Reference_traj -> inputs are in worldframe
target, _ = mpcpy.get_ref_trajectory(get_state(car), path, 1.0)
x_mpc, u_mpc = mpc.optimize_linearized_model(
A, B, C, state, target, time_horizon=P.T, verbose=False
# MPC step
_, u_mpc = mpc.optimize_linearized_model(
A, B, C, ego_state, target, verbose=False
)
# action = np.vstack((np.array(u_mpc.value[0,:]).flatten(),
# (np.array(u_mpc.value[1,:]).flatten())))
action[:] = [u_mpc.value[0, 1], u_mpc.value[1, 1]]
action[0] = u_mpc.value[0, 0]
action[1] = u_mpc.value[1, 0]
elapsed = time.time() - start
print("CVXPY Optimization Time: {:.4f}s".format(elapsed))
set_ctrl(car, state[2], action[0], action[1])
if P.DT - elapsed > 0:
time.sleep(P.DT - elapsed)
if params.DT - elapsed > 0:
time.sleep(params.DT - elapsed)
if __name__ == "__main__":
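The delay-compensation block added above propagates the base-link state one control period forward before linearizing; the sketch below does the same using the kinematic bicycle model from kinematics_model in the first file (the helper name and default values are illustrative, not part of mpcpy):

import numpy as np

def propagate_one_step(ego_state, action, dt=0.2, wheelbase=0.3):
    # Advance the base-link state by one control period so the optimizer
    # plans from where the car will be when the command takes effect.
    x, y, v, yaw = ego_state
    acc, steer = action
    x += v * np.cos(yaw) * dt
    y += v * np.sin(yaw) * dt
    v += acc * dt
    yaw += v * np.tan(steer) / wheelbase * dt
    return np.array([x, y, v, yaw])

ego_state = propagate_one_step(np.array([0.0, 0.0, 1.0, 0.0]), np.array([0.5, 0.1]))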

View File

@@ -57,12 +57,31 @@ def get_linear_model_matrices(x_bar, u_bar):
class MPC:
def __init__(self, N, M, Q, R):
def __init__(self, state_cost, final_state_cost, input_cost, input_rate_cost):
""" """
self.state_len = N
self.action_len = M
self.state_cost = Q
self.action_cost = R
self.nx = P.N # number of state vars
self.nu = P.M # number of input/control vars
if len(state_cost) != self.nx:
raise ValueError(f"State Error cost matrix should be of size {self.nx}")
if len(final_state_cost) != self.nx:
raise ValueError(f"End State Error cost matrix should be of size {self.nx}")
if len(input_cost) != self.nu:
raise ValueError(f"Control Effort cost matrix should be of size {self.nu}")
if len(input_rate_cost) != self.nu:
raise ValueError(
f"Control Effort Difference cost matrix should be of size {self.nu}"
)
self.dt = P.DT
self.control_horizon = int(P.T / P.DT)
self.Q = np.diag(state_cost)
self.Qf = np.diag(final_state_cost)
self.R = np.diag(input_cost)
self.P = np.diag(input_rate_cost)
self.u_bounds = np.array([P.MAX_ACC, P.MAX_STEER])
self.du_bounds = np.array([P.MAX_D_ACC, P.MAX_D_STEER])
def optimize_linearized_model(
self,
@@ -71,9 +90,6 @@ class MPC:
C,
initial_state,
target,
time_horizon=10,
Q=None,
R=None,
verbose=False,
):
"""
@@ -82,60 +98,49 @@ class MPC:
:param B:
:param C:
:param initial_state:
:param Q:
:param R:
:param target:
:param time_horizon:
:param verbose:
:return:
"""
assert len(initial_state) == self.state_len
if Q == None or R == None:
Q = self.state_cost
R = self.action_cost
assert len(initial_state) == self.nx
# Create variables
x = opt.Variable((self.state_len, time_horizon + 1), name="states")
u = opt.Variable((self.action_len, time_horizon), name="actions")
x = opt.Variable((self.nx, self.control_horizon + 1), name="states")
u = opt.Variable((self.nu, self.control_horizon), name="actions")
cost = 0
constr = []
# Loop through the entire time_horizon and append costs
cost_function = []
for t in range(time_horizon):
_cost = opt.quad_form(target[:, t + 1] - x[:, t + 1], Q) + opt.quad_form(
u[:, t], R
)
_constraints = [
x[:, t + 1] == A @ x[:, t] + B @ u[:, t] + C,
u[0, t] >= -P.MAX_ACC,
u[0, t] <= P.MAX_ACC,
u[1, t] >= -P.MAX_STEER,
u[1, t] <= P.MAX_STEER,
]
# opt.norm(target[:, t + 1] - x[:, t + 1], 1) <= 0.1]
for k in range(self.control_horizon):
cost += opt.quad_form(target[:, k] - x[:, k + 1], self.Q)
cost += opt.quad_form(u[:, k], self.R)
# Actuation rate of change
if t < (time_horizon - 1):
_cost += opt.quad_form(u[:, t + 1] - u[:, t], R * 1)
_constraints += [opt.abs(u[0, t + 1] - u[0, t]) / P.DT <= P.MAX_D_ACC]
_constraints += [opt.abs(u[1, t + 1] - u[1, t]) / P.DT <= P.MAX_D_STEER]
if k < (self.control_horizon - 1):
cost += opt.quad_form(u[:, k + 1] - u[:, k], self.P)
if t == 0:
# _constraints += [opt.norm(target[:, time_horizon] - x[:, time_horizon], 1) <= 0.01,
# x[:, 0] == initial_state]
_constraints += [x[:, 0] == initial_state]
# Kinematics Constrains
constr += [x[:, k + 1] == A @ x[:, k] + B @ u[:, k] + C]
cost_function.append(
opt.Problem(opt.Minimize(_cost), constraints=_constraints)
)
# Actuation rate of change limit
if k < (self.control_horizon - 1):
constr += [
opt.abs(u[0, k + 1] - u[0, k]) / self.dt <= self.du_bounds[0]
]
constr += [
opt.abs(u[1, k + 1] - u[1, k]) / self.dt <= self.du_bounds[1]
]
# Add final cost
problem = sum(cost_function)
# Final Point tracking
cost += opt.quad_form(x[:, -1] - target[:, -1], self.Qf)
# Minimize Problem
problem.solve(verbose=verbose, solver=opt.OSQP)
# initial state
constr += [x[:, 0] == initial_state]
# actuation magnitude
constr += [opt.abs(u[0, :]) <= self.u_bounds[0]]
constr += [opt.abs(u[1, :]) <= self.u_bounds[1]]
prob = opt.Problem(opt.Minimize(cost), constr)
solution = prob.solve(solver=opt.OSQP, warm_start=True, verbose=verbose)
return x, u
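In the new notation, the loop above assembles a single convex QP; the following is a sketch of the cost and constraints reconstructed from the code, where K is the control horizon, Δt the discretization step, and Q, Qf, R, P the diagonal weight matrices built in the constructor:

\min_{x_{0:K},\,u_{0:K-1}} \;\sum_{k=0}^{K-1}\Big[(x_{k+1}-x^{\mathrm{ref}}_{k})^{\top}Q\,(x_{k+1}-x^{\mathrm{ref}}_{k}) + u_{k}^{\top}R\,u_{k}\Big] \;+\; \sum_{k=0}^{K-2}(u_{k+1}-u_{k})^{\top}P\,(u_{k+1}-u_{k}) \;+\; (x_{K}-x^{\mathrm{ref}}_{K})^{\top}Q_{f}\,(x_{K}-x^{\mathrm{ref}}_{K})

\text{s.t.}\quad x_{k+1}=A\,x_{k}+B\,u_{k}+C,\qquad x_{0}=x_{\mathrm{init}},\qquad |u_{k}|\le u_{\max},\qquad |u_{k+1}-u_{k}|\le \dot{u}_{\max}\,\Delta t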

View File

@@ -5,12 +5,12 @@ class Params:
def __init__(self):
self.N = 4 # number of state variables
self.M = 2 # number of control variables
self.T = 10 # Prediction Horizon
self.DT = 0.2 # discretization step
self.path_tick = 0.05
self.L = 0.3 # vehicle wheelbase
self.MAX_SPEED = 1.5 # m/s
self.MAX_ACC = 1.0 # m/ss
self.MAX_D_ACC = 1.0 # m/sss
self.MAX_STEER = np.radians(30) # rad
self.MAX_D_STEER = np.radians(30) # rad/s
self.T = 5 # Prediction Horizon [s]
self.DT = 0.2 # discretization step [s]
self.L = 0.3 # vehicle wheelbase [m]
self.TARGET_SPEED = 1.0 # [m/s]
self.MAX_SPEED = 1.5 # [m/s]
self.MAX_ACC = 1.0 # [m/ss]
self.MAX_D_ACC = 1.0 # [m/sss]
self.MAX_STEER = np.radians(30) # [rad]
self.MAX_D_STEER = np.radians(30) # [rad/s]
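With the horizon now specified in seconds, the number of optimization steps follows from T and DT, as MPC.__init__ and get_ref_trajectory compute it; a minimal sketch:

import mpcpy

params = mpcpy.Params()
K = int(params.T / params.DT)  # 5 s horizon / 0.2 s step -> 25 prediction steps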

View File

@@ -66,14 +66,14 @@ def normalize_angle(angle):
return angle
def get_ref_trajectory(state, path, target_v, dl=0.1):
def get_ref_trajectory(state, path, target_v):
"""
For each step in the time horizon, compute the reference point expressed in the robot frame.
"""
xref = np.zeros((P.N, P.T + 1))
dref = np.zeros((1, P.T + 1))
# sp = np.ones((1,T +1))*target_v #speed profile
K = int(P.T / P.DT)
xref = np.zeros((P.N, K + 1))
dref = np.zeros((1, K + 1))
ncourse = path.shape[1]
ind = get_nn_idx(state, path)
dx = path[0, ind] - state[0]
@@ -84,7 +84,8 @@ def get_ref_trajectory(state, path, target_v, dl=0.1):
xref[3, 0] = normalize_angle(path[2, ind] - state[3]) # Theta
dref[0, 0] = 0.0 # Steer operational point should be 0
travel = 0.0
for i in range(1, P.T + 1):
dl = np.hypot(path[0, 1] - path[0, 0], path[1, 1] - path[1, 0])
for i in range(1, K + 1):
travel += abs(target_v) * P.DT
dind = int(round(travel / dl))
if (ind + dind) < ncourse:
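A minimal sketch of the new get_ref_trajectory call pattern, assuming the package exposes these helpers as the demos above import them; the waypoints and the 0.05 m spacing are taken from the first demo:

import numpy as np
import mpcpy
from mpcpy.utils import compute_path_from_wp

params = mpcpy.Params()
path = compute_path_from_wp(
    [0, 3, 4, 6, 10, 12, 13, 13, 6, 1, 0],
    [0, 0, 2, 4, 3, 3, -1, -2, -6, -2, -2],
    0.05,  # path sample spacing [m]; dl is now inferred from this inside get_ref_trajectory
)
ego_state = np.array([0.0, 0.0, 0.0, 0.0])  # x, y, v, yaw in the base-link frame
target, _ = mpcpy.get_ref_trajectory(ego_state, path, params.TARGET_SPEED)
# target has shape (params.N, K + 1), with K = int(params.T / params.DT)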