From eaf64ffb24a3b1bd1d5b53b12ceff3eaf7fb4c22 Mon Sep 17 00:00:00 2001
From: mcarfagno
Date: Fri, 21 Apr 2023 17:16:46 +0100
Subject: [PATCH] update mpc class

---
 mpc_pybullet_demo/mpcpy/cvxpy_mpc.py | 66 +++++++++++++---------------
 1 file changed, 30 insertions(+), 36 deletions(-)

diff --git a/mpc_pybullet_demo/mpcpy/cvxpy_mpc.py b/mpc_pybullet_demo/mpcpy/cvxpy_mpc.py
index 5696f38..a9c64f5 100755
--- a/mpc_pybullet_demo/mpcpy/cvxpy_mpc.py
+++ b/mpc_pybullet_demo/mpcpy/cvxpy_mpc.py
@@ -63,6 +63,8 @@ class MPC:
         self.action_len = M
         self.state_cost = Q
         self.action_cost = R
+        self.u_bounds = np.array([P.MAX_ACC, P.MAX_STEER])
+        self.du_bounds = np.array([P.MAX_D_ACC, P.MAX_D_STEER])
 
     def optimize_linearized_model(
         self,
@@ -71,7 +73,7 @@
         C,
         initial_state,
         target,
-        time_horizon=10,
+        control_horizon=10,
         Q=None,
         R=None,
         verbose=False,
@@ -85,7 +87,7 @@
         :param Q:
         :param R:
         :param target:
-        :param time_horizon:
+        :param control_horizon:
         :param verbose:
         :return:
         """
@@ -97,45 +99,37 @@
         R = self.action_cost
 
         # Create variables
-        x = opt.Variable((self.state_len, time_horizon + 1), name="states")
-        u = opt.Variable((self.action_len, time_horizon), name="actions")
+        x = opt.Variable((self.state_len, control_horizon + 1), name="states")
+        u = opt.Variable((self.action_len, control_horizon), name="actions")
+        cost = 0
+        constr = []
 
-        # Loop through the entire time_horizon and append costs
-        cost_function = []
-
-        for t in range(time_horizon):
-
-            _cost = opt.quad_form(target[:, t + 1] - x[:, t + 1], Q) + opt.quad_form(
-                u[:, t], R
-            )
-
-            _constraints = [
-                x[:, t + 1] == A @ x[:, t] + B @ u[:, t] + C,
-                u[0, t] >= -P.MAX_ACC,
-                u[0, t] <= P.MAX_ACC,
-                u[1, t] >= -P.MAX_STEER,
-                u[1, t] <= P.MAX_STEER,
-            ]
-            # opt.norm(target[:, t + 1] - x[:, t + 1], 1) <= 0.1]
+        for k in range(control_horizon):
+            cost += opt.quad_form(target[:, k] - x[:, k], Q)
+            cost += opt.quad_form(u[:, k], R)
 
             # Actuation rate of change
-            if t < (time_horizon - 1):
-                _cost += opt.quad_form(u[:, t + 1] - u[:, t], R * 1)
-                _constraints += [opt.abs(u[0, t + 1] - u[0, t]) / P.DT <= P.MAX_D_ACC]
-                _constraints += [opt.abs(u[1, t + 1] - u[1, t]) / P.DT <= P.MAX_D_STEER]
+            if k < (control_horizon - 1):
+                cost += opt.quad_form(u[:, k + 1] - u[:, k], R)
 
-            if t == 0:
-                # _constraints += [opt.norm(target[:, time_horizon] - x[:, time_horizon], 1) <= 0.01,
-                #                  x[:, 0] == initial_state]
-                _constraints += [x[:, 0] == initial_state]
+            # Kinematics constraints
+            constr += [x[:, k + 1] == A @ x[:, k] + B @ u[:, k] + C]
 
-            cost_function.append(
-                opt.Problem(opt.Minimize(_cost), constraints=_constraints)
-            )
+            # Actuation rate of change limit
+            if k < (control_horizon - 1):
+                constr += [opt.abs(u[0, k + 1] - u[0, k]) / P.DT <= self.du_bounds[0]]
+                constr += [opt.abs(u[1, k + 1] - u[1, k]) / P.DT <= self.du_bounds[1]]
 
-        # Add final cost
-        problem = sum(cost_function)
+        # Final point tracking
+        # cost += opt.quad_form(x[:, -1] - target[:, -1], Qf)
 
-        # Minimize Problem
-        problem.solve(verbose=verbose, solver=opt.OSQP)
+        # Initial state
+        constr += [x[:, 0] == initial_state]
+
+        # Actuation magnitude
+        constr += [opt.abs(u[0, :]) <= self.u_bounds[0]]
+        constr += [opt.abs(u[1, :]) <= self.u_bounds[1]]
+
+        prob = opt.Problem(opt.Minimize(cost), constr)
+        prob.solve(solver=opt.OSQP, warm_start=True, verbose=verbose)
         return x, u
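
For reference, here is a minimal, self-contained sketch of the QP that the refactored loop assembles, written against plain cvxpy and numpy. The dimensions, dynamics matrices, sample time, weights and bounds below are made-up placeholders; in the patched module they come from the MPC constructor arguments and the params module P.

import numpy as np
import cvxpy as opt

# Placeholder problem data (assumed values, not taken from the repo)
N, M, T = 4, 2, 10                  # state dim, input dim, control horizon
DT = 0.2                            # sample time [s]
u_bounds = np.array([1.0, 0.5])     # stand-ins for [P.MAX_ACC, P.MAX_STEER]
du_bounds = np.array([1.0, 0.5])    # stand-ins for [P.MAX_D_ACC, P.MAX_D_STEER]
A, B, C = np.eye(N), np.ones((N, M)), np.zeros(N)  # linearized model x_{k+1} = A x_k + B u_k + C
Q, R = np.eye(N), 0.1 * np.eye(M)   # state / input weights
target = np.zeros((N, T + 1))       # reference trajectory
initial_state = np.ones(N)

x = opt.Variable((N, T + 1), name="states")
u = opt.Variable((M, T), name="actions")
cost = 0
constr = []

for k in range(T):
    # Tracking error and control effort
    cost += opt.quad_form(target[:, k] - x[:, k], Q)
    cost += opt.quad_form(u[:, k], R)
    if k < T - 1:
        # Penalize and limit the actuation rate of change
        cost += opt.quad_form(u[:, k + 1] - u[:, k], R)
        constr += [opt.abs(u[0, k + 1] - u[0, k]) / DT <= du_bounds[0]]
        constr += [opt.abs(u[1, k + 1] - u[1, k]) / DT <= du_bounds[1]]
    # Linearized kinematics
    constr += [x[:, k + 1] == A @ x[:, k] + B @ u[:, k] + C]

constr += [x[:, 0] == initial_state]           # initial state
constr += [opt.abs(u[0, :]) <= u_bounds[0]]    # acceleration magnitude
constr += [opt.abs(u[1, :]) <= u_bounds[1]]    # steering magnitude

prob = opt.Problem(opt.Minimize(cost), constr)
prob.solve(solver=opt.OSQP, warm_start=True)
print(prob.status, np.round(u.value[:, 0], 3))  # first control action of the plan

With the patched class, the equivalent call is roughly mpc.optimize_linearized_model(A, B, C, initial_state, target, control_horizon=10), with Q, R and the actuation bounds taken from the constructor.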