fprintf('%3d %8.3f %8.3f %8.3f %8.3f %8.3f \n',i,gx,gbest)
end
fprintf('_____________________________________________ \n')
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% MATLAB code func1.m
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% y -> objective function
% penalty -> penalty term
%
function y = func1(x,scale_factor)
y = 1.10471*x(1)*x(1)*x(2) + 0.04811*x(3)*x(4)*(14+x(2));
penalty = 0.0;
[h,g] = constr(x);
for i = 1:length(h)
if h(i)~=0
penalty = penalty + h(i)^2;
end
end
for i = 1:length(g)
if g(i)>0
penalty = penalty + g(i)^2;
end
end
y = y+penalty*scale_factor;
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% MATLAB code constr.m
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% define your constraints here
% g(1) to g(15) -> inequality constraints
% h(1) -> equality constraint (this problem has none, so h(1) is set to 0)
%
function [h,g] = constr(x)
h(1) = 0;              % no equality constraints in this problem
load = 6000;           % applied load P (lb)
length = 14;           % beam length L (in.); shadows the built-in length inside this function only
modulusE = 30e6;       % Young's modulus E (psi)
modulusG = 12e6;       % shear modulus G (psi)
tmax = 13600;          % allowable shear stress in the weld (psi)
sigmamax = 30000;      % allowable bending stress in the beam (psi)
delmax = 0.25;         % allowable end deflection (in.)
tdash = load/(sqrt(2)*x(1)*x(2));                  % primary (direct) shear stress in the weld
R = sqrt(x(2)*x(2)/4 + ((x(1)+x(3))/2)^2);         % distance from weld centroid to the critical point
M = load*(length + x(2)/2);                        % bending moment at the weld
J = 2* ((x(1)*x(2)/sqrt(2)) * (x(2)^2/12 +((x(1)+x(3))/2)^2)); % polar moment of inertia of weld group
tdashdash = M*R/J;                                 % secondary (torsional) shear stress
tx = sqrt(tdash^2 + 2*tdash*tdashdash*x(2)/(2*R) + tdashdash^2); % resultant shear stress
sigmax = 6*load*length/(x(4)*x(3)^2);              % bending stress in the beam
delx = 4*load*length^3/(modulusE*x(4)*x(3)^3);     % end deflection of the beam
pcx = (4.013*sqrt(modulusE*modulusG*x(3)^2*x(4)^6/36)/(length^2)) * ...
      (1- (x(3)/(2*length))*sqrt(modulusE/(4*modulusG)));        % buckling load of the beam
g(1) = tx/tmax -1;              % weld shear stress limit
g(2) = sigmax/sigmamax -1;      % beam bending stress limit
g(3) = x(1) - x(4);             % weld size must not exceed bar thickness
g(4) = (0.10471*x(1)*x(1) + 0.04811*x(3)*x(4)*(14+x(2)))/5 -1;   % cost limit
g(5) = 0.125 - x(1);            % minimum weld size
g(6) = delx/delmax -1;          % end deflection limit
g(7) = load/pcx -1;             % buckling load must exceed applied load
g(8) = x(1)/2 -1;               % x(1) <= 2
g(9) = x(4)/2 -1;               % x(4) <= 2
g(10) = -x(1) + 0.1;            % x(1) >= 0.1
g(11) = -x(4) + 0.1;            % x(4) >= 0.1
g(12) = x(2)/10 -1;             % x(2) <= 10
g(13) = x(3)/10 -1;             % x(3) <= 10
g(14) = -x(2) + 0.1;            % x(2) >= 0.1
g(15) = -x(3) + 0.1;            % x(3) >= 0.1
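Taken together, func1.m and constr.m implement a quadratic exterior penalty for the welded-beam problem: each violated inequality and each nonzero equality adds its squared violation, scaled by scale_factor, to the objective. A minimal usage sketch follows; the design vector and scale factor shown are illustrative assumptions, not values from the text.

x = [0.25 6.0 8.5 0.25];       % sample design: [weld size, weld length, bar depth, bar width] (assumed)
scale_factor = 10;             % penalty scaling, normally set in the calling script (assumed here)
fpen = func1(x,scale_factor)   % penalised objective value at the sample design
[h,g] = constr(x);             % inspect the individual constraint values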
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% MATLAB code ALM.m
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% n_of_var -> number of design variables
% x = [0 1 1] -> starting value of x
% epsilon1, epsilon2 -> constant used for terminating
% the algorithm
% delx -> required for gradient computation
% falpha_prev -> function value at first/previous iteration
% deriv -> gradient vector
% deltag -> difference in gradient vector (over previous iteration)
% A -> approximation of inverse of the hessian matrix
% search -> search direction
% LAMBDA, BETA -> Lagrange Multipliers
% RK -> penalty parameter
%
clear all
clc
n_of_var = 2;
n_of_eqcons = 1;
n_of_iqcons = 2;
scale_factor = 1;
global LAMBDA RK BETA EQCONSTR ICONSTR FVALUE
LAMBDA = zeros(1,n_of_eqcons);
BETA = zeros(1,n_of_iqcons);
x = [0 1 1];
RK = x(3);
A = eye(length(x));
epsilon1 = 1e-6;
epsilon2 = 1e-6;
delx = 1e-3;
checkconstr = zeros(1,n_of_iqcons);
falpha_prev = func1(x,scale_factor);
fprintf('Initial function value = %7.4f\n ',FVALUE)
fprintf(' No. x-vector rk f(x) |Cons.| \n')
fprintf('___________________________________________________\n')
for i = 1:30
if i==1
deriv_prev = grad_vec(x,delx,n_of_var,scale_factor);
search = -deriv_prev;
[alpha,falpha] = golden_funct1(x,search,scale_factor);
if abs(falpha-falpha_prev)<epsilon1
break;
end
falpha_prev = falpha;
x = x + alpha*search;
yyy = func1(x,scale_factor);
LAMBDA = LAMBDA + 2*RK*EQCONSTR;
BETA = BETA + 2*RK*(max([ICONSTR; -BETA./(2*RK)]));
checkconstr1 = max([ICONSTR;checkconstr]);
fprintf('%3d %8.3f %8.3f %8.3f %8.3f %8.3f \n',i,x,FVALUE, ...
    norm([EQCONSTR checkconstr1]))
else
deltax = (alpha*search);
if i>2
deltax = deltax';
end
deriv = grad_vec(x,delx,n_of_var,scale_factor);
deltag = deriv-deriv_prev;
term1 = (deltax'*deltax)/(deltax*deltag');
term2 = (A*deltag'*deltag*A)/(deltag*A*deltag');
A = A + term1 - term2;
search = -A*deriv';
[alpha,falpha] = golden_funct1(x,search',scale_factor);
checkconstr1 = max([ICONSTR;checkconstr]);
fprintf('%3d %8.3f %8.3f %8.3f %8.3f %8.3f \n',i,x,FVALUE, ...
    norm([EQCONSTR checkconstr1]))
if abs(falpha-falpha_prev)<epsilon1 || norm(deriv)<epsilon2
break;
end
falpha_prev = falpha;
deriv_prev = deriv;
x = x+alpha*search';
yyy = func1(x,scale_factor);
LAMBDA = LAMBDA + 2*RK*EQCONSTR;
BETA = BETA + 2*RK*(max([ICONSTR; -BETA./(2*RK)]));
end
end
fprintf('_________________________________________________\n\n')
if all(LAMBDA>=0) && all(BETA>=0)
fprintf('KKT Conditions are satisfied \n\n')
end
fprintf('Lagrange Multipliers: \n\n')
disp([LAMBDA BETA])
%
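ALM.m calls two helpers that are not listed in this section: grad_vec.m (gradient of the augmented function) and golden_funct1.m (a golden-section line search along the direction search). The sketch below is an assumed forward-difference implementation of grad_vec, not the book's own listing; it returns a vector of the same size as x so the vector updates in ALM.m stay dimensionally consistent.

function deriv = grad_vec(x,delx,n_of_var,scale_factor)
% forward-difference estimate of the gradient of func1 (assumed sketch)
deriv = zeros(size(x));                     % gradient estimate, same shape as x
fbase = func1(x,scale_factor);              % function value at the current point
for j = 1:n_of_var                          % perturb only the design variables
    xp = x;
    xp(j) = xp(j) + delx;
    deriv(j) = (func1(xp,scale_factor) - fbase)/delx;
end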
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% MATLAB code func1.m
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
function y = func1(x,scale_factor)
global LAMBDA RK BETA EQCONSTR ICONSTR FVALUE
y = (x(1)-1)^2 + (x(2)-5)^2;
h(1) = 0.0;
g(1) = -x(1)^2 + x(2) -4;
g(2) = -(x(1)-2)^2 + x(2) -3;
EQCONSTR = h;
ICONSTR = g;
FVALUE = y;
y = y + LAMBDA.*EQCONSTR + RK.*EQCONSTR^2 + ...
    sum(BETA.*max([ICONSTR; -BETA./(2*RK)])) + ...
    sum(RK*(max([ICONSTR; -BETA./(2*RK)])).^2);
%
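The last statement of this func1.m assembles the augmented Lagrangian of the test problem. With equality constraints h_j, inequality constraints g_k, multipliers LAMBDA and BETA, and penalty parameter RK, it evaluates

A(x) = f(x) + \sum_j \lambda_j h_j(x) + r_k \sum_j h_j(x)^2 + \sum_k \beta_k \max(g_k(x), -\beta_k/(2 r_k)) + r_k \sum_k [\max(g_k(x), -\beta_k/(2 r_k))]^2

which is what the vectorised max([ICONSTR; -BETA./(2*RK)]) terms compute; the multiplier updates LAMBDA + 2*RK*EQCONSTR and BETA + 2*RK*max(...) in ALM.m follow from this form.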
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% MATLAB code sqp.m
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% n_of_var -> number of design variables
% X = [10 -5] -> starting value of X
% epsilon1 -> constant used for terminating the algorithm
% delx -> required for gradient computation
% falpha_prev -> function value at first/previous iteration
% deriv -> gradient vector
% quadprog -> MATLAB function used to solve the quadratic programming subproblem
% LAMBDA -> Lagrange multipliers
%
clear all
clc
warning off
n_of_var = 2;
n_of_eqcons = 1;
n_of_iqcons = 1;
scale_factor = 10;
global LAMBDA RK BETA EQCONSTR ICONSTR FVALUE
LAMBDA = zeros(1,n_of_eqcons);
BETA = zeros(1,n_of_iqcons);
X = [10 -5];
RK = 1;
A = eye(length(X));
epsilon1 = 1e-6;
delx = 1e-3;
checkconstr = zeros(1,n_of_iqcons);
fprintf(' No. x-vector f(x) |Cons.| \n')
fprintf('_________________________________________________\n')
for i = 1:3
deriv_f = grad_vec_f(X,delx,n_of_var,scale_factor);
sec_deriv_f = hessian(X,delx);
deriv_eqcon = grad_vec_eqcon(X,delx,n_of_eqcons);
deriv_ineqcon = grad_vec_ineqcon(X,delx,n_of_iqcons);
options = optimset('Display','off');
x = quadprog(sec_deriv_f,deriv_f,deriv_ineqcon,-ineqconstr_val(X), ...
    deriv_eqcon,-eqconstr_val(X),[],[],X,options);
fprev = func_val(X);
X = X+x';
yyy = func_val(X);
LAMBDA = LAMBDA + 2*RK*EQCONSTR;
BETA = BETA + 2*RK*(max([ICONSTR; -BETA./(2*RK)]));
fnew = func_val(X);
checkconstr1 = max([ineqconstr_val(X);checkconstr]);
disp([i X FVALUE norm([checkconstr1 eqconstr_val(X)])]);
if abs(fnew-fprev) < epsilon1
break
end
end
fprintf('_________________________________________________\n')
%
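In the quadprog call of sqp.m, sec_deriv_f and deriv_f are the finite-difference Hessian and gradient of the augmented Lagrangian (from hessian.m and grad_vec_f.m, not listed in this section), the constraint gradients form the linear coefficient matrices, and -ineqconstr_val(X) and -eqconstr_val(X) are the right-hand sides of the linearised constraints, so the QP solution x is the step added to X. The fragment below is a simplified, self-contained illustration of this mapping with a hypothetical equality constraint x1 + x2 = 2; it is not the book's subproblem.

X   = [10 -5];                           % starting point (same as sqp.m)
H   = 2*eye(2);                          % Hessian of (x1-1)^2 + (x2-2)^2
c   = [2*(X(1)-1); 2*(X(2)-2)];          % gradient of the objective at X
Aeq = [1 1];  beq = 2 - (X(1)+X(2));     % linearised hypothetical constraint x1 + x2 = 2
d   = quadprog(H,c,[],[],Aeq,beq,[],[],X,optimset('Display','off'));
X   = X + d'                             % one SQP-type step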
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% MATLAB code func_val.m
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% computes augmented Lagrangian function value
%
function y = func_val(x)
global LAMBDA RK BETA EQCONSTR ICONSTR FVALUE
y = (x(1)-1)^2 + (x(2)-2)^2;
g = ineqconstr_val(x);
h = eqconstr_val(x);
EQCONSTR = h;
ICONSTR = g;
FVALUE = y;
y = y + LAMBDA*EQCONSTR' + RK*EQCONSTR*EQCONSTR' + ...
    sum(BETA.*max([ICONSTR; -BETA./(2*RK)])) + ...
    sum(RK*(max([ICONSTR; -BETA./(2*RK)])).^2);
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
