
function psobp
% BP neural network trained by PSO algorithm
% Copyright by Deng Da-Peng @ 2005
% Email: rexdeng@163.com
% You can change and distribute this code freely for academic usage
% Business usage is strictly prohibited
clc
clear all

AllSamIn=...; % Add all your input data here
AllSamOut=...; % Add all your output data here

% Pre-processing data with premnmx, you can use other functions
global minAllSamOut;
global maxAllSamOut;
[AllSamInn,minAllSamIn,maxAllSamIn,AllSamOutn,minAllSamOut,maxAllSamOut] = premnmx(AllSamIn,AllSamOut);
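% For reference: premnmx maps each row of the data linearly onto [-1,1]
% (pn = 2*(p-minp)./(maxp-minp) - 1); the returned min/max vectors let
% tramnmx apply the same mapping to new data and postmnmx invert it.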
% draw 10 percent of all samples as testing samples, the rest as training samples
% (this indexing assumes 1000 samples in total; adjust the range to your data)
i=[10:10:1000];
TestSamIn=[];
TestSamOut=[];
for j=1:length(i)
TestSamIn=[TestSamIn,AllSamInn(:,i(j))]; 
TestSamOut=[TestSamOut,AllSamOutn(:,i(j))];
end
TargetOfTestSam=...; % add the real (un-normalized) outputs of the testing samples
TrainSamIn=AllSamInn;
TrainSamOut=AllSamOutn;
TrainSamIn(:,i)=[];
TrainSamOut(:,i)=[];
% Evaluation samples
EvaSamIn=...; % Add your evaluation input data
EvaSamInn=tramnmx(EvaSamIn,minAllSamIn,maxAllSamIn); % preprocessing

global Ptrain;
Ptrain = TrainSamIn;
global Ttrain;
Ttrain = TrainSamOut;

Ptest = TestSamIn;
Ttest = TestSamOut;

% Initialize BPN parameters
global indim;
indim=5;
global hiddennum;
hiddennum=3;
global outdim;
outdim=1;

% Initialize PSO parameters
vmax=0.5; % Maximum velocity
minerr=0.001; % Minimum error
wmax=0.90;
wmin=0.30;
global itmax; %Maximum iteration number
itmax=300;
c1=2;
c2=2;
for iter=1:itmax
W(iter)=wmax-((wmax-wmin)/itmax)*iter; % inertia weight declines linearly from wmax to wmin
end 
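% With wmax=0.90, wmin=0.30 and itmax=300 this gives W(1)=0.898 down to
% W(300)=0.30, favouring global exploration early and local refinement late.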
% positions are initialized uniformly at random in (a,b)
a=-1;
b=1;
% velocities are initialized uniformly at random in (m,n) (they could also start from zero)
m=-1;
n=1;
global N; % number of particles
N=40;
global D; % length of particle
D=(indim+1)*hiddennum+(hiddennum+1)*outdim;
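% Each particle encodes one complete weight set of the network, laid out as
% [input weights IW (hiddennum x indim), layer weights LW (outdim x hiddennum),
% hidden biases b1 (hiddennum), output biases b2 (outdim)]; with indim=5,
% hiddennum=3, outdim=1 this gives D=(5+1)*3+(3+1)*1=22.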
% Initialize positions of particles
rand('state',sum(100*clock));
X=a+(b-a)*rand(N,D,1);
%Initialize velocities of particles
V=m+(n-m)*rand(N,D,1);

global fvrec;
MinFit=[];
BestFit=[];

% Function to be minimized: the performance function, i.e., the MSE of the network
global net;
net=newff(minmax(Ptrain),[hiddennum,outdim],{'tansig','purelin'});
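% Note: newff, premnmx, tramnmx and postmnmx belong to the older Neural Network
% Toolbox API; the network is indim -> hiddennum (tansig) -> outdim (purelin),
% and since PSO replaces gradient training, net serves only as a weight container.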
fitness=fitcal(X,net,indim,hiddennum,outdim,D,Ptrain,Ttrain,minAllSamOut,maxAllSamOut);
fvrec(:,1,1)=fitness(:,1,1);
[C,I]=min(fitness(:,1,1));
MinFit=[MinFit C];
BestFit=[BestFit C];
L(:,1,1)=fitness(:,1,1); % record the fitness of every particle at each iteration
B(1,1,1)=C; % record the minimum fitness over all particles
gbest(1,:,1)=X(I,:,1); % the globally best position in the population

% Matrix whose rows are copies of the gbest vector
for p=1:N 
G(p,:,1)=gbest(1,:,1);
end
pbest(:,:,1)=X(:,:,1); % initial personal bests are the initial positions
V(:,:,2)=W(1)*V(:,:,1)+c1*rand*(pbest(:,:,1)-X(:,:,1))+c2*rand*(G(:,:,1)-X(:,:,1));
%V(:,:,2)=cf*(W(1)*V(:,:,1)+c1*rand*(pbest(:,:,1)-X(:,:,1))+c2*rand*(G(:,:,1)-X(:,:,1)));
%V(:,:,2)=cf*(V(:,:,1)+c1*rand*(pbest(:,:,1)-X(:,:,1))+c2*rand*(G(:,:,1)-X(:,:,1)));
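% Standard PSO velocity update: v <- w*v + c1*r1*(pbest-x) + c2*r2*(gbest-x).
% Here a single random scalar (rand) is drawn per term instead of one per
% dimension; the commented variants use a constriction factor cf instead of,
% or in addition to, the inertia weight w.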
% limit velocities of particles to [-vmax, vmax]
V(:,:,2)=min(max(V(:,:,2),-vmax),vmax);
X(:,:,2)=X(:,:,1)+V(:,:,2);
%******************************************************
for j=2:itmax 
disp('Iteration and Current Best Fitness')
disp(j-1)
disp(B(1,1,j-1))
% Calculation of new positions 
fitness=fitcal(X,net,indim,hiddennum,outdim,D,Ptrain,Ttrain,minAllSamOut,maxAllSamOut);
fvrec(:,1,j)=fitness(:,1,j);
%[maxC,maxI]=max(fitness(:,1,j));
%MaxFit=[MaxFit maxC];
%MeanFit=[MeanFit mean(fitness(:,1,j))];
[C,I]=min(fitness(:,1,j));
MinFit=[MinFit C]; 
BestFit=[BestFit min(MinFit)];
L(:,1,j)=fitness(:,1,j);
B(1,1,j)=C;
gbest(1,:,j)=X(I,:,j);
[C,I]=min(B(1,1,:));
% keep gbest as the best particle found in any iteration so far
if B(1,1,j)>C
gbest(1,:,j)=gbest(1,:,I);
end
if C<=minerr, break, end
% Matrix whose rows are copies of the gbest vector
if j>=itmax, break, end
for p=1:N
G(p,:,j)=gbest(1,:,j);
end
% update each particle's personal best from its recorded fitness history
for i=1:N
[C,I]=min(L(i,1,:));
if L(i,1,j)<=C
pbest(i,:,j)=X(i,:,j);
else
pbest(i,:,j)=X(i,:,I);
end
end
V(:,:,j+1)=W(j)*V(:,:,j)+c1*rand*(pbest(:,:,j)-X(:,:,j))+c2*rand*(G(:,:,j)-X(:,:,j));
%V(:,:,j+1)=cf*(W(j)*V(:,:,j)+c1*rand*(pbest(:,:,j)-X(:,:,j))+c2*rand*(G(:,:,j)-X(:,:,j)));
%V(:,:,j+1)=cf*(V(:,:,j)+c1*rand*(pbest(:,:,j)-X(:,:,j))+c2*rand*(G(:,:,j)-X(:,:,j)));
% limit velocities of particles to [-vmax, vmax]
V(:,:,j+1)=min(max(V(:,:,j+1),-vmax),vmax);
X(:,:,j+1)=X(:,:,j)+V(:,:,j+1);
end
disp('Iteration and Current Best Fitness')
disp(j)
disp(B(1,1,j))
disp('Global Best Fitness and Occurred Iteration')
[C,I]=min(B(1,1,:))
% decode gbest into network weights and simulate the trained network
for t=1:hiddennum
x2iw(t,:)=gbest(1,((t-1)*indim+1):t*indim,j);
end
for r=1:outdim
x2lw(r,:)=gbest(1,(indim*hiddennum+(r-1)*hiddennum+1):(indim*hiddennum+r*hiddennum),j);
end
x2b=gbest(1,(indim*hiddennum+hiddennum*outdim+1):D,j);
x2b1=x2b(1:hiddennum).';
x2b2=x2b(hiddennum+1:hiddennum+outdim).';
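% load the decoded weights and biases into the network object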
net.IW{1,1}=x2iw;
net.LW{2,1}=x2lw;
net.b{1}=x2b1;
net.b{2}=x2b2;

nettesterr=mse(sim(net,Ptest)-Ttest);
testsamout = postmnmx(sim(net,Ptest),minAllSamOut,maxAllSamOut);
realtesterr=mse(testsamout-TargetOfTestSam)
EvaSamOutn = sim(net,EvaSamInn);
EvaSamOut = postmnmx(EvaSamOutn,minAllSamOut,maxAllSamOut);

figure(1)
grid
hold on
plot(log(BestFit),'r'); % convergence curve: log of the best fitness so far

figure(2) 
grid
hold on
plot(EvaSamOut,'k'); % network outputs on the evaluation samples
save er net nettesterr realtesterr B fvrec EvaSamOut

% sub-function: compute the fitness of all particles in a specific generation;
% each particle is decoded into the weight matrices of the BP network and its
% training MSE is returned as the fitness value
function fitval = fitcal(pm,net,indim,hiddennum,outdim,D,Ptrain,Ttrain,minAllSamOut,maxAllSamOut) 
[x,y,z]=size(pm);
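% only the last page pm(:,:,z), i.e. the current generation's positions, is evaluated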
for i=1:x
for j=1:hiddennum
x2iw(j,:)=pm(i,((j-1)*indim+1):j*indim,z);
end
for k=1:outdim
x2lw(k,:)=pm(i,(indim*hiddennum+(k-1)*hiddennum+1):(indim*hiddennum+k*hiddennum),z);
end
x2b=pm(i,(indim*hiddennum+hiddennum*outdim+1):D,z);
x2b1=x2b(1:hiddennum).';
x2b2=x2b(hiddennum+1:hiddennum+outdim).';
net.IW{1,1}=x2iw;
net.LW{2,1}=x2lw;
net.b{1}=x2b1;
net.b{2}=x2b2;
error=sim(net,Ptrain)-Ttrain;
fitval(i,1,z)=mse(error);
end