
% MATLAB code for Radial Basis Functions (exact interpolation)

clc; clear;
x=-1:0.05:1;
%generating training data with Random Noise
for i=1:length(x)
y(i)=1.2*sin(pi*x(i))-cos(2.4*pi*x(i))+0.3*randn;
end
% Framing the interpolation matrix for training data
t=length(x);
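% Gaussian basis: phi(r) = exp(-r^2/(2*sigma^2)); here 2*sigma^2 = 0.02,
% i.e. a kernel width of sigma = 0.1.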
for i=1:1:t
for j=1:1:t
h=x(i)-x(j);
k=h^2/.02;
train(i,j)=exp(-k);
end
end
W=train\y'; % solve train*W = y'; backslash is more stable than inv()
% Testing the trained RBF
xtest=-1:0.01:1;
%ytest is the desired output
ytest=1.2*sin(pi*xtest)-cos(2.4*pi*xtest);
% Framing the interpolation matrix for test data
t1=length(xtest);
t=length(x);
for i=1:1:t1
for j=1:1:t
h=xtest(i)-x(j);
k=h^2/.02;
test(i,j)=exp(-k);
end
end
actual_test=test*W;
% Plotting the Performance of the network
figure;
plot(xtest,ytest,'b-',xtest,actual_test,'r+');
xlabel('Xtest value');
ylabel('Ytest value');
legend('Desired output','Approximated curve','Location','NorthWest');
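% Optional check (not in the original listing): a minimal sketch that
% quantifies the fit as the mean squared error over the test grid, using
% the ytest (1x201) and actual_test (201x1) arrays computed above.
mse_exact=mean((ytest'-actual_test).^2);
fprintf('Test MSE, exact interpolation: %.4f\n',mse_exact);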
% MATLAB code for Fixed Centres Selected at Random
clc; clear;
% Training set: 41 points sampled in the range [-1,1]
x=-1:0.05:1;
% Generating training data with random noise
for i=1:length(x)
y(i)=1.2*sin(pi*x(i))-cos(2.4*pi*x(i))+0.3*randn;
end
% Generating 20 Fixed Random Centres from the samples
idx=randperm(numel(x));
centre=sort(x(idx(1:20)))'; % 20 distinct centres drawn from the samples
% Framing the interpolation matrix for training data
t=length(x);
t1=length(centre);
for i=1:1:t
for j=1:1:t1
h=x(i)-centre(j);
k=h^2/0.02;
gtrain(i,j)=exp(-k);
end
end
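% Weights from the regularized normal equations: W = inv(G'G + lamda*I)*G'*d,
% where G is the 41x20 interpolation matrix gtrain and d the noisy targets y.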
I=eye(20);
lamda=0; % Regularization factor
W=((gtrain'*gtrain)+lamda*I)\(gtrain'*y'); % regularized least squares
% Testing the trained RBF
xtest=-1:0.01:1;
%ytest is the desired output
for i=1:length(xtest)
ytest(i)=1.2*sin(pi*xtest(i))-cos(2.4*pi*xtest(i));
end
% Framing the interpolation matrix for test data
t2=length(xtest);
for i=1:1:t2
for j=1:1:t1
h=xtest(i)-centre(j);
k=h^2/.02;
gtest(i,j)=exp(-k);
end
end
dtest=gtest*W;
% Plotting the performance of the network
figure;
plot(xtest,ytest,'b-',xtest,dtest,'r+');
xlabel('Xtest value');
ylabel('Ytest value');
legend('Desired output','Approximated curve','Location','NorthWest');
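% Optional check (not in the original listing): test MSE for the
% 20-centre network, using ytest (1x201) and dtest (201x1) from above.
fprintf('Test MSE, 20 fixed centres: %.4f\n',mean((ytest'-dtest).^2));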
% MATLAB code for Radial Basis Functions with regularization
clc; clear;
x=-1:0.05:1;
%generating training data with Random Noise
for i=1:length(x)
y(i)=1.2*sin(pi*x(i))-cos(2.4*pi*x(i))+0.3*randn;
end
% Framing the interpolation matrix for training data
t=length(x);
for i=1:1:t
for j=1:1:t
h=x(i)-x(j);
k=h^2/.02;
train(i,j)=exp(-k);
end
end
% Determining the weight matrix for regularization factors 1 to 10
I=eye(41); % identity sized to the 41 training samples
for h=1:10
lamda=h; % Regularization factor
W(:,h)=((train'*train)+lamda*I)\(train'*y');
end
% Testing the trained RBF
xtest=-1:0.01:1;
%ytest is the desired output
ytest=1.2*sin(pi*xtest)-cos(2.4*pi*xtest);
% Framing the interpolation matrix for test data
t1=length(xtest);
t=length(x);
for i=1:1:t1
for j=1:1:t
h=xtest(i)-x(j);
k=h^2/.02;
test(i,j)=exp(-k);
end
end
for h=1:10
actual_test(:,h)=test*W(:,h);
end
% Plotting the Performance of the network
figure;
for h=1:5
subplot(3,2,h);
plot(xtest,ytest,'b-',xtest,actual_test(:,h),'r+');
xlabel('Xtest value');
ylabel('Ytest value');
text(-0.9,1.5,['Regularization factor = ',num2str(h)]);
g = legend('Desired Output','Approximated curve','Location','NorthWest');
set(g,'box','off');
end
figure;
for h=1:5
m=5+h; % selects regularization factors 6 to 10
subplot(3,2,h);
plot(xtest,ytest,'b-',xtest,actual_test(:,m),'r+');
xlabel('Xtest value');
ylabel('Ytest value');
text(-0.9,1.5,['Regularization factor = ',num2str(m)]);
g = legend('Desired Output','Approximated curve','Location','NorthWest');
set(g,'box','off');
end
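% Optional check (not in the original listing): a minimal sketch that picks
% the regularization factor with the lowest test MSE, using the ytest and
% actual_test arrays computed above.
for h=1:10
errs(h)=mean((ytest'-actual_test(:,h)).^2);
end
[best_err,best_h]=min(errs);
fprintf('Lowest test MSE %.4f at regularization factor %d\n',best_err,best_h);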
% MATLAB code for Fixed Centres Selected at Random with regularization
clc; clear;
% Training set: 41 points sampled in the range [-1,1]
x=-1:0.05:1;
% Generating training data with random noise
for i=1:length(x)
y(i)=1.2*sin(pi*x(i))-cos(2.4*pi*x(i))+0.3*randn;
end
% Generating 20 Fixed Random Centres from the samples
idx=randperm(numel(x));
centre=sort(x(idx(1:20)))'; % 20 distinct centres drawn from the samples
% Framing the interpolation matrix for training data
t=length(x);
t1=length(centre);
for i=1:1:t
for j=1:1:t1
h=x(i)-centre(j);
k=h^2/0.02;
gtrain(i,j)=exp(-k);
end
end
I=eye(20);
for h=1:10
lamda=h; % Regularization factor
W(:,h)=((gtrain'*gtrain)+lamda*I)\(gtrain'*y');
end
% Testing the trained RBF
xtest=-1:0.01:1;
%ytest is the desired output
for i=1:length(xtest)
ytest(i)=1.2*sin(pi*xtest(i))-cos(2.4*pi*xtest(i));
end
% Framing the interpolation matrix for test data
t2=length(xtest);
for i=1:1:t2
for j=1:1:t1
h=xtest(i)-centre(j);
k=h^2/.02;
gtest(i,j)=exp(-k);
end
end
for h=1:10
dtest(:,h)=gtest*W(:,h);
end
figure;
for h=1:5
subplot(3,2,h);
plot(xtest,ytest,'b-',xtest,dtest(:,h),'r+');
xlabel('Xtest value');
ylabel('Ytest value');
text(-0.9,1.5,['Regularization factor = ',num2str(h)]);
g = legend('Desired Output','Approximated curve','Location','NorthWest');
set(g,'box','off');
end
figure;
for h=1:5
m=5+h; % selects regularization factors 6 to 10
subplot(3,2,h);
plot(xtest,ytest,'b-',xtest,dtest(:,m),'r+');
xlabel('Xtest value');
ylabel('Ytest value');
text(-0.9,1.5,['Regularization factor = ',num2str(m)]);
g = legend('Desired Output','Approximated curve','Location','NorthWest');
set(g,'box','off');
end
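% Optional check (not in the original listing): the same sweep for the
% 20-centre network, using the ytest and dtest arrays computed above.
for h=1:10
errs(h)=mean((ytest'-dtest(:,h)).^2);
end
[best_err,best_h]=min(errs);
fprintf('Lowest test MSE %.4f at regularization factor %d\n',best_err,best_h);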
% Self-Organizing Map to cluster and classify scene images
clc; clear;
load Features_color_histogram;
% image_features_train
% scene_labels_train
% image_features_test
% scene_labels_test

% Framing the input matrix


x=zeros(60,5);
for i=1:60
x(i,1:4)=image_features_train(i,1:4); % four colour-histogram features
x(i,5)=scene_labels_train(i); % class label as the fifth input
end
x=x';
% Assigning the input values of the network variables
InputSize=5;
NoofSamples=60;
OutputSizeX=10;
OutputSizeY=10;
N=1000;
% Initializing the parameters of the Kohonen Network
InitialLearningRate=0.1;
T2=N;
InitialEffectiveWidth=5;
T1=N/(log(InitialEffectiveWidth));
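% With T1 = N/log(sigma0), the neighbourhood width decays to
% sigma(N) = sigma0*exp(-N/T1) = 1 at the final epoch, i.e. the
% neighbourhood shrinks to roughly a single node.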

EffectiveWidth=InitialEffectiveWidth;
LearningRate=InitialLearningRate;
j=1;
% Getting the coordinates for the output map
for row=1:OutputSizeX
for column=1:OutputSizeY
MapX(j)=row;
MapY(j)=column;
j=j+1;
end
end
% Assigning initial weights for synapses
NumberOfNodes=OutputSizeX*OutputSizeY;
w=rand(InputSize,NumberOfNodes);
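% Each column of w is one output node's 5-dimensional prototype vector.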
% Training the map for N = 1000 epochs
for epoch=1:N
% Drawing a sample vector
r=randi([1 60]); % index of a randomly drawn sample (randi replaces the removed randint)
%competition phase
%Determining the winner neuron
for j=1:NumberOfNodes
Eucld(j)=norm(x(:,r)-w(:,j));
end
[y,v]=min(Eucld);
winner=v;%Winning Neuron
% Co-operation and adaptation phase
for j=1:NumberOfNodes
d=sqrt((MapX(j)-MapX(winner))^2+(MapY(j)-MapY(winner))^2);
h=exp(-(d^2)/(2*(EffectiveWidth^2)));
w(:,j)=w(:,j)+LearningRate*h*(x(:,r)-w(:,j));
end
%Varying the learning Rate and Effective width for every epoch
LearningRate=InitialLearningRate*exp(-epoch/T2);
EffectiveWidth=InitialEffectiveWidth*exp(-epoch/T1);
end
% Framing the SOM Output Map
for i=1:NoofSamples
% Determining the winner neuron
for j=1:NumberOfNodes
Eucld(j)=norm(x(:,i)-w(:,j));
scene(j)=w(5,j);
end
[y1,v1]=min(Eucld);
label1(i)=round(scene(v1));
winner1(i)=v1;
end
% Plotting the performance of the network
figure;
for i=1:NoofSamples
plot(i,winner1(i),'*')
text(i+0.5,winner1(i),num2str(label1(i)));
xlabel('No of samples')
ylabel('Output neurons')
h = legend('Winner neuron','Location','NorthWest');
set(h,'box','off');
grid on;
hold on;
end
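% Optional check (not in the original listing): training accuracy, assuming
% scene_labels_train is a 1x60 row vector of integer class labels.
fprintf('Training accuracy: %.1f%%\n',100*mean(label1==scene_labels_train));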
% Self-Organizing Map to cluster scene images and plot a semantic map
clc; clear;
load Features_color_histogram;
% image_features_train
% scene_labels_train
% image_features_test
% scene_labels_test

% Framing the input matrix


x=zeros(60,5);
for i=1:60
x(i,1:4)=image_features_train(i,1:4); % four colour-histogram features
x(i,5)=i; % sample index as the fifth input
end
x=x';
% Assigning the input values of the network variables
InputSize=5;
NoofSamples=60;
OutputSizeX=10;
OutputSizeY=10;
N=1000;
% Initializing the parameters of the Kohonen Network
InitialLearningRate=0.1;
T2=N;
InitialEffectiveWidth=5;
T1=N/(log(InitialEffectiveWidth));

EffectiveWidth=InitialEffectiveWidth;
LearningRate=InitialLearningRate;
j=1;
% Getting the coordinates for the output map
for row=1:OutputSizeX
for column=1:OutputSizeY
MapX(j)=row;
MapY(j)=column;
j=j+1;
end
end
% Assigning initial weights for synapses
NumberOfNodes=OutputSizeX*OutputSizeY;
w=rand(InputSize,NumberOfNodes);
% Training the map for N = 1000 epochs
for epoch=1:N
% Drawing a sample vector
r=randi([1 60]); % index of a randomly drawn sample (randi replaces the removed randint)
%competition phase
%Determining the winner neuron
for j=1:NumberOfNodes
Eucld(j)=norm(x(:,r)-w(:,j));
end
[y,v]=min(Eucld);
winner=v;%Winning Neuron
% Co-operation and adaptation phase
for j=1:NumberOfNodes
d=sqrt((MapX(j)-MapX(winner))^2+(MapY(j)-MapY(winner))^2);
h=exp(-(d^2)/(2*(EffectiveWidth^2)));
w(:,j)=w(:,j)+LearningRate*h*(x(:,r)-w(:,j));
end
%Varying the learning Rate and Effective width for every epoch
LearningRate=InitialLearningRate*exp(-epoch/T2);
EffectiveWidth=InitialEffectiveWidth*exp(-epoch/T1);
end
% Determining the winner neuron for every training sample
for i=1:NoofSamples
for j=1:NumberOfNodes
Eucld(j)=norm(x(:,i)-w(:,j));
scene(j)=w(5,j);
end
[y1,v1]=min(Eucld);
label1(i)=round(scene(v1));
winner1(i)=v1;
winner2(i)=w(5,v1);
end
% Framing the SOM Output Map: label each output node with its nearest sample
for i=1:NumberOfNodes
for j=1:NoofSamples
Eucld(j)=norm(x(:,j)-w(:,i));
scene(j)=x(5,j);
end
[y1,v1]=min(Eucld(1:NoofSamples)); % only the first NoofSamples entries are fresh
label1(i)=round(scene(v1));
winner1(i)=v1;
end
% Mapping each output node's nearest-sample index onto the 10x10 output layer
j=1;
for row=1:OutputSizeX
for column=1:OutputSizeY
indices_display(row,column)=winner1(j);
j=j+1;
end
end
% Plotting the semantic map for the scene images
DisplayImageMatrix(indices_display);
% Self-Organizing Map to cluster and classify scene images (test set)
clc; clear;

load Features_color_histogram;
% image_features_train
% scene_labels_train
% image_features_test
% scene_labels_test

% Framing the input matrix


x=zeros(60,5);
for i=1:60
x(i,1:4)=image_features_train(i,1:4); % four colour-histogram features
x(i,5)=scene_labels_train(i); % class label as the fifth input
end
x=x';
% Assigning the input values of the network variables
InputSize=5;
NoofSamples=60;
OutputSizeX=10;
OutputSizeY=10;
N=1000;

% Initializing the parameters of the Kohonen Network


InitialLearningRate=0.1;
T2=N;
InitialEffectiveWidth=5;
T1=N/(log(InitialEffectiveWidth));

EffectiveWidth=InitialEffectiveWidth;
LearningRate=InitialLearningRate;
j=1;

% Getting the coordinates for the output map


for row=1:OutputSizeX
for column=1:OutputSizeY
MapX(j)=row;
MapY(j)=column;
j=j+1;
end
end

% Assigning initial weights for synapses


NumberOfNodes=OutputSizeX*OutputSizeY;
w=rand(InputSize,NumberOfNodes);

% Training the map for N = 1000 epochs


for epoch=1:N
% Drawing a sample vector
r=randi([1 60]); % index of a randomly drawn sample (randi replaces the removed randint)

%competition phase
%Determining the winner neuron
for j=1:NumberOfNodes
Eucld(j)=norm(x(:,r)-w(:,j));
end
[y,v]=min(Eucld);
winner=v;%Winning Neuron

% Co-operation and adaptation phase


for j=1:NumberOfNodes
d=sqrt((MapX(j)-MapX(winner))^2+(MapY(j)-MapY(winner))^2);
h=exp(-(d^2)/(2*(EffectiveWidth^2)));
w(:,j)=w(:,j)+LearningRate*h*(x(:,r)-w(:,j));
end

%Varying the learning Rate and Effective width for every epoch
LearningRate=InitialLearningRate*exp(-epoch/T2);
EffectiveWidth=InitialEffectiveWidth*exp(-epoch/T1);
end

% Framing the input matrix for the 30 test samples
y=zeros(30,5);
for i=1:30
y(i,1:4)=image_features_test(i,1:4); % four colour-histogram features
y(i,5)=scene_labels_test(i); % class label as the fifth input
end
y=y';
NoofSamples=30;
% Framing the SOM Output Map
for i=1:NoofSamples

% Determining the winner neuron


for j=1:NumberOfNodes
Eucld(j)=norm(y(:,i)-w(:,j));
scene(j)=w(5,j);
end
[y2,v2]=min(Eucld);
label2(i)=round(scene(v2));
winner2(i)=v2;
recognition_error(i)=label2(i)-scene_labels_test(i); % 0 means a correct prediction
end

% Plotting the performance of the network


figure;
subplot(2,1,1)
for i=1:NoofSamples
plot(i,winner2(i),'*')
text(i+0.5,winner2(i),num2str(label2(i)));
xlabel('No of samples')
ylabel('Output neurons')
h = legend('Winner neuron','Location','NorthWest');
set(h,'box','on');
grid on;
hold on;
end
subplot(2,1,2)
for i=1:NoofSamples
plot(i,recognition_error(i),'*')
xlabel('No of samples')
ylabel('Recognition error')
h = legend('Recognition error','Location','NorthWest');
set(h,'box','on');
hold on;
end
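% Optional summary (not in the original listing): overall recognition rate
% on the 30 test samples, i.e. the fraction of zero-error predictions.
fprintf('Recognition rate: %.1f%%\n',100*mean(recognition_error==0));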
