You are on page 1 of 9

XOR -- Sage

http://localhost:8080/home/admin/2/print

XOR
# Code from Chapter 3 of Machine Learning: An Algorithmic
Perspective (2nd Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any
way you wish for
# non-commercial purposes, but please maintain the name of the
original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2008, 2014
import numpy as np
class pcn:
    """A basic Perceptron (the same as pcn.py except that the weights are
    printed after every training iteration and the inputs are not reordered).

    Code from Chapter 3 of "Machine Learning: An Algorithmic Perspective"
    (2nd Edition) by Stephen Marsland (http://stephenmonika.net).
    Free for non-commercial use; please keep the original author's name.
    """

    def __init__(self, inputs, targets):
        """Set up the network dimensions and randomly initialise the weights.

        inputs:  (nData, nIn) array of training inputs (1-D means nIn == 1).
        targets: (nData, nOut) array of target outputs (1-D means nOut == 1).
        """
        # Set up network size: handle both 1-D and 2-D input/target arrays.
        if np.ndim(inputs) > 1:
            self.nIn = np.shape(inputs)[1]
        else:
            self.nIn = 1
        if np.ndim(targets) > 1:
            self.nOut = np.shape(targets)[1]
        else:
            self.nOut = 1
        self.nData = np.shape(inputs)[0]
        # Initialise the network: one extra weight row for the bias node,
        # small random values in [-0.05, 0.05).
        self.weights = np.random.rand(self.nIn + 1, self.nOut) * 0.1 - 0.05

    def pcntrain(self, inputs, targets, eta, nIterations):
        """Train the perceptron for nIterations epochs with learning rate eta.

        Prints the weight matrix after each iteration and the final outputs.
        """
        # Add the inputs that match the bias node.
        inputs = np.concatenate((inputs, np.ones((self.nData, 1))), axis=1)
        # Training: one batch weight update per iteration.
        for n in range(nIterations):
            self.activations = self.pcnfwd(inputs)
            self.weights -= eta * np.dot(np.transpose(inputs),
                                         self.activations - targets)
            print("Iteration: ", n)
            print(self.weights)
        activations = self.pcnfwd(inputs)
        print("Final outputs are:")
        print(activations)
        # return self.weights

    def pcnfwd(self, inputs):
        """Run the network forward.

        `inputs` must already include the bias column; returns 0/1 outputs.
        """
        # Compute activations, then threshold them at zero.
        activations = np.dot(inputs, self.weights)
        return np.where(activations > 0, 1, 0)

    def confmat(self, inputs, targets):
        """Compute and print the confusion matrix and the overall accuracy."""
        # Add the inputs that match the bias node.
        inputs = np.concatenate((inputs, np.ones((self.nData, 1))), axis=1)
        outputs = np.dot(inputs, self.weights)
        nClasses = np.shape(targets)[1]
        if nClasses == 1:
            # Single output node: threshold gives a two-class problem.
            nClasses = 2
            outputs = np.where(outputs > 0, 1, 0)
        else:
            # 1-of-N encoding: the most active node is the predicted class.
            outputs = np.argmax(outputs, 1)
            targets = np.argmax(targets, 1)
        cm = np.zeros((nClasses, nClasses))
        for i in range(nClasses):
            for j in range(nClasses):
                cm[i, j] = np.sum(np.where(outputs == i, 1, 0) *
                                  np.where(targets == j, 1, 0))
        print(cm)
        print(np.trace(cm) / np.sum(cm))
# First experiment: try to learn the XOR truth table directly.
# XOR is not linearly separable, so the perceptron cannot converge.
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
targets = np.array([[0], [1], [1], [0]])
p = pcn(inputs, targets)
p.pcntrain(inputs, targets, 0.25, 6)
Iteration: 0
[[ 0.04424994]
[ 0.20475572]
[-0.22777382]]
Final outputs are:
[[1]
[1]
[1]
[1]]
Iteration: 1
[[-0.20575006]
[-0.04524428]
[ 0.27222618]]
Final outputs are:
[[0]
[0]
[0]
[0]]
Iteration: 2
[[ 0.04424994]
[ 0.20475572]
[-0.22777382]]
Final outputs are:
[[1]
[1]
[1]
[1]]
Iteration: 3

3 of 9

20150530 13:26

XOR -- Sage

http://localhost:8080/home/admin/2/print

[[-0.20575006]
[-0.04524428]
[ 0.27222618]]
Final outputs are:
[[0]
[0]
[0]
[0]]
Iteration: 4
[[ 0.04424994]
[ 0.20475572]
[-0.22777382]]
Final outputs are:
[[1]
[1]
[1]
[1]]
Iteration: 5
[[-0.20575006]
[-0.04524428]
[ 0.27222618]]
Final outputs are:
[[0]
[0]
[0]
[0]]
# (0,0,0,0)3

# Code from Chapter 3 of Machine Learning: An Algorithmic


Perspective (2nd Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any
way you wish for
# non-commercial purposes, but please maintain the name of the
original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2008, 2014
import numpy as np

4 of 9

20150530 13:26

XOR -- Sage

http://localhost:8080/home/admin/2/print

class pcn:
    """A basic Perceptron (the same as pcn.py except that the weights are
    printed after every training iteration and the inputs are not reordered).

    Code from Chapter 3 of "Machine Learning: An Algorithmic Perspective"
    (2nd Edition) by Stephen Marsland (http://stephenmonika.net).
    Free for non-commercial use; please keep the original author's name.
    """

    def __init__(self, inputs, targets):
        """Set up the network dimensions and randomly initialise the weights.

        inputs:  (nData, nIn) array of training inputs (1-D means nIn == 1).
        targets: (nData, nOut) array of target outputs (1-D means nOut == 1).
        """
        # Set up network size: handle both 1-D and 2-D input/target arrays.
        if np.ndim(inputs) > 1:
            self.nIn = np.shape(inputs)[1]
        else:
            self.nIn = 1
        if np.ndim(targets) > 1:
            self.nOut = np.shape(targets)[1]
        else:
            self.nOut = 1
        self.nData = np.shape(inputs)[0]
        # Initialise the network: one extra weight row for the bias node,
        # small random values in [-0.05, 0.05).
        self.weights = np.random.rand(self.nIn + 1, self.nOut) * 0.1 - 0.05

    def pcntrain(self, inputs, targets, eta, nIterations):
        """Train the perceptron for nIterations epochs with learning rate eta.

        Prints the weight matrix after each iteration and the final outputs.
        """
        # Add the inputs that match the bias node.
        inputs = np.concatenate((inputs, np.ones((self.nData, 1))), axis=1)
        # Training: one batch weight update per iteration.
        for n in range(nIterations):
            self.activations = self.pcnfwd(inputs)
            self.weights -= eta * np.dot(np.transpose(inputs),
                                         self.activations - targets)
            print("Iteration: ", n)
            print(self.weights)
        activations = self.pcnfwd(inputs)
        print("Final outputs are:")
        print(activations)
        # return self.weights

    def pcnfwd(self, inputs):
        """Run the network forward.

        `inputs` must already include the bias column; returns 0/1 outputs.
        """
        # Compute activations, then threshold them at zero.
        activations = np.dot(inputs, self.weights)
        return np.where(activations > 0, 1, 0)

    def confmat(self, inputs, targets):
        """Compute and print the confusion matrix and the overall accuracy."""
        # Add the inputs that match the bias node.
        inputs = np.concatenate((inputs, np.ones((self.nData, 1))), axis=1)
        outputs = np.dot(inputs, self.weights)
        nClasses = np.shape(targets)[1]
        if nClasses == 1:
            # Single output node: threshold gives a two-class problem.
            nClasses = 2
            outputs = np.where(outputs > 0, 1, 0)
        else:
            # 1-of-N encoding: the most active node is the predicted class.
            outputs = np.argmax(outputs, 1)
            targets = np.argmax(targets, 1)
        cm = np.zeros((nClasses, nClasses))
        for i in range(nClasses):
            for j in range(nClasses):
                cm[i, j] = np.sum(np.where(outputs == i, 1, 0) *
                                  np.where(targets == j, 1, 0))
        print(cm)
        print(np.trace(cm) / np.sum(cm))
# Second experiment: XOR targets again, but with a third input component,
# which makes the problem learnable by a single perceptron.
inputs = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 1, 0]])
targets = np.array([[0], [1], [1], [0]])
# A fresh network must be constructed here: the previous `p` was sized for
# 2 inputs (3 weights including bias), while these inputs need 4 weights
# (3 inputs + bias) — reusing the old `p` would fail with a shape mismatch.
# The captured output below (four weight rows) confirms a new network ran.
p = pcn(inputs, targets)
p.pcntrain(inputs, targets, 0.25, 15)
WARNING: Output truncated!
full_output.txt

6 of 9

20150530 13:26

XOR -- Sage

http://localhost:8080/home/admin/2/print

Iteration: 0
[[-0.49488647]
[-0.48800173]
[-0.97720969]
[-0.48311308]]
Final outputs are:
[[0]
[0]
[0]
[0]]
Iteration: 1
[[-0.24488647]
[-0.23800173]
[-0.97720969]
[-0.98311308]]
Final outputs are:
[[1]
[1]
[1]
[1]]
Iteration: 2
[[-0.49488647]
[-0.48800173]
[-1.22720969]
[-0.48311308]]
Final outputs are:
[[0]
[0]
[0]
[0]]
Iteration: 3
[[-0.24488647]
[-0.23800173]
[-1.22720969]
[-0.98311308]]
Final outputs are:
[[0]
[1]
[1]
[1]]
Iteration: 4

7 of 9

20150530 13:26

XOR -- Sage

http://localhost:8080/home/admin/2/print

[[-0.49488647]
[-0.48800173]
[-1.22720969]
[-0.73311308]]
Final outputs are:
[[0]
[1]
[1]
[0]]
Iteration: 5
[[-0.49488647]
[-0.48800173]
[-1.22720969]
[-0.73311308]]
Final outputs are:
[[0]
[1]
[1]
...
Iteration: 9
[[-0.49488647]
[-0.48800173]
[-1.22720969]
[-0.73311308]]
Final outputs are:
[[0]
[1]
[1]
[0]]
Iteration: 10
[[-0.49488647]
[-0.48800173]
[-1.22720969]
[-0.73311308]]
Final outputs are:
[[0]
[1]
[1]
[0]]
Iteration: 11
[[-0.49488647]

8 of 9

20150530 13:26

XOR -- Sage

http://localhost:8080/home/admin/2/print

[-0.48800173]
[-1.22720969]
[-0.73311308]]
Final outputs are:
[[0]
[1]
[1]
[0]]
Iteration: 12
[[-0.49488647]
[-0.48800173]
[-1.22720969]
[-0.73311308]]
Final outputs are:
[[0]
[1]
[1]
[0]]
Iteration: 13
[[-0.49488647]
[-0.48800173]
[-1.22720969]
[-0.73311308]]
Final outputs are:
[[0]
[1]
[1]
[0]]
Iteration: 14
[[-0.49488647]
[-0.48800173]
[-1.22720969]
[-0.73311308]]
Final outputs are:
[[0]
[1]
[1]
[0]]
full_output.txt
#

9 of 9

20150530 13:26

You might also like