Published
Edited
Jun 6, 2022
1 fork
Insert cell
# Neural Networks Take 2 - XOR net
Insert cell
class Neuron {
  /**
   * A single unit in the network.
   * @param {number} layer_i - index of the layer this neuron belongs to (0 = input layer)
   * @param {number} neuron_i - index of this neuron within its layer
   */
  constructor(layer_i, neuron_i) {
    this.layer_i = layer_i;
    this.neuron_i = neuron_i;
    this.weights = [];        // incoming weights, one per neuron in the previous layer
    this.deltas = [];         // most recent weight adjustments (used for momentum)
    this.previousDeltas = []; // prior epoch's deltas
    this.input = null;        // weighted sum (or raw input for input-layer neurons)
    this.output = null;       // activation result of the last forward pass
    this.gradient = 0;        // error gradient set during backpropagation
    this.momentum = 0.7;      // NOTE(review): unused here — Network.train() uses its own momentum constant
    this.bias = 1; //Math.random() * 2 - 1
  }

  /**
   * Forward pass for one neuron.
   * Input-layer (or unwired) neurons pass their input element through
   * unchanged; wired neurons compute sigmoid(weights . input + bias).
   * @param {number[]} input - outputs of the previous layer (or raw network input)
   * @returns {number} this neuron's activation
   */
  activate(input) {
    // Input layer or no weights yet: identity pass-through of the element
    // at this neuron's own position.
    if (this.layer_i === 0 || !this.weights.length) {
      this.input = input[this.neuron_i];
      this.output = this.input;
      return this.output;
    }
    // Weighted sum over the incoming connections. (Indexed loop instead of
    // for...in, which iterates string keys on arrays; the old
    // `weights.length ? weights[i] : 1` ternary and the `if (weights.length)`
    // bias guard were dead — the early return above guarantees weights exist.)
    let sum = 0;
    for (let i = 0; i < input.length; i++) {
      sum += input[i] * this.weights[i];
    }
    sum += this.bias;
    this.input = sum;
    // sigmoid activation function
    this.output = 1 / (1 + Math.exp(-1 * sum));
    return this.output;
  }
}
Insert cell
class Layer {
  // Neurons owned by this layer, in positional order.
  neurons = []

  /**
   * Build a layer of `neuron_count` neurons.
   * @param {number} layer_i - index of this layer within the network
   * @param {number} neuron_count - how many neurons to create
   */
  constructor(layer_i, neuron_count) {
    this.layer_i = layer_i;
    this.neurons = Array.from(
      { length: neuron_count },
      (_, idx) => new Neuron(this.layer_i, idx)
    );
  }

  /**
   * Feed `input` to every neuron and collect their activations.
   * @param {number[]} input - outputs of the previous layer (or raw network input)
   * @returns {number[]} one activation per neuron, in order
   */
  activate(input) {
    return this.neurons.map((neuron) => neuron.activate(input));
  }
}
Insert cell
class Network {
  layers = [];   // Layer instances, input layer first, output layer last
  sigError = 1;  // squared error of the most recent training sample
  epoch = 0;     // completed training iterations

  /**
   * Fully-connected feed-forward network.
   * @param {number[]} layers_neurons - neuron count per layer, e.g. [2,4,1]
   */
  constructor(layers_neurons) {
    // Indexed loop instead of for...in: for...in yields STRING keys, which
    // made Neuron's strict `layer_i === 0` check silently fail (it only
    // worked via the empty-weights fallback). A numeric index is correct.
    for (let i = 0; i < layers_neurons.length; i++) {
      let neuron_count = layers_neurons[i];
      this.layers.push(new Layer(i, neuron_count));
      let this_layer = this.layers[this.layers.length - 1];
      let prev_layer = this.layers[this.layers.length - 2];
      if (!prev_layer) continue; // input layer has no incoming connections
      // connect each neuron in the new layer to every neuron in the
      // previous layer; each connection is a signed weight in [-1, 1)
      for (let j = 0; j < neuron_count; j++) {
        for (let k = 0; k < prev_layer.neurons.length; k++) {
          this_layer.neurons[j].weights[k] = Math.random() * 2 - 1;
        }
      }
    }
  }

  /**
   * Forward pass: feed `input` through each layer in order.
   * @param {number[]} input - one value per input-layer neuron
   * @returns {number[]} output-layer activations
   */
  input(input) {
    let result = input.slice();
    // feed input into the first layer, then feed the result of each layer
    // into the next layer
    for (let i = 0; i < this.layers.length; i++) {
      result = this.layers[i].activate(result);
    }
    return result;
  }

  /**
   * Stochastic gradient descent trainer. Implemented as a generator that
   * yields `this` once per epoch so callers can animate/inspect progress.
   * Stops after 3 consecutive passing validations with low error, or at
   * `maxEpochs`.
   * @param {number[][]} tests - training inputs
   * @param {number[][]} ideals - expected outputs, parallel to `tests`
   */
  *train(tests, ideals) {
    this.epoch = 0;
    this.error_over_time = [];
    let learningRate = 0.3;
    let momentum = 0.3;
    let minError = 0.01;
    let maxEpochs = 30 * 1000;
    this.sigError = 1;
    this.deltas = [];
    this.consec_valid = 0;   // consecutive epochs in which validation passed
    this.valid_over_time = [];
    this.min_over_time = []; // running minimum of error_over_time
    this.passed = false;
    let loop = true;
    while (loop) {
      if (this.epoch > maxEpochs) {
        break;
      }
      this.samples = [];
      // one random sample per epoch (this is what makes the descent
      // *stochastic*)
      for (let s = 0; s < 1; s++) {
        // start with 0 error
        this.sigError = 0;

        // Pick a random test/ideal pair. Math.floor over tests.length is
        // uniform and works for any sample count — the previous
        // Math.round(Math.random() * 3) under-sampled indices 0 and 3
        // (half the probability of 1 and 2) and hard-coded 4 samples.
        let sample_idx = Math.floor(Math.random() * tests.length);
        let test_input = tests[sample_idx];
        let ideal_output = ideals[sample_idx];
        // ask the network to predict output for the given test input
        this.input(test_input);

        // grab the last layer for easy reference
        let last_layer = this.layers[this.layers.length - 1];
        // walk the layers backward (output to input) computing gradients
        for (let k = this.layers.length - 1; k >= 0; k--) {
          if (k === this.layers.length - 1) {
            // output layer: delta is (ideal - predicted); the gradient
            // scales it by the sigmoid derivative out * (1 - out)
            for (let j = 0; j < last_layer.neurons.length; j++) {
              let neuron = last_layer.neurons[j];
              let output = neuron.output;
              let delta = ideal_output[j] - output;
              neuron.gradient = output * (1 - output) * delta;
              this.sigError += Math.pow(delta, 2);
            }
          } else {
            // hidden layer: each neuron's error is the sum over the
            // FOLLOWING layer's neurons of (connecting weight * that
            // neuron's gradient) — the chain rule / backpropagation
            let hidden_layer = this.layers[k];
            let following_layer = this.layers[k + 1];
            for (let n = 0; n < hidden_layer.neurons.length; n++) {
              let neuron = hidden_layer.neurons[n];
              let output = neuron.output;
              let error = 0;
              for (let n2 = 0; n2 < following_layer.neurons.length; n2++) {
                let following_neuron = following_layer.neurons[n2];
                error += following_neuron.weights[n] * following_neuron.gradient;
              }
              neuron.gradient = output * (1 - output) * error;
            } // end for neurons
          } // end else
        } // end backward pass
        // Once all gradients are calculated, work forward and calculate
        // the new weights: w += learningRate * gradient * input (+ momentum)
        for (let i = 0; i < this.layers.length; i++) {
          let layer = this.layers[i];
          for (let j = 0; j < layer.neurons.length; j++) {
            let neuron = layer.neurons[j];
            // Modify the bias.
            neuron.bias += learningRate * neuron.gradient;
            for (let k = 0; k < neuron.weights.length; k++) {
              // the weight delta is proportional to the output of the
              // corresponding neuron in the preceding layer
              let prev_layer = this.layers[i - 1];
              if (typeof prev_layer === 'undefined') {
                prev_layer = this.layers[0]; // input layer
              }
              neuron.deltas[k] = learningRate * neuron.gradient * prev_layer.neurons[k].output;
              neuron.weights[k] += neuron.deltas[k];
              // momentum: re-apply a fraction of last epoch's delta
              if (neuron.previousDeltas[k]) {
                neuron.weights[k] += momentum * neuron.previousDeltas[k];
              }
            }
            // Remember this epoch's deltas for the momentum term.
            neuron.previousDeltas = neuron.deltas.slice();
          }
        } // end weight update
      } // end sample loop

      // validate: every sample's rounded first output must match its ideal
      // (was hard-coded to 4 samples; now follows tests.length)
      this.passed = true;
      for (let i = 0; i < tests.length; i++) {
        let output = this.input(tests[i]);
        if (Math.round(output[0]) !== ideals[i][0]) {
          this.passed = false;
        }
      }
      if (this.passed) {
        this.consec_valid++;
      } else {
        this.consec_valid = 0;
      }
      this.valid_over_time.push(this.consec_valid);
      this.epoch++;
      this.error_over_time.push(this.sigError);
      this.min_over_time.push(Math.min(...this.error_over_time));
      // keep training until the error is small AND we've validated
      // 3 epochs in a row
      loop = this.sigError > minError || this.consec_valid < 3;
      if (this.consec_valid >= 3) {
        loop = false;
        break;
      }
      yield this;
    } // end while
    yield this;
  }
}
Insert cell
// Topology: 2 inputs, one hidden layer of 4 neurons, 1 output.
network = new Network([2,4,1]) // xor logic
Insert cell
/**
 * Drive `network.train(...)` over the XOR truth table, relaying each
 * training step so callers can watch progress.
 * @param {Network} network - network to train (also yielded back each step)
 * @yields {{network: Network, validation: Array}} per-epoch snapshot
 */
function* buildTrainTest(network) {
  // XOR truth table: every 2-bit input pattern and its expected output.
  const training = [[0, 0], [0, 1], [1, 0], [1, 1]];
  const ideals = [[0], [1], [1], [0]];
  let validation = [];
  for (const step of network.train(training, ideals)) {
    network = step;
    // validation is recomputed (currently left empty) each step
    validation = [];
    yield { network, validation };
  }
  return { network, validation };
}
Insert cell
results = (function* () {
  // Delegate straight to the training generator; Observable resolves this
  // cell to the most recently yielded step.
  yield* buildTrainTest(network);
})()
Insert cell
// Squared error of the sampled pattern, per epoch.
sparkline(results.network.error_over_time, 800, 50)
Insert cell
// Running minimum of the error, per epoch.
sparkline(results.network.min_over_time, 800, 50)
Insert cell
// Length of the consecutive-passing-validation streak, per epoch.
sparkline(results.network.valid_over_time, 800, 50)
Insert cell
// Longest run of consecutive epochs where all samples rounded correctly.
Math.max(...results.network.valid_over_time) // max consec valid
Insert cell
// Smallest per-epoch squared error seen during training.
Math.min(...results.network.error_over_time) // min error
Insert cell
// Render the trained network's state for each XOR input combination.
html `${visualizeNetwork(results.network,[0,0])}
${ visualizeNetwork(results.network,[0,1])}
<br> ${ visualizeNetwork(results.network,[1,0])} ${ visualizeNetwork(results.network,[1,1])}`
Insert cell
test = (function () {
  // Run the trained network on every XOR input pattern and collect
  // the raw (unrounded) predictions.
  const patterns = [[0, 0], [0, 1], [1, 0], [1, 1]];
  return patterns.map((pair) => results.network.input(pair));
})()
Insert cell
// Dynamically load the Color.js ESM build and unwrap its default export.
Color = import("https://colorjs.io/dist/color.esm.js").then((m)=>{return m.default})
Insert cell
/**
 * Linearly remap `value` from the range [low1, high1] to [low2, high2].
 * @param {number} value - value expressed in the source range
 * @param {number} low1 - source range lower bound
 * @param {number} high1 - source range upper bound
 * @param {number} low2 - target range lower bound
 * @param {number} high2 - target range upper bound
 * @returns {number} the remapped value
 */
function map_range(value, low1, high1, low2, high2){
  const scaled = (high2 - low2) * (value - low1);
  return low2 + scaled / (high1 - low1);
}
Insert cell
Insert cell
Insert cell

Purpose-built for displays of data

Observable is your go-to platform for exploring data and creating expressive data visualizations. Use reactive JavaScript notebooks for prototyping and a collaborative canvas for visual data exploration and dashboard creation.
Learn more