Public
Edited
Oct 2, 2021
1 fork
2 stars
Insert cell
Insert cell
Insert cell
Insert cell
Insert cell
Insert cell
Insert cell
Insert cell
Insert cell
Insert cell
net = {
const run_duration = 5000000; // 1000003;
const net_size = 2;
const step_size = 1; // why not 0.01 per Python, maths is off somewhere

//0.01; // orig: 0.01

let weights = await tf.tensor2d([
[node0, 1],
[-1, node1]
]);
// 2x2 net: its self links are 4.5, and the crossover links are +/- 1

const taus = await tf.tensor1d([tau0, 1]);
const gains = await tf.tensor1d([1, 1.1], tf.int32);
const biases = await tf.tensor1d([-2.75, -1.75]);

let ext_inputs = await tf.tensor1d([0, 0]);
// let ext_inputs = tf.zeros([0, 2], tf.int32);
let states = await tf.tensor1d([2, 3]);
let outputs = await tf.tensor1d([0.2, 0.3]);

let loopcount = 1;

// todo: check the math
// this steps more slowly than the py lib

// let batches =[]
// for (let ix = 0; i < 100; i++) {
while (loopcount++ < run_duration) {
// τy' = -y + Wσ(g(y+ θ)) + I

/* tf.tody executes the provided function fn and after it is executed, cleans up all intermediate tensors allocated by fn except those returned by fn */
const calc = await tf.tidy(() => {
let total_inputs = weights.dot(outputs); // Wo

let biased_states = total_inputs.add(biases); // (Wo)+ϑ

let gained_biased_states = biased_states.mul(gains); // g((Wo)+ϑ)

// y' = -y +g(Wo+ϑ)
let yprime = gained_biased_states.sub(states).div(taus);

// Euler step:
let delta = yprime.mul(tf.scalar(step_size));

let trash = states;
states = states.add(delta);
trash.dispose();

outputs = tf.sigmoid(states.transpose()); //σ
return { states, outputs };
});

amon.log("loopcount: ", loopcount);

yield {
state: states,
count: loopcount,
weights: weights,
outputs: outputs,
inputs: ext_inputs
};
}
// } // end for batch
}
Insert cell
Insert cell
/* rough cut at generating graphviz dot visualization
based on network topology, weights, activation. For now, massively hardcoded.
Could also use D3 force layout. */

hardcodedGraphviz = {
let n0 = ppfloat(activations[0]);
let n1 = ppfloat(activations[1]);

const steelblue_light = "#e1eaf3";
const orange_light = "#ffdead";
yield dot`digraph {
graph [fontname = "helvetica"];
node [fontname = "helvetica"];
edge [fontname = "helvetica"];

node [style=filled]
${n0}[fillcolor="${steelblue_light}"]
${n1}[fillcolor="${orange_light}"]

${n0} [color=lightgreen];
${n0} -> ${n0} [color=blue penwidth=5];
${n0} -> ${n1} [color=blue penwidth=2];
${n1} -> ${n0} [arrowhead=tee color=red penwidth=2] ;
${n1} -> ${n1} [color=blue penwidth=5];
}`;
}
Insert cell
Insert cell
/* rough cut at generating graphviz dot visualization
based on network topology, weights, activation. For now, massively hardcoded.
Could also use D3 force layout. */

/* NOTE(review): this cell is byte-for-byte identical to the earlier
hardcodedGraphviz cell — likely a duplication artifact from the notebook
export, since two Observable cells cannot share one name; verify against
the live notebook before deleting either copy. */
hardcodedGraphviz = {
let n0 = ppfloat(activations[0]);
let n1 = ppfloat(activations[1]);

const steelblue_light = "#e1eaf3";
const orange_light = "#ffdead";
yield dot`digraph {
graph [fontname = "helvetica"];
node [fontname = "helvetica"];
edge [fontname = "helvetica"];

node [style=filled]
${n0}[fillcolor="${steelblue_light}"]
${n1}[fillcolor="${orange_light}"]

${n0} [color=lightgreen];
${n0} -> ${n0} [color=blue penwidth=5];
${n0} -> ${n1} [color=blue penwidth=2];
${n1} -> ${n0} [arrowhead=tee color=red penwidth=2] ;
${n1} -> ${n1} [color=blue penwidth=5];
}`;
}
return Number.parseFloat(Number.parseFloat(x).toPrecision(2));
}
Insert cell
viz = {
results.push(net.outputs.dataSync());
amon.log("numTensors:", tf.memory().numTensors);
amon.log("numBytes:", tf.memory().numBytes);
yield net;
}
Insert cell
Insert cell
acts_sample
Insert cell
activations = {
// Pretty-printed plain-JS copy of the latest node outputs, consumed by the
// graphviz cells. We clean up the tensor afterwards to avoid a GPU leak.
// Referencing `viz` chains this cell after the last consumer of the tensor
// we are about to dispose (Observable orders cells by dependency).
viz;
//let viz = hardcodedGraphviz; // chain ourselves after last cell that needs what we'll dispose
// viz yields `net`, so viz.outputs is the same tensor as net.outputs.
let activations = await viz.outputs.array();

activations = activations.map((x) => {
return ppfloat(x);
});
//activations[0] = ppfloat(activations[0]);
// Free the tensor now that we hold a plain JS array copy.
net.outputs.dispose();
yield activations;
}
Insert cell
Insert cell
step0 = {
const w = await tf.tensor2d([
[4.5, 1],
[-1, 4.5]
]);
const s = await tf.tensor1d([2, 3]);
return s.dot(w).dataSync();
} // wtf...

// see also https://js.tensorflow.org/api/latest/#layers.multiply
Insert cell
// Hand-computed check of (y + θ) * g for states [2, 3] and biases
// [-2.75, -1.75] with unit gains — cf. the `abc` cell below.
[(2 + -2.75) * 1, (3 - 1.75) * 1]
Insert cell
abc = {
// Scratch cell: inspect values and shapes for (states + biases) * gains
// on 1-D tensors.
// NOTE(review): tfjs dtypes are strings ('int32'); `tf.int32` is undefined,
// so `gains` is actually created with the default float32 dtype — verify.
const gains = await tf.tensor1d([1, 2], tf.int32);
const biases = await tf.tensor1d([-2.75, -1.75]);
let states = await tf.tensor1d([2, 3]);
// element-wise add: states + biases
let biased_states = await states.add(biases);
// element-wise multiply: (states + biases) * gains
let weighted_biased_states = await biased_states.mul(gains);

return {
states: states,
statesshape: states.shape,
biased_states: biased_states,
biased_statesshape: biased_states.shape,
weighted_biased_states: weighted_biased_states,
weighted_biased_statesshape: weighted_biased_states.shape
};
}
Insert cell
Type JavaScript, then Shift-Enter. Ctrl-space for more options. Arrow ↑/↓ to switch modes.

Insert cell
elementwisemultiple101 = {
const arr1 = tf.tensor1d([10, 20, 30, 40, 50]);
const arr2 = tf.tensor1d([5, 10, 15, 20, 25]);
return arr1.mul(arr2).shape;
}
Insert cell
elementwisemultiplyv2 = {
// Same element-wise multiply as the cell above, but via the layers API
// (tf.layers.multiply) instead of the tensor op.
const arr1 = tf.tensor1d([10, 20, 30, 40, 50]);
const arr2 = tf.tensor1d([5, 10, 15, 20, 25]);
return tf.layers.multiply().apply([arr1, arr2]).shape;
// return arr1.mul(arr2).data();
}
Insert cell
Insert cell
// Shared mutable accumulator: the `viz` cell pushes one output snapshot
// (a dataSync() typed array) per simulation step.
mutable results = [] // = ["A", "B"]
Insert cell
Type JavaScript, then Shift-Enter. Ctrl-space for more options. Arrow ↑/↓ to switch modes.

Insert cell
Type JavaScript, then Shift-Enter. Ctrl-space for more options. Arrow ↑/↓ to switch modes.

Insert cell
/**
 * Extract column `n` from a 2-D row-major array.
 * @param {Array<Array<*>>} data - array of rows
 * @param {number} n - column index to pluck from each row
 * @returns {Array<*>} the n-th entry of every row, in order
 */
function col(data, n) {
  // Arrow callback; the original passed an anonymous function with an
  // unused `index` parameter.
  return data.map((row) => row[n]);
}
Insert cell
testgpusize = {
// Scratch cell for probing GPU allocation limits; the probe is commented
// out, so this cell currently does nothing.
//
//yield await tf.ones([10, 10000000], tf.int32).data();
}
Insert cell
Type JavaScript, then Shift-Enter. Ctrl-space for more options. Arrow ↑/↓ to switch modes.

Insert cell
testmul = {
// Scratch cell: matrix-vector products with tfjs `dot`.
// NOTE(review): `rot90` and `A` are constructed but never used below.
// `theta` comes from another cell.
const rot90 = await tf.tensor2d([
[0, 1],
[-1, 0]
]);

//let theta = 0.2;
// NOTE(review): a conventional 2-D rotation matrix is
// [[cosθ, -sinθ], [sinθ, cosθ]]; this sin/cos layout (then transposed)
// differs — confirm it is intentional.
const rotate = await tf
.tensor2d([
[Math.sin(theta), -Math.cos(theta)],
[Math.cos(theta), Math.sin(theta)]
])
.transpose();

let A = await tf.tensor2d([
[4.5, 1],
[-1, 4.5]
]);

// let node_outputs = await tf.tensor1d([-0.3, 0.23]);

let node_outputs = await tf.tensor1d([1, 2]);

// dot of 2-D with 1-D: matrix-vector product, yielding a 1-D tensor.
yield await rotate.dot(node_outputs).data();
}
Insert cell
Insert cell
Insert cell
// TensorFlow.js pulled from npm; every other cell's `tf` resolves to this.
tf = require("@tensorflow/tfjs")
Insert cell
Insert cell

Purpose-built for displays of data

Observable is your go-to platform for exploring data and creating expressive data visualizations. Use reactive JavaScript notebooks for prototyping and a collaborative canvas for visual data exploration and dashboard creation.
Learn more