Public
Edited
May 8, 2023
Fork of micrograd.js
1 star
Insert cell
Insert cell
{
const a = new Value(-5, "a");
const b = new Value(3, "b");
const c = a.mul(b); c.label = "c";
const r = c.relu();
r.backward();
return visualize(r);
}
Insert cell
{
const a = new Value(5, "a");
const b = new Value(3, "b");
const c = new Value(1, "c");
let s;
for (let k = 0; k < 500; k++) {
s = Value.sce([a, b, c], 0);
s.backward();
a.value -= a.gradient * 0.01;
b.value -= b.gradient * 0.01;
c.value -= c.gradient * 0.01;
}
return visualize(s);
}
Insert cell
5 - (-0.015376131670486492 * 0.01)
Insert cell
class Value {
  /**
   * A scalar node in a micrograd-style reverse-mode autodiff graph.
   * Every arithmetic method returns a NEW node that records the operation
   * and its operands, so backward() can later propagate gradients.
   *
   * @param {number}  value    scalar held by this node
   * @param {string}  label    display name (used by visualize())
   * @param {string}  operator op that produced this node; "" for leaves
   * @param {Value[]} children operand nodes
   * @param {number}  exponent exponent — meaningful only for "^" nodes
   * @param {number}  ixp      positive-class index — meaningful only for "SCE"
   */
  constructor(value, label = "", operator = "", children = [], exponent = 1, ixp = 0) {
    this.value = value;
    this.label = label;
    this.operator = operator;
    this.gradient = 0; // d(output)/d(this); accumulated (+=) by backward()
    this.exponent = exponent;
    this.ixp = ixp;
    this.children = children;
  }

  /** this + other (plain numbers are auto-wrapped in a leaf Value). */
  add(other) {
    if (typeof other === "number") other = new Value(other);
    const newValue = this.value + other.value;
    return new Value(newValue, "", "+", [this, other]);
  }

  /** this - other, expressed as this + (other * -1). */
  sub(other) {
    if (typeof other === "number") other = new Value(other);
    return this.add(other.mul(-1));
  }

  /** this * other. */
  mul(other) {
    if (typeof other === "number") other = new Value(other);
    const newValue = this.value * other.value;
    return new Value(newValue, "", "*", [this, other]);
  }

  /** this / other, expressed as this * other^-1. */
  div(other) {
    if (typeof other === "number") other = new Value(other);
    return this.mul(other.pow(-1));
  }

  /** -this. */
  neg() {
    return this.mul(-1);
  }

  /** this^x for a plain-number exponent x. */
  pow(x) {
    const newValue = Math.pow(this.value, x);
    // BUGFIX: the exponent belongs only on the RESULT node (passed below).
    // The old code also mutated this.exponent on the base node, corrupting
    // the base's own gradient rule if it was itself a "^" node or was raised
    // to different powers more than once.
    return new Value(newValue, "", "^", [this], x);
  }

  /** e^this. */
  exp() {
    const newValue = Math.exp(this.value);
    return new Value(newValue, "", "exp", [this]);
  }

  /** tanh(this). */
  tanh() {
    const newValue = Math.tanh(this.value);
    return new Value(newValue, "", "tanh", [this]);
  }

  /** max(0, this). */
  relu() {
    const newValue = this.value < 0 ? 0 : this.value;
    return new Value(newValue, "", "ReLU", [this]);
  }

  /**
   * Softmax cross-entropy loss.
   * Uses the max-shift trick (D) so the exponentials cannot overflow.
   *
   * @param {Value[]} xs  predictions (logits)
   * @param {number}  ixp index of the positive class
   * @returns {Value} the scalar loss node, with xs as its children
   */
  static sce(xs, ixp) {
    const xp = xs[ixp];
    const xsv = xs.map(x => x.value);
    const D = -Math.max(...xsv);
    const exps = xs.map(x => Math.exp(x.value + D));
    const sum = exps.reduce((acc, e) => acc + e, 0);
    const quot = Math.exp(xp.value + D) / sum;
    const sce = -Math.log(quot);
    return new Value(sce, "", "SCE", [...xs], 1, ixp);
  }

  /**
   * Reverse-mode autodiff: seeds this.gradient = 1, topologically sorts the
   * graph below this node, then propagates gradients toward the leaves.
   * Gradients ACCUMULATE (+=); callers must zero them between passes.
   */
  backward() {
    this.gradient = 1;

    let topo = [];
    let visited = new Set();

    const buildTopo = (v) => {
      if (!visited.has(v)) {
        visited.add(v);
        for (let child of v.children) {
          buildTopo(child);
        }
        topo.push(v);
      }
    };

    buildTopo(this);

    // Reverse topological order guarantees each node's own gradient is
    // complete before it distributes gradient to its children.
    for (let node of topo.reverse()) {
      node._setChildGradients();
    }
  }

  /** Apply the chain rule for this node's operator to its children. */
  _setChildGradients() {
    switch (this.operator) {
      case "+": {
        // d(l+r)/dl = d(l+r)/dr = 1
        const [left, right] = this.children;
        left.gradient += this.gradient;
        right.gradient += this.gradient;
        break;
      }
      case "*": {
        // d(l*r)/dl = r, d(l*r)/dr = l
        const [left, right] = this.children;
        left.gradient += this.gradient * right.value;
        right.gradient += this.gradient * left.value;
        break;
      }
      case "^": {
        // d(c^n)/dc = n * c^(n-1)
        const [c] = this.children;
        c.gradient += this.exponent * (Math.pow(c.value, this.exponent - 1)) * this.gradient;
        break;
      }
      case "tanh": {
        // d(tanh c)/dc = 1 - tanh(c)^2; this.value already holds tanh(c)
        const [c] = this.children;
        c.gradient += this.gradient * (1 - Math.pow(this.value, 2));
        break;
      }
      case "exp": {
        // d(e^c)/dc = e^c; this.value already holds e^c
        const [c] = this.children;
        c.gradient += this.gradient * this.value;
        break;
      }
      case "ReLU": {
        // Subgradient 1 at c === 0, matching the forward pass (which keeps 0)
        const [c] = this.children;
        c.gradient += this.gradient * (c.value < 0 ? 0 : 1);
        break;
      }
      case "SCE": {
        // Gradient of softmax cross-entropy w.r.t. each logit: softmax - onehot.
        // Recompute the max-shifted softmax from the children's current values.
        const D = -Math.max(...this.children.map(x => x.value));
        const exps = this.children.map(x => Math.exp(x.value + D));
        const sum = exps.reduce((acc, e) => acc + e, 0);
        const softmax = exps.map(exp => exp / sum);
        softmax.forEach((sm, i) => {
          const yi = i === this.ixp ? 1 : 0;
          this.children[i].gradient += this.gradient * (sm - yi);
        });
        break;
      }
      case "": // leaf node — nothing to propagate
        break;
      default:
        throw new Error(`Operator '${this.operator}' not implemented!`);
    }
  }
}
Insert cell
{
const childValues = [-3, 25, 7];
return -Math.max(...childValues.map(c => c))
}
Insert cell
/**
 * Clamp non-finite numbers into the finite double range so downstream
 * arithmetic keeps producing usable values.
 *
 * BUGFIX: Number.MIN_VALUE is the smallest POSITIVE double (~5e-324), not the
 * most negative one, so -Infinity must clamp to -Number.MAX_VALUE.
 * (NaN also falls through to the negative clamp; it was equally meaningless
 * in the original, which returned a near-zero positive value for it.)
 *
 * @param {number} x
 * @returns {number} x unchanged if finite, otherwise ±Number.MAX_VALUE
 */
function preventInfinity(x) {
  if (Number.isFinite(x)) return x;
  return x > 0 ? Number.MAX_VALUE : -Number.MAX_VALUE;
}
Insert cell
function visualize(value) {
  // Render one graph node (label, value, gradient, operator) plus its
  // operand subtrees as nested HTML. Children are folded newest-first,
  // reproducing the original reversed display order.
  let rendered = html``;
  for (const child of value.children) {
    rendered = html`${visualize(child)}${rendered}`;
  }
  return html`
<div class="tree">
<div><b>${value.label}</b>(${value.value}, grad = ${value.gradient})</div>
<div class="tree-branch-wrapper">
<div class="operator">
${value.operator === "exp" ? "eˣ" : value.operator}
${value.exponent !== 1 ? value.exponent : ""}
</div>
<div class="tree-branch">
${rendered}
</div>
</div>
</div>
`;
}
Insert cell
Insert cell
{
const a = new Value(2, "a");
const b = new Value(3, "b");
const c = a.add(b); c.label = "c";
const d = new Value(5, "d");
const e = d.mul(b); e.label = "e";
const f = c.mul(e); f.label = "f";
f.backward();
return visualize(f)
}
Insert cell
class Neuron {
  // A single unit computing w·x + b, passed through ReLU unless it belongs
  // to the output layer (output neurons stay linear for the SCE loss).
  constructor(nin, isOutput) {
    // Weights and bias initialized uniformly in [-1, 1).
    this.w = Array.from({ length: nin }, () => new Value(Math.random() * 2 - 1));
    this.b = new Value(Math.random() * 2 - 1);
    this.isOutput = isOutput;
  }

  // Forward pass for one input vector x (array of numbers or Values).
  call(x) {
    let activation = new Value(0);
    x.forEach((xi, i) => {
      activation = activation.add(this.w[i].mul(xi));
    });
    activation = activation.add(this.b);
    return this.isOutput ? activation : activation.relu();
  }

  // All trainable parameters: weights followed by the bias.
  parameters() {
    return this.w.concat([this.b]);
  }
}
Insert cell
class Layer {
  // A fully-connected layer of `nout` neurons, each reading `nin` inputs.
  constructor(nin, nout, isOutput) {
    this.neurons = Array.from({ length: nout }, () => new Neuron(nin, isOutput));
  }

  // Forward pass; a single-neuron layer unwraps to a scalar output.
  call(x) {
    const outputs = this.neurons.map((neuron) => neuron.call(x));
    if (outputs.length === 1) return outputs[0];
    return outputs;
  }

  // Flattened parameters of every neuron in the layer.
  parameters() {
    return this.neurons.flatMap((neuron) => neuron.parameters());
  }
}
Insert cell
class MLP {
  // Multi-layer perceptron: `nin` inputs through layers of sizes `nouts`.
  // Only the final layer is flagged as output (no ReLU there).
  constructor(nin, nouts) {
    const sizes = [nin, ...nouts];
    this.layers = nouts.map(
      (unused, i) => new Layer(sizes[i], sizes[i + 1], i === nouts.length - 1)
    );
  }

  // Forward pass: thread the signal through every layer in order.
  call(x) {
    return this.layers.reduce((signal, layer) => layer.call(signal), x);
  }

  // Flattened parameters of every layer.
  parameters() {
    return this.layers.flatMap((layer) => layer.parameters());
  }
}
Insert cell
Insert cell
colors_2000 = FileAttachment("colors_2000.json").json()
Insert cell
xs = colors_2000.map(([color, _]) => color.map(c => c / 255));
Insert cell
ys = colors_2000.map(([_, y]) => y);
Insert cell
// Index of the hot entry in a one-hot label vector (-1 if none is set).
function getPositiveIndex(ygt) {
  return ygt.indexOf(1);
}
Insert cell
{
// NOTE(review): dead experiment kept for reference — single-sample SGD on a
// fixed input with a hard-coded positive class; superseded by train1() below.
// Consider deleting this cell.
// const n = new MLP(3, [4, 4, 3]);
// const iterations = 20;
// const learningRate = 0.5;
// let ypred, loss;
// for (let k = 0; k < iterations; k++) {
// ypred = n.call([255, 0, 0]);
// const positiveIndex = 1;
// loss = Value.sce(ypred, positiveIndex);
// console.log("loss:", loss.value, "ypred", ypred.map(p => p.value).join(":"));
// for (const p of n.parameters()) {
// p.gradient = 0;
// }
// loss.backward();
// // learning
// for (const p of n.parameters()) {
// p.value -= p.gradient * learningRate;
// }
// }
// return { loss: loss.value, ypred: ypred.map(y => y.value).join(":") };
}
Insert cell
/**
 * Train a small MLP to classify RGB colors into 3 classes with mini-batch
 * SGD on softmax cross-entropy, then return test-set accuracy in percent.
 *
 * Fixes vs. the previous version:
 * - the train/test split was slice(0, 4000) / slice(4000), which exceeds the
 *   ~2000-sample dataset and left an EMPTY test set (NaN accuracy); now a
 *   proportional 80/20 split is used;
 * - the declared batchSize was ignored (_.chunk hard-coded 10);
 * - accuracy compared Value OBJECTS with `>` (always false, so the predicted
 *   argmax was always 0); predictions are now unwrapped to numbers first;
 * - removed unreachable code after the return statement.
 *
 * @returns {number} accuracy on the held-out test set, 0–100
 */
function train1() {
  const n = new MLP(3, [16, 4, 4, 4, 3]);
  const losses = []; // per-batch loss trace (kept for ad-hoc plotting)
  let ypred;
  let loss;
  let i = 0; // global batch counter
  const epochs = 3;
  const batchSize = 32;
  const learningRate = 0.01;

  // 80/20 train/test split, proportional to however many samples exist.
  const allData = _.shuffle(colors_2000);
  const splitAt = Math.floor(allData.length * 0.8);
  const trainingData = allData.slice(0, splitAt);
  const testData = allData.slice(splitAt);

  for (let k = 0; k < epochs; k++) {
    const batches = _.chunk(trainingData, batchSize);

    for (const batch of batches) {
      const batchXs = batch.map(([x, y]) => x);
      const batchYs = batch.map(([x, y]) => y);
      // forward pass: mean softmax cross-entropy over the batch
      ypred = batchXs.map((x) => n.call(x));
      loss = ypred
        .reduce((sum, yp, j) => Value.sce(yp, getPositiveIndex(batchYs[j])).add(sum), 0)
        .div(batch.length);
      losses.push({ i, k, xs: batchXs, ys: batchYs, loss: Math.min(loss.value, 2), l: batch.length });
      i++;
      // backward pass: zero accumulated gradients, then backprop
      for (const p of n.parameters()) {
        p.gradient = 0;
      }
      loss.backward();
      // SGD step
      for (const p of n.parameters()) {
        p.value -= p.gradient * learningRate;
      }
    }
  }

  // Evaluate: a sample is correct when the predicted argmax matches the
  // one-hot label's argmax.
  let correctCount = 0;
  for (const [input, expected] of testData) {
    const predicted = n.call(input).map((v) => v.value);
    if (argmax(predicted) === argmax(expected)) {
      correctCount++;
    }
  }
  return testData.length === 0 ? 0 : (correctCount / testData.length) * 100;

  // Index of the largest element in a numeric array.
  function argmax(values) {
    let best = 0;
    for (let j = 1; j < values.length; j++) {
      if (values[j] > values[best]) best = j;
    }
    return best;
  }
}
Insert cell
train1()
Insert cell
_.shuffle([1, 2, 3, 4, 5, 6, 7])
Insert cell
{
const a = new Value(-3, "a");
const b = new Value(25, "b");
const c = new Value(7, "c");
const L = Value.sce([a, b, c], 0); L.label = "L";
L.backward();
return visualize(L);
}
Insert cell
outputLayerGradients([-3, 25, 7], 0, 1)
Insert cell
/**
 * Gradients of softmax cross-entropy w.r.t. the raw output values:
 * parentGradient * (softmax - onehot). The max shift keeps exp() stable.
 *
 * @param {number[]} outputValues       raw logits
 * @param {number}   positiveClassIndex index of the true class
 * @param {number}   parentGradient     upstream gradient to scale by
 * @returns {number[]} one gradient per logit
 */
function outputLayerGradients(outputValues, positiveClassIndex, parentGradient) {
  const shift = Math.max(...outputValues);
  const shiftedExps = [];
  let total = 0;
  for (const v of outputValues) {
    const e = Math.exp(v - shift);
    shiftedExps.push(e);
    total += e;
  }
  return shiftedExps.map((e, index) => {
    const softmaxOutput = e / total;
    const oneHot = index === positiveClassIndex ? 1 : 0;
    return parentGradient * (softmaxOutput - oneHot);
  });
}
Insert cell
{
const data = [];
for (let x = -10; x <= 10; x++) {
data.push({ x, y: Math.pow(x, 2) });
}
return Plot.line(data, {x: "x", y: "y"}).plot()
}
Insert cell
// Render one labeled sample: the raw rgb triple plus a swatch tinted with
// that color and captioned with its class name.
function showData([color, [isBlue, isRed, isYellow]]) {
  const rgb = color.join(", ");
  const className = isBlue ? "blue" : isRed ? "red" : "yellow";
  return html`
<div>
rgb(${rgb})
<span style="display: inline-block; background-color: rgb(${rgb})">${className}</span>
</div>`
}
Insert cell
{
const sc = _.shuffle(colors_2000);
return sc.slice(0, 20).reduce((acc, c) => html`${acc}${showData(c)}`, html``);
}
Insert cell

One platform to build and deploy the best data apps

Experiment and prototype by building visualizations in live JavaScript notebooks. Collaborate with your team and decide which concepts to build out.
Use Observable Framework to build data apps locally. Use data loaders to build in any language or library, including Python, SQL, and R.
Seamlessly deploy to Observable. Test before you ship, use automatic deploy-on-commit, and ensure your projects are always up-to-date.
Learn more