fix readme

Dun Liang 2020-04-10 13:34:52 +08:00
parent 7d2e1b73fa
commit b338810433
3 changed files with 27 additions and 6 deletions


@@ -16,6 +16,8 @@ Jittor's frontend language is Python. The frontend uses a modular design, which is currently
import jittor as jt
from jittor import Module
from jittor import nn
import numpy as np
class Model(Module):
def __init__(self):
self.layer1 = nn.Linear(1, 10)
@@ -33,13 +35,18 @@ def get_data(n): # generate random data for training test.
y = x*x
yield jt.float32(x), jt.float32(y)
model = Model()
learning_rate = 0.1
batch_size = 50
n = 1000
model = Model()
optim = nn.SGD(model.parameters(), learning_rate)
for i,(x,y) in enumerate(get_data(n)):
pred_y = model(x)
loss = ((pred_y - y)**2)
dy = pred_y - y
loss = dy * dy
loss_mean = loss.mean()
optim.step(loss_mean)
print(f"step {i}, loss = {loss_mean.data.sum()}")


@@ -16,6 +16,8 @@ The following example shows how to model a two-layer neural network step by step
import jittor as jt
from jittor import Module
from jittor import nn
import numpy as np
class Model(Module):
def __init__(self):
self.layer1 = nn.Linear(1, 10)
@@ -33,13 +35,18 @@ def get_data(n): # generate random data for training test.
y = x*x
yield jt.float32(x), jt.float32(y)
model = Model()
learning_rate = 0.1
batch_size = 50
n = 1000
model = Model()
optim = nn.SGD(model.parameters(), learning_rate)
for i,(x,y) in enumerate(get_data(n)):
pred_y = model(x)
loss = ((pred_y - y)**2)
dy = pred_y - y
loss = dy * dy
loss_mean = loss.mean()
optim.step(loss_mean)
print(f"step {i}, loss = {loss_mean.data.sum()}")


@@ -21,6 +21,8 @@ The following example shows how to model a two-layer neural network step by step
import jittor as jt
from jittor import Module
from jittor import nn
import numpy as np
class Model(Module):
def __init__(self):
self.layer1 = nn.Linear(1, 10)
@@ -38,13 +40,18 @@ def get_data(n): # generate random data for training test.
y = x*x
yield jt.float32(x), jt.float32(y)
model = Model()
learning_rate = 0.1
batch_size = 50
n = 1000
model = Model()
optim = nn.SGD(model.parameters(), learning_rate)
for i,(x,y) in enumerate(get_data(n)):
pred_y = model(x)
loss = ((pred_y - y)**2)
dy = pred_y - y
loss = dy * dy
loss_mean = loss.mean()
optim.step(loss_mean)
print(f"step {i}, loss = {loss_mean.data.sum()}")