mirror of https://github.com/Jittor/Jittor
fix readme
parent 7d2e1b73fa
commit b338810433
README.cn.md | 11
@@ -16,6 +16,8 @@ Jittor's frontend language is Python. The frontend uses a modular design, which is currently
 import jittor as jt
+from jittor import Module
+from jittor import nn
 import numpy as np

 class Model(Module):
     def __init__(self):
         self.layer1 = nn.Linear(1, 10)
@@ -33,13 +35,18 @@ def get_data(n): # generate random data for training test.
         y = x*x
         yield jt.float32(x), jt.float32(y)

-model = Model()
-
 learning_rate = 0.1
 batch_size = 50
 n = 1000

+model = Model()
+optim = nn.SGD(model.parameters(), learning_rate)
+
 for i,(x,y) in enumerate(get_data(n)):
     pred_y = model(x)
-    loss = ((pred_y - y)**2)
+    dy = pred_y - y
+    loss = dy * dy
+    loss_mean = loss.mean()
+    optim.step(loss_mean)
+    print(f"step {i}, loss = {loss_mean.data.sum()}")

README.md | 11
@@ -16,6 +16,8 @@ The following example shows how to model a two-layer neural network step by step
 import jittor as jt
+from jittor import Module
+from jittor import nn
 import numpy as np

 class Model(Module):
     def __init__(self):
         self.layer1 = nn.Linear(1, 10)
@@ -33,13 +35,18 @@ def get_data(n): # generate random data for training test.
         y = x*x
         yield jt.float32(x), jt.float32(y)

-model = Model()
-
 learning_rate = 0.1
 batch_size = 50
 n = 1000

+model = Model()
+optim = nn.SGD(model.parameters(), learning_rate)
+
 for i,(x,y) in enumerate(get_data(n)):
     pred_y = model(x)
-    loss = ((pred_y - y)**2)
+    dy = pred_y - y
+    loss = dy * dy
+    loss_mean = loss.mean()
+    optim.step(loss_mean)
+    print(f"step {i}, loss = {loss_mean.data.sum()}")

@@ -21,6 +21,8 @@ The following example shows how to model a two-layer neural network step by step
 import jittor as jt
+from jittor import Module
+from jittor import nn
 import numpy as np

 class Model(Module):
     def __init__(self):
         self.layer1 = nn.Linear(1, 10)
@@ -38,13 +40,18 @@ def get_data(n): # generate random data for training test.
         y = x*x
         yield jt.float32(x), jt.float32(y)

-model = Model()
-
 learning_rate = 0.1
 batch_size = 50
 n = 1000

+model = Model()
+optim = nn.SGD(model.parameters(), learning_rate)
+
 for i,(x,y) in enumerate(get_data(n)):
     pred_y = model(x)
-    loss = ((pred_y - y)**2)
+    dy = pred_y - y
+    loss = dy * dy
+    loss_mean = loss.mean()
+    optim.step(loss_mean)
+    print(f"step {i}, loss = {loss_mean.data.sum()}")

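Read straight through, the post-commit example is easier to follow as one listing. Below is a sketch of the full training script assembled from the hunks above; the middle of `Model` and the body of `get_data` fall between the hunks, so those lines (the `Relu`, the second `Linear`, the `execute` method, and the random-data line) are assumptions based on the "two-layer neural network" wording in the hunk context, not part of this diff.

```python
import jittor as jt
from jittor import Module
from jittor import nn
import numpy as np

class Model(Module):
    def __init__(self):
        self.layer1 = nn.Linear(1, 10)
        # Assumed: the diff hunks skip the middle of the class; a ReLU and a
        # second Linear layer complete the "two-layer" network.
        self.relu = nn.Relu()
        self.layer2 = nn.Linear(10, 1)
    def execute(self, x):  # Jittor modules define execute() instead of forward()
        x = self.layer1(x)
        x = self.relu(x)
        return self.layer2(x)

def get_data(n): # generate random data for training test.
    for i in range(n):
        # Assumed: only the last two lines of this body appear in the diff.
        x = np.random.rand(batch_size, 1)
        y = x*x
        yield jt.float32(x), jt.float32(y)

learning_rate = 0.1
batch_size = 50
n = 1000

model = Model()
optim = nn.SGD(model.parameters(), learning_rate)

for i,(x,y) in enumerate(get_data(n)):
    pred_y = model(x)
    dy = pred_y - y
    loss = dy * dy                 # element-wise squared error
    loss_mean = loss.mean()
    optim.step(loss_mean)          # Jittor's optimizer takes the loss directly
    print(f"step {i}, loss = {loss_mean.data.sum()}")
```

Note the pattern the commit introduces: `nn.SGD` is built once from `model.parameters()`, and `optim.step(loss_mean)` is passed the loss itself, so Jittor derives the gradients internally with no separate backward call.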