Gword 2020-07-13 21:17:33 +08:00
parent dfa1a4d999
commit a6caacf54e
5 changed files with 105 additions and 30 deletions


@@ -16,11 +16,9 @@ class TestCodeOp(unittest.TestCase):
         c,d = data["outputs"]
         np.add(a,b,out=c)
         np.subtract(a,b,out=d)
-        p, r = c.__array_interface__['data']
 
     def backward_code1(self, np, data):
         dout = data["dout"]
-        a,b,dout = data["inputs"]
         out = data["outputs"][0]
         np.copyto(out, dout)
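For context, here is a minimal sketch of how a test would drive this forward/backward pair through jt.numpy_code and check both gradient paths. The shapes and the jt.grad check are illustrative assumptions, not part of this diff, and the callbacks are assumed to be reachable as plain callables:

    import jittor as jt
    import numpy as np

    a = jt.random((5,1))
    b = jt.random((5,1))
    c, d = jt.numpy_code(
        [a.shape, a.shape],
        [a.dtype, a.dtype],
        [a, b],
        forward_code,                      # computes c = a+b, d = a-b
        [backward_code1, backward_code2],  # gradients w.r.t. a and b
    )
    assert np.allclose(c.data, (a + b).data)
    assert np.allclose(d.data, (a - b).data)
    # d(c+d)/da == 2 and d(c+d)/db == 0, exercising both backward callbacks
    ga, gb = jt.grad((c + d).sum(), [a, b])
    assert np.allclose(ga.data, 2) and np.allclose(gb.data, 0)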


@@ -44,11 +44,9 @@ struct NumpyFunc {
 };
 
 struct NumpyResult {
-    // vector<Allocation> allocations;
     map<string, vector<DataView>> varrays;
     map<string, int> ints;
     map<string, DataView> arrays;
-    // mem ptr, dtype, shape --> numpy array
 };
 
 } // jittor
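On the Python side these three maps surface as the single data dict handed to the numpy callbacks: varrays backs the lists under data["inputs"] and data["outputs"], arrays backs single views such as data["dout"], and ints carries scalars such as data["out_index"] (see grad() and run() below). A sketch of the resulting callback contract, with illustrative variable names:

    def backward_code(self, np, data):
        xs   = data["inputs"]      # list of numpy views  <- varrays
        outs = data["outputs"]     # list of numpy views  <- varrays
        dout = data["dout"]        # one numpy view       <- arrays
        idx  = data["out_index"]   # plain python int     <- ints
        np.copyto(outs[0], dout if idx == 0 else -dout)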


@@ -30,11 +30,7 @@ NumpyCodeOp::NumpyCodeOp(NanoVector shape, NanoString dtype, vector<Var*>&& inpu
 {
     _outputs.push_back(create_output(shape, dtype));
     CHECKop(_inputs.size(),<=,10);
-    if (_outputs[0]->num < 0) {
-        flags.set(NodeFlags::_vary_shape);
-        check_vary_shape(_outputs[0]->shape);
-    }
+    ASSERT(_outputs[0]->num >= 0);
     for (int i=0; i<sbackward.size(); i++) {
         backward.push_back(sbackward[i]);
     }
@@ -50,10 +46,7 @@ NumpyCodeOp::NumpyCodeOp(vector<NanoVector>&& shapes, vector<NanoString>&& dtype
     CHECKop(_outputs.size(),>,0);
     for (int i=0; i<shapes.size(); i++) {
         _outputs[i] = create_output(shapes[i], dtypes[i]);
-        if (_outputs[i]->num < 0) {
-            flags.set(NodeFlags::_vary_shape);
-            check_vary_shape(_outputs[i]->shape);
-        }
+        ASSERT(_outputs[i]->num >= 0);
     }
     for (int i=0; i<sbackward.size(); i++) {
         backward.push_back(sbackward[i]);
@@ -64,16 +57,12 @@ NumpyCodeOp::NumpyCodeOp(NanoVector shape, NanoString dtype, vector<Var*>&& inpu
     : _inputs(inputs), forward(forward), _results(move(results))
 {
     _outputs.push_back(create_output(shape, dtype));
     CHECKop(_inputs.size(),<=,10);
-    if (_outputs[0]->num < 0) {
-        flags.set(NodeFlags::_vary_shape);
-        check_vary_shape(_outputs[0]->shape);
-    }
+    ASSERT(_outputs[0]->num >= 0);
 }
 
 VarPtr NumpyCodeOp::grad(Var* out, Var* dout, Var* v, int v_index) {
     NumpyResult result;
     int out_index=-1;
     for (int i=0; i<_outputs.size(); i++) {
@@ -84,18 +73,18 @@ VarPtr NumpyCodeOp::grad(Var* out, Var* dout, Var* v, int v_index) {
     }
     ASSERT(out_index!=-1);
     result.ints["out_index"] = out_index;
     result.arrays["dout"].ptr=dout;
     result.arrays["dout"].shape=dout->shape;
     result.arrays["dout"].dtype=dout->dtype();
 
     auto inputs = clone(_inputs);
     inputs.push_back(dout);
 
     return make_numpy_code(
         _inputs[v_index]->shape,
         _inputs[v_index]->dtype(),
         move(inputs),
         backward[v_index],
         move(result));
 }
 
 void NumpyCodeOp::run() {
@@ -121,7 +110,7 @@ void NumpyCodeOp::run() {
     }
     result.varrays["inputs"] = move(inputs);
     result.varrays["outputs"] = move(outputs);
     forward.callback(&result);
 }
 
 } // jittor
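Taken together: run() packs numpy views of the inputs and outputs into the data dict and fires the forward callback, while grad() builds a second numpy_code op whose forward is backward[v_index], with dout appended to the inputs and dout/out_index pre-packed in the NumpyResult. A sketch of the observable behavior, reusing Example-1 from the header below; the .sum() reduction is an illustrative assumption:

    a = jt.random((5,1))
    b = jt.numpy_code(a.shape, a.dtype, [a], forward_code, [backward_code])
    # differentiating b re-enters NumpyCodeOp: backward_code now runs as the
    # forward of a fresh op, with data["dout"] holding the incoming gradient
    # and data["out_index"] == 0
    ga = jt.grad(b.sum(), a)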


@@ -19,13 +19,103 @@ struct NumpyCodeOp : Op {
     vector<NumpyFunc> backward;
     NumpyResult _results;
 
+    /**
+    Code operator for easily customized ops.
+
+    ----------------
+
+    * [in] shape: the output shape, an integer array
+    * [in] dtype: the output data type
+    * [in] inputs: a list of input jittor Vars
+    * [in] forward: a python function computing the outputs; it receives
+        (np, data), where data["inputs"] and data["outputs"] are numpy
+        views of the input and output Vars
+    * [in] backward: a list of python functions, one per input, each
+        computing the gradient for its input; besides the keys above,
+        data provides "dout" (the incoming gradient) and "out_index"
+        (which output is being differentiated)
+
+    ----------------
+
+    Example-1::
+
+        def forward_code(self, np, data):
+            a = data["inputs"][0]
+            b = data["outputs"][0]
+            np.add(a,a,out=b)
+
+        def backward_code(self, np, data):
+            dout = data["dout"]
+            out = data["outputs"][0]
+            np.copyto(out, dout)
+
+        a = jt.random((5,1))
+        b = jt.numpy_code(
+            a.shape,
+            a.dtype,
+            [a],
+            forward_code,
+            [backward_code],
+        )
+
+    Example-2::
+
+        def forward_code(self, np, data):
+            a,b = data["inputs"]
+            c,d = data["outputs"]
+            np.add(a,b,out=c)
+            np.subtract(a,b,out=d)
+
+        def backward_code1(self, np, data):
+            dout = data["dout"]
+            out = data["outputs"][0]
+            np.copyto(out, dout)
+
+        def backward_code2(self, np, data):
+            dout = data["dout"]
+            out_index = data["out_index"]
+            out = data["outputs"][0]
+            if out_index==0:
+                np.copyto(out, dout)
+            else:
+                np.negative(dout, out)
+
+        a = jt.random((5,1))
+        b = jt.random((5,1))
+        c, d = jt.numpy_code(
+            [a.shape, a.shape],
+            [a.dtype, a.dtype],
+            [a, b],
+            forward_code,
+            [backward_code1,backward_code2],
+        )
+    */
     NumpyCodeOp(NanoVector shape, NanoString dtype, vector<Var*>&& inputs, NumpyFunc&& forward, vector<NumpyFunc>&& backward);
 
     // @attrs(multiple_outputs)
     NumpyCodeOp(vector<NanoVector>&& shapes, vector<NanoString>&& dtypes, vector<Var*>&& inputs, NumpyFunc&& forward, vector<NumpyFunc>&& backward);
 
     // @pybind(None)
     NumpyCodeOp(NanoVector shape, NanoString dtype, vector<Var*>&& inputs, NumpyFunc forward, NumpyResult&& results);
 
     const char* name() const override { return "numpy_code"; }
     VarPtr grad(Var* out, Var* dout, Var* v, int v_index) override;
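The two public constructors surface as a single jt.numpy_code entry point: pass a scalar shape/dtype for one output or lists for several, while the third constructor, marked @pybind(None), is only reachable internally from grad(). A sketch of the two call forms, reusing the callbacks from Example-1 and Example-2 above (no new API is implied):

    # single output (first constructor)
    b = jt.numpy_code(a.shape, a.dtype, [a], forward_code, [backward_code])

    # multiple outputs (second constructor, @attrs(multiple_outputs))
    c, d = jt.numpy_code([a.shape, a.shape], [a.dtype, a.dtype], [a, b],
                         forward_code, [backward_code1, backward_code2])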


@@ -67,7 +67,7 @@ DEF_IS(int, bool) is_type(PyObject* obj) {
     return PyLong_CheckExact(obj);
 }
 
-DEF_IS(int, PyObject*) to_py_object(const int& a) {
+DEF_IS(int, PyObject*) to_py_object(const T& a) {
     return PyLong_FromLong(a);
 }