From a6caacf54ed222e4477dd9dacdf0f76ce6d7fc97 Mon Sep 17 00:00:00 2001 From: Gword <471184555@qq.com> Date: Mon, 13 Jul 2020 21:17:33 +0800 Subject: [PATCH] update --- python/jittor/test/test_numpy_code_op.py | 2 - src/numpy_func.h | 2 - src/ops/numpy_code_op.cc | 33 +++----- src/ops/numpy_code_op.h | 96 +++++++++++++++++++++++- src/pyjt/py_converter.h | 2 +- 5 files changed, 105 insertions(+), 30 deletions(-) diff --git a/python/jittor/test/test_numpy_code_op.py b/python/jittor/test/test_numpy_code_op.py index 9258bf03..e127250f 100644 --- a/python/jittor/test/test_numpy_code_op.py +++ b/python/jittor/test/test_numpy_code_op.py @@ -16,11 +16,9 @@ class TestCodeOp(unittest.TestCase): c,d = data["outputs"] np.add(a,b,out=c) np.subtract(a,b,out=d) - p, r = c.__array_interface__['data'] def backward_code1(self, np, data): dout = data["dout"] - a,b,dout = data["inputs"] out = data["outputs"][0] np.copyto(out, dout) diff --git a/src/numpy_func.h b/src/numpy_func.h index 4ab01dee..4fb8edf1 100644 --- a/src/numpy_func.h +++ b/src/numpy_func.h @@ -44,11 +44,9 @@ struct NumpyFunc { }; struct NumpyResult { - // vector allocations; map> varrays; map ints; map arrays; - // mem ptr, dtype, shape --> numpy array }; } // jittor \ No newline at end of file diff --git a/src/ops/numpy_code_op.cc b/src/ops/numpy_code_op.cc index d74db0b2..0e2f61fd 100644 --- a/src/ops/numpy_code_op.cc +++ b/src/ops/numpy_code_op.cc @@ -30,11 +30,7 @@ NumpyCodeOp::NumpyCodeOp(NanoVector shape, NanoString dtype, vector&& inpu { _outputs.push_back(create_output(shape, dtype)); CHECKop(_inputs.size(),<=,10); - - if (_outputs[0]->num < 0) { - flags.set(NodeFlags::_vary_shape); - check_vary_shape(_outputs[0]->shape); - } + ASSERT(_outputs[0]->num >= 0); for (int i=0; i&& shapes, vector&& dtype CHECKop(_outputs.size(),>,0); for (int i=0; inum < 0) { - flags.set(NodeFlags::_vary_shape); - check_vary_shape(_outputs[i]->shape); - } + ASSERT(_outputs[i]->num >= 0); } for (int i=0; i&& inpu : _inputs(inputs), 
forward(forward), _results(move(results)) { _outputs.push_back(create_output(shape, dtype)); - CHECKop(_inputs.size(),<=,10); - - if (_outputs[0]->num < 0) { - flags.set(NodeFlags::_vary_shape); - check_vary_shape(_outputs[0]->shape); - } + CHECKop(_inputs.size(),<=,10) + ASSERT(_outputs[0]->num >= 0); } VarPtr NumpyCodeOp::grad(Var* out, Var* dout, Var* v, int v_index) { - NumpyResult result; + NumpyResult result; int out_index=-1; for (int i=0; i<_outputs.size(); i++) { @@ -84,18 +73,18 @@ VarPtr NumpyCodeOp::grad(Var* out, Var* dout, Var* v, int v_index) { } ASSERT(out_index!=-1); result.ints["out_index"] = out_index; - result.arrays["dout"].ptr=dout; + result.arrays["dout"].ptr=dout; result.arrays["dout"].shape=dout->shape; result.arrays["dout"].dtype=dout->dtype(); auto inputs = clone(_inputs); inputs.push_back(dout); - return make_numpy_code( + return make_numpy_code( _inputs[v_index]->shape, _inputs[v_index]->dtype(), - move(inputs), - backward[v_index], - move(result)); + move(inputs), + backward[v_index], + move(result)); } void NumpyCodeOp::run() { @@ -121,7 +110,7 @@ void NumpyCodeOp::run() { } result.varrays["inputs"] = move(inputs); result.varrays["outputs"] = move(outputs); - forward.callback(&result); + forward.callback(&result); } } // jittor diff --git a/src/ops/numpy_code_op.h b/src/ops/numpy_code_op.h index 35a88400..2ef16ed0 100644 --- a/src/ops/numpy_code_op.h +++ b/src/ops/numpy_code_op.h @@ -19,13 +19,103 @@ struct NumpyCodeOp : Op { vector backward; NumpyResult _results; + /** + Code Operator for easily customized op. + + ---------------- + + * [in] shape: the output shape, an integer array + + * [in] dtype: the output data type + + * [in] inputs: A list of input jittor Vars + + * [in] cpu_src: cpu source code string, built-in values: + + * in{x}, in{x}_shape{y}, in{x}_stride{y}, in{x}_type, in{x}_p, @in0(...) + * out{x}, out{x}_shape{y}, out{x}_stride{y}, out{x}_type, out{x}_p, @out0(...) 
+ * out, out_shape{y}, out_stride{y}, out_type, out_p, @out(...) + + * [in] cpu_grad_src: A list of strings, cpu source code string for gradient, represents the gradient for each input, built-in values: + + * in{x}, in{x}_shape{y}, in{x}_stride{y}, in{x}_type, in{x}_p, @in0(...) + * out{x}, out{x}_shape{y}, out{x}_stride{y}, out{x}_type, out{x}_p, @out0(...) + * out, out_shape{y}, out_stride{y}, out_type, out_p, @out(...) + * pout{x}, pout{x}_shape{y}, pout{x}_stride{y}, pout{x}_type, pout{x}_p, @pout{x}(...) + * pout, pout_shape{y}, pout_stride{y}, pout_type, pout_p, @pout(...) + * dout, dout_shape{y}, dout_stride{y}, dout_type, dout_p, @dout(...) + + * [in] cpu_header: cpu header code string. + + * [in] cuda_src: cuda source code string. + + * [in] cuda_grad_src: A list of strings. + + * [in] cuda_header: cuda header code string. + + ---------------- + + Example-1:: + + def forward_code(self, np, data): + a = data["inputs"] + b = data["outputs"] + np.add(a,a,out=b) + + def backward_code(self, np, data): + dout = data["dout"] + out = data["outputs"][0] + np.copyto(out, dout) + + a = jt.random((5,1)) + c, d = jt.numpy_code( + a.shape, + a.dtype, + [a], + forward_code, + [backward_code], + ) + + Example-2:: + + def forward_code(self, np, data): + a,b = data["inputs"] + c,d = data["outputs"] + np.add(a,b,out=c) + np.subtract(a,b,out=d) + + def backward_code1(self, np, data): + dout = data["dout"] + out = data["outputs"][0] + np.copyto(out, dout) + + def backward_code2(self, np, data): + dout = data["dout"] + out_index = data["out_index"] + out = data["outputs"][0] + if out_index==0: + np.copyto(out, dout) + else: + np.negative(dout, out) + + a = jt.random((5,1)) + b = jt.random((5,1)) + c, d = jt.numpy_code( + [a.shape, a.shape], + [a.dtype, a.dtype], + [a, b], + forward_code, + [backward_code1,backward_code2], + ) + + */ NumpyCodeOp(NanoVector shape, NanoString dtype, vector&& inputs, NumpyFunc&& forward, vector&& backward); // @attrs(multiple_outputs) 
NumpyCodeOp(vector&& shapes, vector&& dtypes, vector&& inputs, NumpyFunc&& forward, vector&& backward); - - // @pybind(None) - NumpyCodeOp(NanoVector shape, NanoString dtype, vector&& inputs, NumpyFunc forward, NumpyResult&& results); + + // @pybind(None) + NumpyCodeOp(NanoVector shape, NanoString dtype, vector&& inputs, NumpyFunc forward, NumpyResult&& results); const char* name() const override { return "numpy_code"; } VarPtr grad(Var* out, Var* dout, Var* v, int v_index) override; diff --git a/src/pyjt/py_converter.h b/src/pyjt/py_converter.h index ba66f79d..b97ff57a 100644 --- a/src/pyjt/py_converter.h +++ b/src/pyjt/py_converter.h @@ -67,7 +67,7 @@ DEF_IS(int, bool) is_type(PyObject* obj) { return PyLong_CheckExact(obj); } -DEF_IS(int, PyObject*) to_py_object(const int& a) { +DEF_IS(int, PyObject*) to_py_object(const T& a) { return PyLong_FromLong(a); }