mirror of https://github.com/Jittor/Jittor
Commit a6caacf54e (parent dfa1a4d999): update
@@ -16,11 +16,9 @@ class TestCodeOp(unittest.TestCase):
         c,d = data["outputs"]
         np.add(a,b,out=c)
         np.subtract(a,b,out=d)
-        p, r = c.__array_interface__['data']

     def backward_code1(self, np, data):
-        dout = data["dout"]
+        a,b,dout = data["inputs"]
         out = data["outputs"][0]
         np.copyto(out, dout)
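The test above inspects c.__array_interface__['data'], which exposes the array's raw buffer address and a read-only flag. A minimal NumPy-only sketch of that protocol (independent of Jittor), useful for checking that out= writes reuse the same buffer:

    import numpy as np

    a = np.zeros((5, 1), dtype=np.float32)
    ptr, read_only = a.__array_interface__['data']  # (buffer address, read-only flag)

    # Writing through out= reuses the existing buffer, so the address is stable.
    np.add(a, a, out=a)
    assert a.__array_interface__['data'][0] == ptr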
@@ -44,11 +44,9 @@ struct NumpyFunc {
 };

 struct NumpyResult {
     // vector<Allocation> allocations;
     map<string, vector<DataView>> varrays;
     map<string, int> ints;
     map<string, DataView> arrays;
     // mem ptr, dtype, shape --> numpy array
 };

 } // jittor
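The comment "mem ptr, dtype, shape --> numpy array" describes how a DataView is materialized on the Python side. A hedged sketch of that conversion using only ctypes and NumPy (view_from_ptr is a hypothetical helper, not Jittor's actual code):

    import ctypes
    import numpy as np

    def view_from_ptr(ptr, shape, dtype):
        # Wrap a raw buffer address as a NumPy array without copying,
        # mirroring what a (ptr, dtype, shape) record would become.
        nbytes = int(np.prod(shape)) * np.dtype(dtype).itemsize
        buf = (ctypes.c_byte * nbytes).from_address(ptr)
        return np.frombuffer(buf, dtype=dtype).reshape(shape)

    src = np.arange(6, dtype=np.float32).reshape(2, 3)
    alias = view_from_ptr(src.__array_interface__['data'][0], src.shape, src.dtype)
    alias[0, 0] = 42.0
    assert src[0, 0] == 42.0  # both names share one buffer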
@@ -30,11 +30,7 @@ NumpyCodeOp::NumpyCodeOp(NanoVector shape, NanoString dtype, vector<Var*>&& inpu
 {
     _outputs.push_back(create_output(shape, dtype));
     CHECKop(_inputs.size(),<=,10);
-
-    if (_outputs[0]->num < 0) {
-        flags.set(NodeFlags::_vary_shape);
-        check_vary_shape(_outputs[0]->shape);
-    }
+    ASSERT(_outputs[0]->num >= 0);
     for (int i=0; i<sbackward.size(); i++) {
         backward.push_back(sbackward[i]);
     }
@@ -50,10 +46,7 @@ NumpyCodeOp::NumpyCodeOp(vector<NanoVector>&& shapes, vector<NanoString>&& dtype
     CHECKop(_outputs.size(),>,0);
     for (int i=0; i<shapes.size(); i++) {
         _outputs[i] = create_output(shapes[i], dtypes[i]);
-        if (_outputs[i]->num < 0) {
-            flags.set(NodeFlags::_vary_shape);
-            check_vary_shape(_outputs[i]->shape);
-        }
+        ASSERT(_outputs[i]->num >= 0);
     }
     for (int i=0; i<sbackward.size(); i++) {
         backward.push_back(sbackward[i]);
@@ -64,12 +57,8 @@ NumpyCodeOp::NumpyCodeOp(NanoVector shape, NanoString dtype, vector<Var*>&& inpu
     : _inputs(inputs), forward(forward), _results(move(results))
 {
     _outputs.push_back(create_output(shape, dtype));
     CHECKop(_inputs.size(),<=,10);
-    if (_outputs[0]->num < 0) {
-        flags.set(NodeFlags::_vary_shape);
-        check_vary_shape(_outputs[0]->shape);
-    }
-    CHECKop(_inputs.size(),<=,10)
+    ASSERT(_outputs[0]->num >= 0);
 }

 VarPtr NumpyCodeOp::grad(Var* out, Var* dout, Var* v, int v_index) {
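The num < 0 branches removed above implement Jittor's vary-shape convention: an output's element count num is the product of its dimensions, so a -1 (unknown) dimension drives it negative. A small Python sketch of that convention (element_count is an illustrative helper, not Jittor's implementation):

    from functools import reduce
    import operator

    def element_count(shape):
        # Product of dimensions; a single -1 entry makes it negative,
        # signalling a shape that is only known at run time.
        return reduce(operator.mul, shape, 1)

    assert element_count((5, 1)) == 5    # fully known: num >= 0
    assert element_count((-1, 3)) == -3  # vary-shape: num < 0

After this commit the constructors simply ASSERT(num >= 0), i.e. NumpyCodeOp no longer accepts vary-shape outputs.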
@@ -19,6 +19,96 @@ struct NumpyCodeOp : Op {
     vector<NumpyFunc> backward;
     NumpyResult _results;

     /**
     Code operator for easily customized ops.

     ----------------

     * [in] shape: the output shape, an integer array

     * [in] dtype: the output data type

     * [in] inputs: a list of input jittor Vars

     * [in] cpu_src: cpu source code string; built-in values:

         * in{x}, in{x}_shape{y}, in{x}_stride{y}, in{x}_type, in{x}_p, @in0(...)
         * out{x}, out{x}_shape{y}, out{x}_stride{y}, out{x}_type, out{x}_p, @out0(...)
         * out, out_shape{y}, out_stride{y}, out_type, out_p, @out(...)

     * [in] cpu_grad_src: a list of cpu source code strings for the gradient, one per input; built-in values:

         * in{x}, in{x}_shape{y}, in{x}_stride{y}, in{x}_type, in{x}_p, @in0(...)
         * out{x}, out{x}_shape{y}, out{x}_stride{y}, out{x}_type, out{x}_p, @out0(...)
         * out, out_shape{y}, out_stride{y}, out_type, out_p, @out(...)
         * pout{x}, pout{x}_shape{y}, pout{x}_stride{y}, pout{x}_type, pout{x}_p, @pout{x}(...)
         * pout, pout_shape{y}, pout_stride{y}, pout_type, pout_p, @pout(...)
         * dout, dout_shape{y}, dout_stride{y}, dout_type, dout_p, @dout(...)

     * [in] cpu_header: cpu header code string.

     * [in] cuda_src: cuda source code string.

     * [in] cuda_grad_src: a list of cuda source code strings for the gradient.

     * [in] cuda_header: cuda header code string.

     ----------------

     Example-1::

         def forward_code(self, np, data):
             a = data["inputs"][0]
             b = data["outputs"][0]
             np.add(a,a,out=b)

         def backward_code(self, np, data):
             dout = data["dout"]
             out = data["outputs"][0]
             np.copyto(out, dout)

         a = jt.random((5,1))
         b = jt.numpy_code(
             a.shape,
             a.dtype,
             [a],
             forward_code,
             [backward_code],
         )

     Example-2::

         def forward_code(self, np, data):
             a,b = data["inputs"]
             c,d = data["outputs"]
             np.add(a,b,out=c)
             np.subtract(a,b,out=d)

         def backward_code1(self, np, data):
             dout = data["dout"]
             out = data["outputs"][0]
             np.copyto(out, dout)

         def backward_code2(self, np, data):
             dout = data["dout"]
             out_index = data["out_index"]
             out = data["outputs"][0]
             if out_index==0:
                 np.copyto(out, dout)
             else:
                 np.negative(dout, out)

         a = jt.random((5,1))
         b = jt.random((5,1))
         c, d = jt.numpy_code(
             [a.shape, a.shape],
             [a.dtype, a.dtype],
             [a, b],
             forward_code,
             [backward_code1,backward_code2],
         )

     */
     NumpyCodeOp(NanoVector shape, NanoString dtype, vector<Var*>&& inputs, NumpyFunc&& forward, vector<NumpyFunc>&& backward);

     // @attrs(multiple_outputs)
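To make the callback contract in the examples concrete, here is a framework-free driver that invokes forward_code the way the examples assume Jittor does, with preallocated outputs and a data dict carrying "inputs" and "outputs" (the driver is illustrative, not Jittor internals):

    import numpy as np

    def forward_code(self, np, data):
        a, b = data["inputs"]
        c, d = data["outputs"]
        np.add(a, b, out=c)
        np.subtract(a, b, out=d)

    a = np.random.rand(5, 1).astype(np.float32)
    b = np.random.rand(5, 1).astype(np.float32)
    c, d = np.empty_like(a), np.empty_like(a)
    forward_code(None, np, {"inputs": [a, b], "outputs": [c, d]})
    assert np.allclose(c, a + b) and np.allclose(d, a - b)

During the backward pass the same dict would additionally carry "dout" and, when several outputs share one gradient function, "out_index", as backward_code2 shows.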
@@ -67,7 +67,7 @@ DEF_IS(int, bool) is_type(PyObject* obj) {
     return PyLong_CheckExact(obj);
 }

-DEF_IS(int, PyObject*) to_py_object(const int& a) {
+DEF_IS(int, PyObject*) to_py_object(const T& a) {
     return PyLong_FromLong(a);
 }
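The converter above returns PyLong_FromLong(a); the same CPython C-API call can be exercised from Python through ctypes, which is a quick way to see what the C++ side hands back (purely illustrative):

    import ctypes

    PyLong_FromLong = ctypes.pythonapi.PyLong_FromLong
    PyLong_FromLong.restype = ctypes.py_object   # returns a Python int object
    PyLong_FromLong.argtypes = [ctypes.c_long]

    assert PyLong_FromLong(42) == 42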