mirror of https://github.com/Jittor/Jittor
fix ci bug
parent af2a9230f7
commit 6e5876df65
@@ -61,6 +61,7 @@ class test_models(unittest.TestCase):
     @unittest.skipIf(not jt.has_cuda, "Cuda not found")
     @jt.flag_scope(use_cuda=1)
+    @torch.no_grad()
     def test_models(self):
         def to_cuda(x):
             if jt.has_cuda:
@@ -96,6 +97,9 @@ class test_models(unittest.TestCase):
             diff = relative_error.mean()
             assert diff < threshold, f"[*] {test_model} forward fails..., Relative Error: {diff}"
             print(f"[*] {test_model} forword passes with Relative Error {diff}")
+            jt.clean()
+            jt.gc()
+            torch.cuda.empty_cache()
         print('all models pass test.')
 
 if __name__ == "__main__":
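Note on the test change above: @torch.no_grad() keeps the PyTorch reference forward passes from building autograd state, and the three calls added after each comparison release cached GPU memory so that later models still fit on the CI device. A minimal stand-alone sketch of that pattern follows; the TestOneModel class, the empty constructor list and the placeholder loop body are illustration only and are not part of the commit — only the decorators and the three cleanup calls come from the diff.

import unittest
import jittor as jt
import torch

class TestOneModel(unittest.TestCase):
    @unittest.skipIf(not jt.has_cuda, "Cuda not found")
    @jt.flag_scope(use_cuda=1)        # run Jittor ops on the GPU inside this test
    @torch.no_grad()                  # reference forward passes keep no autograd state
    def test_models(self):
        for build_model in []:        # placeholder: iterate over model constructors
            model = build_model()
            # ... forward both frameworks and compare outputs here ...
            del model                 # drop the Python reference before releasing caches
            jt.clean()                # Jittor-side cleanup, as added in the diff
            jt.gc()                   # trigger Jittor garbage collection
            torch.cuda.empty_cache()  # release PyTorch's cached CUDA blocks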
@@ -19,7 +19,10 @@ void* CudaDeviceAllocator::alloc(size_t size, size_t& allocation) {
     try {
         checkCudaErrors(cudaMalloc(&ptr, size));
         return ptr;
-    } catch (...) {}
+    } catch (...) {
+        // clean the last error
+        cudaGetLastError();
+    }
     LOGw << "Unable to alloc cuda device memory, use unify memory instead. "
         "This may cause low performance.";
     display_memory_info(__FILELINE__);
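Note on the allocator change: when cudaMalloc fails inside checkCudaErrors, the CUDA runtime also records the failure as the host thread's last error; if that state is not cleared, a later, unrelated error check can report the stale allocation error. Calling cudaGetLastError() in the catch block returns and resets the recorded error before the allocator falls back to unified memory. The snippet below is only a stand-alone illustration of that reset behaviour; loading the runtime through ctypes and the libcudart.so library name are assumptions for the demo, not anything the commit does.

import ctypes

cudart = ctypes.CDLL("libcudart.so")   # assumption: CUDA runtime visible to the loader

ptr = ctypes.c_void_p()
huge = ctypes.c_size_t(1 << 60)        # deliberately impossible request

# A failed cudaMalloc returns an error code and records it as the thread's last error.
print("cudaMalloc:         ", cudart.cudaMalloc(ctypes.byref(ptr), huge))  # non-zero
# cudaGetLastError returns that recorded error and resets it to cudaSuccess ...
print("cudaGetLastError #1:", cudart.cudaGetLastError())                   # same non-zero code
# ... so the next query sees a clean state, which is what the catch block now ensures.
print("cudaGetLastError #2:", cudart.cudaGetLastError())                   # 0 == cudaSuccess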