polish dataset worker error handling

Dun Liang 2021-03-18 22:29:51 +08:00
parent 6c4a2dc463
commit e674efba84
6 changed files with 15 additions and 1 deletion

View File

@@ -8,7 +8,7 @@
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
__version__ = '1.2.2.51'
__version__ = '1.2.2.52'
from . import lock
with lock.lock_scope():
    ori_int = int

View File

@@ -84,6 +84,7 @@ class TestDataset2(unittest.TestCase):
                self.set_attrs(total_len=10240)
            def __getitem__(self, k):
                self.tmp = None
                x = jt.array(k)
                y = x
                for i in range(10):
@@ -93,6 +94,8 @@ class TestDataset2(unittest.TestCase):
                return x, y
        dataset = YourDataset().set_attrs(batch_size=256, shuffle=True, num_workers=4)
        dataset.tmp = jt.array([1,2,3,4,5])
        dataset.tmp.sync()
        for x, y in dataset:
            # dataset.display_worker_status()
            pass

View File

@@ -80,10 +80,15 @@ void add_set_seed_callback(set_seed_callback callback) {
std::default_random_engine* get_random_engine() { return eng.get(); }
#ifdef HAS_CUDA
bool no_cuda_error_when_free = 0;
#endif
void jt_init_subprocess() {
#ifdef HAS_CUDA
    use_cuda = 0;
    exe.last_is_cuda = false;
    no_cuda_error_when_free = 1;
#endif
}
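
Note for context: jt_init_subprocess() runs inside each forked dataset worker, and this hunk makes it raise a process-wide no_cuda_error_when_free switch alongside disabling CUDA. The sketch below is illustrative only (plain C++, no real CUDA; fake_cuda_free and the printf output are made up for this note): it shows why a forked child that inherits the parent's device pointers should skip freeing them rather than call into a driver context it does not own.

#include <cstdio>
#include <cstdlib>
#include <sys/wait.h>
#include <unistd.h>

static bool no_cuda_error_when_free = false;   // same idea as the new flag

static void fake_cuda_free(void* p) {          // stand-in for an allocator's free()
    if (no_cuda_error_when_free) {
        std::printf("[pid %d] worker: skip freeing %p\n", (int)getpid(), p);
        return;                                // child has no usable CUDA context, so do nothing
    }
    std::printf("[pid %d] parent: freeing %p\n", (int)getpid(), p);
    std::free(p);                              // the real cudaFree would happen here
}

int main() {
    void* buf = std::malloc(16);               // stands in for a device allocation
    pid_t pid = fork();                        // dataset workers are forked like this
    if (pid == 0) {
        no_cuda_error_when_free = true;        // what jt_init_subprocess() now does
        fake_cuda_free(buf);                   // guarded: silently skipped, no error
        _exit(0);
    }
    waitpid(pid, nullptr, 0);
    fake_cuda_free(buf);                       // the parent still frees for real
    return 0;
}

Any memory a worker skips in this way is still owned, and eventually freed, by the parent process, so the guard trades a harmless no-op in the child for the CUDA error it used to raise.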

View File

@@ -12,6 +12,7 @@
namespace jittor {
CudaDeviceAllocator cuda_device_allocator;
extern bool no_cuda_error_when_free;
const char* CudaDeviceAllocator::name() const {return "cuda_device";}
@@ -34,6 +35,7 @@ void* CudaDeviceAllocator::alloc(size_t size, size_t& allocation) {
void CudaDeviceAllocator::free(void* mem_ptr, size_t size, const size_t& allocation) {
    if (size==0) return;
    if (no_cuda_error_when_free) return;
    checkCudaErrors(cudaFree(mem_ptr));
}

View File

@@ -26,6 +26,7 @@ struct DualAllocation {
extern SFRLAllocator cuda_dual_host_allocator;
extern SFRLAllocator cuda_dual_device_allocator;
extern bool no_cuda_error_when_free;
struct CudaDualAllocator : Allocator {
//for recycle block_id
@@ -95,6 +96,7 @@ struct DelayFree final : Allocator {
    };
    void free(void* mem_ptr, size_t size, const size_t& allocation) override {
        using namespace cuda_dual_local;
        if (no_cuda_error_when_free) return;
        allocations.emplace_back(mem_ptr, allocation, size, &cuda_dual_allocator);
        peekCudaErrors(_cudaLaunchHostFunc(0, &to_free_allocation, 0));
    }
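
The same guard goes in front of the delayed-free bookkeeping, because even deferring a free is not safe in a worker: recording the allocation and scheduling the cleanup callback through _cudaLaunchHostFunc both assume a live CUDA context. A rough sketch of that ordering, with made-up names (DelayedFreeSketch, PendingFree) and no real CUDA calls:

#include <cstdio>
#include <vector>

static bool no_cuda_error_when_free = false;   // flipped by the worker at startup

struct PendingFree { void* ptr; size_t size; };

struct DelayedFreeSketch {
    std::vector<PendingFree> pending;
    void free(void* ptr, size_t size) {
        if (no_cuda_error_when_free) return;   // worker: neither record nor schedule
        pending.push_back({ptr, size});        // parent: remember the block ...
        std::printf("queued %zu bytes at %p for delayed free\n", size, ptr);
        // ... a later callback would walk `pending` and release each entry
    }
};

int main() {
    DelayedFreeSketch d;
    int block = 0;
    d.free(&block, sizeof block);              // parent path: gets queued
    no_cuda_error_when_free = true;            // simulate jt_init_subprocess()
    d.free(&block, sizeof block);              // worker path: silently skipped
    return 0;
}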

View File

@@ -13,6 +13,7 @@ namespace jittor {
CudaManagedAllocator cuda_managed_allocator;
DEFINE_FLAG(int, use_cuda_managed_allocator, 1, "Enable cuda_managed_allocator");
extern bool no_cuda_error_when_free;
const char* CudaManagedAllocator::name() const {return "cuda_managed";}
@@ -25,6 +26,7 @@ void* CudaManagedAllocator::alloc(size_t size, size_t& allocation) {
void CudaManagedAllocator::free(void* mem_ptr, size_t size, const size_t& allocation) {
    if (size==0) return;
    if (no_cuda_error_when_free) return;
    checkCudaErrors(cudaFree(mem_ptr));
}