From fe06f562ecf7914f439503693955e76afcd33382 Mon Sep 17 00:00:00 2001
From: Douglas De Rizzo Meneghetti
Date: Wed, 6 Nov 2019 22:11:40 -0300
Subject: [PATCH] apparently, async was never needed

removed a runaway '=True'
---
 elf/utils_elf.py                     | 6 +++---
 elf_python/memory_receiver.py        | 8 ++++----
 rlpytorch/runner/parameter_server.py | 2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/elf/utils_elf.py b/elf/utils_elf.py
index 84e89e5a..9a80b8d7 100644
--- a/elf/utils_elf.py
+++ b/elf/utils_elf.py
@@ -188,13 +188,13 @@ def hist(self, s, key=None):
         else:
             return self[key][s]
 
-    def transfer_cpu2gpu(self, batch_gpu, asynch=True):
+    def transfer_cpu2gpu(self, batch_gpu):
         ''' transfer batch data to gpu '''
         # For each time step
         for k, v in self.batch.items():
-            batch_gpu[k].copy_(v, asynch=asynch)
+            batch_gpu[k].copy_(v)
 
-    def transfer_cpu2cpu(self, batch_dst, asynch=True):
+    def transfer_cpu2cpu(self, batch_dst):
         ''' transfer batch data to cpu '''
 
         # For each time step
diff --git a/elf_python/memory_receiver.py b/elf_python/memory_receiver.py
index 9d91974c..9e44655b 100644
--- a/elf_python/memory_receiver.py
+++ b/elf_python/memory_receiver.py
@@ -75,19 +75,19 @@ def _cpu2gpu(batch_cpu, batch_gpu, allow_incomplete_batch=False):
             if isinstance(batch_cpu_t[k], (torch.FloatTensor, torch.LongTensor)):
                 if allow_incomplete_batch:
                     if len(batch_cpu_t[k].size()) == 1:
-                        batch_gpu_t[k] = batch_cpu_t[k][:batchsize].cuda(asynch=True)
+                        batch_gpu_t[k] = batch_cpu_t[k][:batchsize].cuda()
                     else:
-                        batch_gpu_t[k] = batch_cpu_t[k][:batchsize, :].cuda(asynch=True)
+                        batch_gpu_t[k] = batch_cpu_t[k][:batchsize, :].cuda()
                 else:
                     if isinstance(batch_cpu_t[k], torch.FloatTensor):
                         if k not in batch_gpu_t:
                             batch_gpu_t[k] = torch.cuda.FloatTensor(batch_cpu_t[k].size())
-                        batch_gpu_t[k].copy_(batch_cpu_t[k], asynch=True)
+                        batch_gpu_t[k].copy_(batch_cpu_t[k])
                     elif isinstance(batch_cpu_t[k], torch.LongTensor):
                         if k not in batch_gpu_t:
                             batch_gpu_t[k] = torch.cuda.LongTensor(batch_cpu_t[k].size())
-                        batch_gpu_t[k].copy_(batch_cpu_t[k], asynch=True)
+                        batch_gpu_t[k].copy_(batch_cpu_t[k])
                     else:
                         batch_gpu_t[k] = batch_cpu_t[k]
 
 
diff --git a/rlpytorch/runner/parameter_server.py b/rlpytorch/runner/parameter_server.py
index eddd6f4f..9bf56d64 100644
--- a/rlpytorch/runner/parameter_server.py
+++ b/rlpytorch/runner/parameter_server.py
@@ -215,7 +215,7 @@ def process_main(self, i, gpu_id):
 
         while True:
             self.cvs_recv[i].wait()
-            utils_elf.transfer_cpu2gpu(batch, batch_gpu, asynch=True)
+            utils_elf.transfer_cpu2gpu(batch, batch_gpu)
            self.cvs_send[i].notify()
 
             self.cb_remote_batch_process(context, batch_gpu)
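
Note, not part of the patch above: the dropped keyword is the old PyTorch
`async` flag (spelled `asynch` in this codebase), which `Tensor.copy_()`
and `.cuda()` no longer accept; PyTorch >= 0.4 renamed it to `non_blocking`,
in part because `async` became a reserved word in Python 3.7. A minimal
sketch of the equivalent overlapped transfer in current PyTorch, assuming
a CUDA device is available (tensor names here are illustrative only):

    import torch

    # Pinned (page-locked) host memory is required for a truly
    # asynchronous host-to-device copy.
    cpu_batch = torch.randn(64, 128).pin_memory()
    gpu_batch = torch.empty(64, 128, device="cuda")

    # Queues the copy on the current CUDA stream and returns
    # immediately instead of blocking the Python thread.
    gpu_batch.copy_(cpu_batch, non_blocking=True)

Without pinned memory, `non_blocking=True` silently degrades to a
synchronous copy, which may be why dropping the flag here changes nothing
observable.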