Python mxnet.ndarray.empty() Examples
The following are 30 code examples of mxnet.ndarray.empty().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module mxnet.ndarray, or try the search function.
![](https://www.programcreek.com/common/static/images/search.png)
Example #1
Source File: utils.py From SNIPER-mxnet with Apache License 2.0 | 6 votes |
def get_executor(sym, ctx, data_inputs, initializer=None):
    """Bind *sym* on device *ctx* and allocate its parameter/gradient/aux arrays.

    Parameters
    ----------
    sym : mxnet Symbol to bind.
    ctx : device context on which every array is allocated.
    data_inputs : dict mapping input name -> NDArray; their shapes seed
        shape inference for all remaining arguments.
    initializer : optional callable ``initializer(name, array)`` applied to
        each freshly allocated parameter.

    Returns
    -------
    tuple ``(executor, params, params_grad, aux_states)``.
    """
    data_shapes = {k: v.shape for k, v in data_inputs.items()}
    arg_names = sym.list_arguments()
    aux_names = sym.list_auxiliary_states()
    # Arguments that are not data inputs are the learnable parameters.
    param_names = list(set(arg_names) - set(data_inputs.keys()))
    arg_shapes, output_shapes, aux_shapes = sym.infer_shape(**data_shapes)
    arg_name_shape = {k: s for k, s in zip(arg_names, arg_shapes)}
    # nd.empty allocates uninitialized storage; values come from `initializer`
    # below (or from whoever fills them later).
    params = {n: nd.empty(arg_name_shape[n], ctx=ctx) for n in param_names}
    params_grad = {n: nd.empty(arg_name_shape[n], ctx=ctx) for n in param_names}
    aux_states = {k: nd.empty(s, ctx=ctx) for k, s in zip(aux_names, aux_shapes)}
    exe = sym.bind(ctx=ctx,
                   args=dict(params, **data_inputs),
                   args_grad=params_grad,
                   aux_states=aux_states)
    if initializer is not None:
        for k, v in params.items():
            initializer(k, v)
    return exe, params, params_grad, aux_states
Example #2
Source File: lfw_comparison_and_plot_roc.py From MobileFace with MIT License | 6 votes |
def load_dataset_bin(self):
    """Load the packed LFW verification set from ``<lfw_dir>/lfw.bin``.

    Returns ``(data_list, issame_list)`` where ``data_list`` holds two
    NDArray stacks (index 0 = originals, index 1 = horizontally flipped).

    Fixes over the original: the pickle file handle is now closed via a
    context manager (``pickle.load(open(...))`` leaked it), and ``range``
    replaces the Python-2-only ``xrange``.
    """
    name = 'lfw'
    path = os.path.join(self.lfw_dir, name + ".bin")
    with open(path, 'rb') as f:
        bins, issame_list = pickle.load(f)
    data_list = []
    for flip in [0, 1]:
        # Two images per pair -> len(issame_list) * 2 samples per stack.
        data = nd.empty((len(issame_list) * 2, 3, self.image_size[0], self.image_size[1]))
        data_list.append(data)
    for i in range(len(issame_list) * 2):
        _bin = bins[i]
        img = mx.image.imdecode(_bin)
        # HWC -> CHW for the network input layout.
        img = nd.transpose(img, axes=(2, 0, 1))
        for flip in [0, 1]:
            if flip == 1:
                img = mx.ndarray.flip(data=img, axis=2)
            data_list[flip][i][:] = img
        if i % 1000 == 0:
            print('loading bin', i)
    print(data_list[0].shape)
    return (data_list, issame_list)
Example #3
Source File: verification.py From 1.FaceRecognition with MIT License | 6 votes |
def load_bin(path, image_size):
    """Read a packed verification ``.bin`` file into NDArray stacks.

    Returns ``(data_list, issame_list)``: ``data_list[0]`` holds the decoded
    images, ``data_list[1]`` their horizontally flipped copies.
    """
    # A pickle written by Python 2 raises UnicodeDecodeError under Python 3;
    # retry with byte-string decoding in that case.
    try:
        with open(path, 'rb') as f:
            bins, issame_list = pickle.load(f)  # py2
    except UnicodeDecodeError as e:
        with open(path, 'rb') as f:
            bins, issame_list = pickle.load(f, encoding='bytes')  # py3
    n_images = len(issame_list) * 2
    data_list = [nd.empty((n_images, 3, image_size[0], image_size[1]))
                 for _ in (0, 1)]
    for i in range(n_images):
        img = mx.image.imdecode(bins[i])
        if img.shape[1] != image_size[0]:
            img = mx.image.resize_short(img, image_size[0])
        img = nd.transpose(img, axes=(2, 0, 1))
        for flip in (0, 1):
            if flip == 1:
                img = mx.ndarray.flip(data=img, axis=2)
            data_list[flip][i][:] = img
        if i % 1000 == 0:
            print('loading bin', i)
    print(data_list[0].shape)
    return (data_list, issame_list)
Example #4
Source File: lfw.py From 1.FaceRecognition with MIT License | 6 votes |
def load_dataset(lfw_dir, image_size):
    """Decode every LFW pair image into two NDArray stacks (plain + flipped).

    Returns ``(lfw_data_list, issame_list)``.
    """
    lfw_pairs = read_pairs(os.path.join(lfw_dir, 'pairs.txt'))
    lfw_paths, issame_list = get_paths(lfw_dir, lfw_pairs, 'jpg')
    lfw_data_list = [nd.empty((len(lfw_paths), 3, image_size[0], image_size[1]))
                     for _ in (0, 1)]
    for i, path in enumerate(lfw_paths):
        with open(path, 'rb') as fin:
            _bin = fin.read()
        # Decode then swap HWC -> CHW.
        img = nd.transpose(mx.image.imdecode(_bin), axes=(2, 0, 1))
        for flip in (0, 1):
            if flip == 1:
                img = mx.ndarray.flip(data=img, axis=2)
            lfw_data_list[flip][i][:] = img
        if (i + 1) % 1000 == 0:
            print('loading lfw', i + 1)
    print(lfw_data_list[0].shape)
    print(lfw_data_list[1].shape)
    return (lfw_data_list, issame_list)
Example #5
Source File: utils.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def get_executor(sym, ctx, data_inputs, initializer=None):
    """Bind *sym* on device *ctx* and allocate its parameter/gradient/aux arrays.

    Parameters
    ----------
    sym : mxnet Symbol to bind.
    ctx : device context on which every array is allocated.
    data_inputs : dict mapping input name -> NDArray; their shapes seed
        shape inference for all remaining arguments.
    initializer : optional callable ``initializer(name, array)`` applied to
        each freshly allocated parameter.

    Returns
    -------
    tuple ``(executor, params, params_grad, aux_states)``.
    """
    data_shapes = {k: v.shape for k, v in data_inputs.items()}
    arg_names = sym.list_arguments()
    aux_names = sym.list_auxiliary_states()
    # Arguments that are not data inputs are the learnable parameters.
    param_names = list(set(arg_names) - set(data_inputs.keys()))
    arg_shapes, output_shapes, aux_shapes = sym.infer_shape(**data_shapes)
    arg_name_shape = {k: s for k, s in zip(arg_names, arg_shapes)}
    # nd.empty allocates uninitialized storage; values come from `initializer`
    # below (or from whoever fills them later).
    params = {n: nd.empty(arg_name_shape[n], ctx=ctx) for n in param_names}
    params_grad = {n: nd.empty(arg_name_shape[n], ctx=ctx) for n in param_names}
    aux_states = {k: nd.empty(s, ctx=ctx) for k, s in zip(aux_names, aux_shapes)}
    exe = sym.bind(ctx=ctx,
                   args=dict(params, **data_inputs),
                   args_grad=params_grad,
                   aux_states=aux_states)
    if initializer is not None:
        for k, v in params.items():
            initializer(k, v)
    return exe, params, params_grad, aux_states
Example #6
Source File: verification.py From 1.FaceRecognition with MIT License | 6 votes |
def load_bin(path, image_size):
    """Read a packed verification ``.bin`` file into NDArray stacks.

    Returns ``(data_list, issame_list)``: ``data_list[0]`` holds the decoded
    images, ``data_list[1]`` their horizontally flipped copies.

    Fixes over the original: the pickle file handle is closed via a context
    manager (``pickle.load(open(...))`` leaked it), and ``range`` replaces
    the Python-2-only ``xrange``.
    """
    with open(path, 'rb') as f:
        bins, issame_list = pickle.load(f)
    data_list = []
    for flip in [0, 1]:
        data = nd.empty((len(issame_list) * 2, 3, image_size[0], image_size[1]))
        data_list.append(data)
    for i in range(len(issame_list) * 2):
        _bin = bins[i]
        img = mx.image.imdecode(_bin)
        if img.shape[1] != image_size[0]:
            img = mx.image.resize_short(img, image_size[0])
        # HWC -> CHW for the network input layout.
        img = nd.transpose(img, axes=(2, 0, 1))
        for flip in [0, 1]:
            if flip == 1:
                img = mx.ndarray.flip(data=img, axis=2)
            data_list[flip][i][:] = img
        if i % 1000 == 0:
            print('loading bin', i)
    print(data_list[0].shape)
    return (data_list, issame_list)
Example #7
Source File: verification.py From MaskInsightface with Apache License 2.0 | 6 votes |
def load_bin(path, image_size):
    """Read a packed verification ``.bin`` file (Python-2 pickle) into NDArrays.

    Returns ``(data_list, issame_list)``: ``data_list[0]`` holds the decoded
    images, ``data_list[1]`` their horizontally flipped copies.

    Fix over the original: the pickle file handle is closed via a context
    manager -- ``pickle.load(open(path, 'rb'), ...)`` leaked it.
    """
    # encoding='bytes' lets Python 3 read pickles written by Python 2.
    with open(path, 'rb') as f:
        bins, issame_list = pickle.load(f, encoding='bytes')
    data_list = []
    for flip in [0, 1]:
        data = nd.empty((len(issame_list) * 2, 3, image_size[0], image_size[1]))
        data_list.append(data)
    for i in range(len(issame_list) * 2):
        _bin = bins[i]
        img = mx.image.imdecode(_bin)
        # HWC -> CHW for the network input layout.
        img = nd.transpose(img, axes=(2, 0, 1))
        for flip in [0, 1]:
            if flip == 1:
                img = mx.ndarray.flip(data=img, axis=2)
            data_list[flip][i][:] = img
        if i % 1000 == 0:
            print('loading bin', i)
    print(data_list[0].shape)
    return (data_list, issame_list)
Example #8
Source File: verification.py From insightface with MIT License | 6 votes |
def load_bin(path, image_size):
    """Read a packed verification ``.bin`` file into NDArray stacks.

    Returns ``(data_list, issame_list)``: ``data_list[0]`` holds the decoded
    images, ``data_list[1]`` their horizontally flipped copies.

    Fixes over the original: the pickle file handle is closed via a context
    manager (``pickle.load(open(...))`` leaked it), and ``range`` replaces
    the Python-2-only ``xrange``.
    """
    with open(path, 'rb') as f:
        bins, issame_list = pickle.load(f)
    data_list = []
    for flip in [0, 1]:
        data = nd.empty((len(issame_list) * 2, 3, image_size[0], image_size[1]))
        data_list.append(data)
    for i in range(len(issame_list) * 2):
        _bin = bins[i]
        img = mx.image.imdecode(_bin)
        if img.shape[1] != image_size[0]:
            img = mx.image.resize_short(img, image_size[0])
        # HWC -> CHW for the network input layout.
        img = nd.transpose(img, axes=(2, 0, 1))
        for flip in [0, 1]:
            if flip == 1:
                img = mx.ndarray.flip(data=img, axis=2)
            data_list[flip][i][:] = img
        if i % 1000 == 0:
            print('loading bin', i)
    print(data_list[0].shape)
    return (data_list, issame_list)
Example #9
Source File: lfw.py From MaskInsightface with Apache License 2.0 | 6 votes |
def load_dataset(lfw_dir, image_size):
    """Decode every LFW pair image into two NDArray stacks (plain + flipped).

    Returns ``(lfw_data_list, issame_list)``.
    """
    lfw_pairs = read_pairs(os.path.join(lfw_dir, 'pairs.txt'))
    lfw_paths, issame_list = get_paths(lfw_dir, lfw_pairs, 'jpg')
    lfw_data_list = [nd.empty((len(lfw_paths), 3, image_size[0], image_size[1]))
                     for _ in (0, 1)]
    for i, path in enumerate(lfw_paths):
        with open(path, 'rb') as fin:
            _bin = fin.read()
        # Decode then swap HWC -> CHW.
        img = nd.transpose(mx.image.imdecode(_bin), axes=(2, 0, 1))
        for flip in (0, 1):
            if flip == 1:
                img = mx.ndarray.flip(data=img, axis=2)
            lfw_data_list[flip][i][:] = img
        if (i + 1) % 1000 == 0:
            print('loading lfw', i + 1)
    print(lfw_data_list[0].shape)
    print(lfw_data_list[1].shape)
    return (lfw_data_list, issame_list)
Example #10
Source File: lfw.py From insightface with MIT License | 6 votes |
def load_dataset(lfw_dir, image_size):
    """Decode every LFW pair image into two NDArray stacks (plain + flipped).

    Returns ``(lfw_data_list, issame_list)``.
    """
    lfw_pairs = read_pairs(os.path.join(lfw_dir, 'pairs.txt'))
    lfw_paths, issame_list = get_paths(lfw_dir, lfw_pairs, 'jpg')
    lfw_data_list = [nd.empty((len(lfw_paths), 3, image_size[0], image_size[1]))
                     for _ in (0, 1)]
    for i, path in enumerate(lfw_paths):
        with open(path, 'rb') as fin:
            _bin = fin.read()
        # Decode then swap HWC -> CHW.
        img = nd.transpose(mx.image.imdecode(_bin), axes=(2, 0, 1))
        for flip in (0, 1):
            if flip == 1:
                img = mx.ndarray.flip(data=img, axis=2)
            lfw_data_list[flip][i][:] = img
        if (i + 1) % 1000 == 0:
            print('loading lfw', i + 1)
    print(lfw_data_list[0].shape)
    print(lfw_data_list[1].shape)
    return (lfw_data_list, issame_list)
Example #11
Source File: verification.py From insightface with MIT License | 6 votes |
def load_bin(path, image_size):
    """Read a packed verification ``.bin`` file into NDArray stacks.

    Returns ``(data_list, issame_list)``: ``data_list[0]`` holds the decoded
    images, ``data_list[1]`` their horizontally flipped copies.
    """
    # A pickle written by Python 2 raises UnicodeDecodeError under Python 3;
    # retry with byte-string decoding in that case.
    try:
        with open(path, 'rb') as f:
            bins, issame_list = pickle.load(f)  # py2
    except UnicodeDecodeError as e:
        with open(path, 'rb') as f:
            bins, issame_list = pickle.load(f, encoding='bytes')  # py3
    n_images = len(issame_list) * 2
    data_list = [nd.empty((n_images, 3, image_size[0], image_size[1]))
                 for _ in (0, 1)]
    for i in range(n_images):
        img = mx.image.imdecode(bins[i])
        if img.shape[1] != image_size[0]:
            img = mx.image.resize_short(img, image_size[0])
        img = nd.transpose(img, axes=(2, 0, 1))
        for flip in (0, 1):
            if flip == 1:
                img = mx.ndarray.flip(data=img, axis=2)
            data_list[flip][i][:] = img
        if i % 1000 == 0:
            print('loading bin', i)
    print(data_list[0].shape)
    return (data_list, issame_list)
Example #12
Source File: utils.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def get_executor(sym, ctx, data_inputs, initializer=None):
    """Bind *sym* on device *ctx* and allocate its parameter/gradient/aux arrays.

    Parameters
    ----------
    sym : mxnet Symbol to bind.
    ctx : device context on which every array is allocated.
    data_inputs : dict mapping input name -> NDArray; their shapes seed
        shape inference for all remaining arguments.
    initializer : optional callable ``initializer(name, array)`` applied to
        each freshly allocated parameter.

    Returns
    -------
    tuple ``(executor, params, params_grad, aux_states)``.
    """
    data_shapes = {k: v.shape for k, v in data_inputs.items()}
    arg_names = sym.list_arguments()
    aux_names = sym.list_auxiliary_states()
    # Arguments that are not data inputs are the learnable parameters.
    param_names = list(set(arg_names) - set(data_inputs.keys()))
    arg_shapes, output_shapes, aux_shapes = sym.infer_shape(**data_shapes)
    arg_name_shape = {k: s for k, s in zip(arg_names, arg_shapes)}
    # nd.empty allocates uninitialized storage; values come from `initializer`
    # below (or from whoever fills them later).
    params = {n: nd.empty(arg_name_shape[n], ctx=ctx) for n in param_names}
    params_grad = {n: nd.empty(arg_name_shape[n], ctx=ctx) for n in param_names}
    aux_states = {k: nd.empty(s, ctx=ctx) for k, s in zip(aux_names, aux_shapes)}
    exe = sym.bind(ctx=ctx,
                   args=dict(params, **data_inputs),
                   args_grad=params_grad,
                   aux_states=aux_states)
    if initializer is not None:
        for k, v in params.items():
            initializer(k, v)
    return exe, params, params_grad, aux_states
Example #13
Source File: bdk_demo.py From SNIPER-mxnet with Apache License 2.0 | 6 votes |
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
    """Gradient of the negative log-posterior of the two-parameter synthetic
    Gaussian-mixture model used by the SGLD demo.

    Parameters
    ----------
    X : numpy array (or scalar) of observed data for this minibatch.
    theta : length-2 NDArray of current parameters.
    sigma1, sigma2 : prior std-devs of theta[0] and theta[1].
    sigmax : observation noise std-dev.
    rescale_grad : factor scaling the likelihood term (e.g. N/minibatch).
    grad : optional output NDArray; allocated on theta's context if None.

    Returns the (possibly newly allocated) ``grad`` NDArray.

    Fix over the original: ``theta.asnumpy()`` was called twice; each call
    copies device memory to host, so we hoist it into a single call.
    """
    if grad is None:
        grad = nd.empty(theta.shape, theta.context)
    theta_np = theta.asnumpy()
    theta1 = theta_np[0]
    theta2 = theta_np[1]
    v1 = sigma1 ** 2
    v2 = sigma2 ** 2
    vx = sigmax ** 2
    # Mixture responsibilities share this normalizer.
    denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
        -(X - theta1 - theta2) ** 2 / (2 * vx))
    grad_npy = numpy.zeros(theta.shape)
    grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
                                    + numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
                                    (X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta1 / v1
    grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
                                    (X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta2 / v2
    grad[:] = grad_npy
    return grad
Example #14
Source File: bdk_demo.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
    """Gradient of the negative log-posterior of the two-parameter synthetic
    Gaussian-mixture model used by the SGLD demo.

    Parameters
    ----------
    X : numpy array (or scalar) of observed data for this minibatch.
    theta : length-2 NDArray of current parameters.
    sigma1, sigma2 : prior std-devs of theta[0] and theta[1].
    sigmax : observation noise std-dev.
    rescale_grad : factor scaling the likelihood term (e.g. N/minibatch).
    grad : optional output NDArray; allocated on theta's context if None.

    Returns the (possibly newly allocated) ``grad`` NDArray.

    Fix over the original: ``theta.asnumpy()`` was called twice; each call
    copies device memory to host, so we hoist it into a single call.
    """
    if grad is None:
        grad = nd.empty(theta.shape, theta.context)
    theta_np = theta.asnumpy()
    theta1 = theta_np[0]
    theta2 = theta_np[1]
    v1 = sigma1 ** 2
    v2 = sigma2 ** 2
    vx = sigmax ** 2
    # Mixture responsibilities share this normalizer.
    denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
        -(X - theta1 - theta2) ** 2 / (2 * vx))
    grad_npy = numpy.zeros(theta.shape)
    grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
                                    + numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
                                    (X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta1 / v1
    grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
                                    (X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta2 / v2
    grad[:] = grad_npy
    return grad
Example #15
Source File: bdk_demo.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
    """Gradient of the negative log-posterior of the two-parameter synthetic
    Gaussian-mixture model used by the SGLD demo.

    Parameters
    ----------
    X : numpy array (or scalar) of observed data for this minibatch.
    theta : length-2 NDArray of current parameters.
    sigma1, sigma2 : prior std-devs of theta[0] and theta[1].
    sigmax : observation noise std-dev.
    rescale_grad : factor scaling the likelihood term (e.g. N/minibatch).
    grad : optional output NDArray; allocated on theta's context if None.

    Returns the (possibly newly allocated) ``grad`` NDArray.

    Fix over the original: ``theta.asnumpy()`` was called twice; each call
    copies device memory to host, so we hoist it into a single call.
    """
    if grad is None:
        grad = nd.empty(theta.shape, theta.context)
    theta_np = theta.asnumpy()
    theta1 = theta_np[0]
    theta2 = theta_np[1]
    v1 = sigma1 ** 2
    v2 = sigma2 ** 2
    vx = sigmax ** 2
    # Mixture responsibilities share this normalizer.
    denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
        -(X - theta1 - theta2) ** 2 / (2 * vx))
    grad_npy = numpy.zeros(theta.shape)
    grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
                                    + numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
                                    (X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta1 / v1
    grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
                                    (X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta2 / v2
    grad[:] = grad_npy
    return grad
Example #16
Source File: lfw_comparison_and_plot_roc.py From MobileFace with MIT License | 5 votes |
def load_dataset(self):
    """Load the grayscale 100x100 LFW pair images into two stacks.

    Index 0 of the returned list holds the originals, index 1 the
    horizontally mirrored copies.  Returns ``(lfw_data_list, issame_list)``.
    """
    lfw_pairs = self.read_pairs(os.path.join(self.lfw_dir, 'pairs.txt'))
    lfw_paths, issame_list = self.get_paths(self.lfw_dir, lfw_pairs, 'jpg')
    lfw_data_list = [nd.empty((len(lfw_paths), 1, 100, 100)) for _ in (0, 1)]
    for i, path in enumerate(lfw_paths):
        with open(path, 'rb') as fin:
            _bin = fin.read()
        raw = np.asarray(bytearray(_bin), dtype="uint8")
        gray = cv2.imdecode(raw, 0)  # decoded as single-channel (100, 100)
        gray = gray.reshape((1, gray.shape[0], gray.shape[1]))  # (1, 100, 100)
        img = mx.nd.array(gray)
        for flip in (0, 1):
            if flip == 1:
                img = mx.ndarray.flip(data=img, axis=2)
            lfw_data_list[flip][i][:] = img
        if (i + 1) % 1000 == 0:
            print('loading lfw', i + 1)
    print(lfw_data_list[0].shape)
    print(lfw_data_list[1].shape)
    return (lfw_data_list, issame_list)
Example #17
Source File: utils.py From training_results_v0.6 with Apache License 2.0 | 5 votes |
def copy_param(exe, new_param=None):
    """Snapshot the executor's argument arrays into CPU-resident copies.

    If *new_param* is None a fresh dict of CPU buffers is allocated;
    otherwise the given buffers are overwritten in place.  Returns the dict.
    """
    if new_param is None:
        # Allocate matching CPU buffers on first use.
        new_param = {name: nd.empty(arr.shape, ctx=mx.cpu())
                     for name, arr in exe.arg_dict.items()}
    for name, dst in new_param.items():
        exe.arg_dict[name].copyto(dst)
    return new_param
Example #18
Source File: data.py From 1.FaceRecognition with MIT License | 5 votes |
def next(self): """Returns the next batch of data.""" #print('next') batch_size = self.batch_size batch_data = nd.empty((batch_size,)+self.data_shape) batch_label = nd.empty((batch_size,)+self.label_shape) i = 0 #self.cutoff = random.randint(800,1280) try: while i < batch_size: #print('N', i) data, label = self.next_sample() data = nd.array(data) data = nd.transpose(data, axes=(2, 0, 1)) label = nd.array(label) label = nd.transpose(label, axes=(2, 0, 1)) batch_data[i][:] = data batch_label[i][:] = label i += 1 except StopIteration: if i<batch_size: raise StopIteration #return {self.data_name : batch_data, # self.label_name : batch_label} #print(batch_data.shape, batch_label.shape) return mx.io.DataBatch([batch_data], [batch_label, self.weight_mask], batch_size - i)
Example #19
Source File: utils.py From SNIPER-mxnet with Apache License 2.0 | 5 votes |
def copy_param(exe, new_param=None):
    """Snapshot the executor's argument arrays into CPU-resident copies.

    If *new_param* is None a fresh dict of CPU buffers is allocated;
    otherwise the given buffers are overwritten in place.  Returns the dict.
    """
    if new_param is None:
        # Allocate matching CPU buffers on first use.
        new_param = {name: nd.empty(arr.shape, ctx=mx.cpu())
                     for name, arr in exe.arg_dict.items()}
    for name, dst in new_param.items():
        exe.arg_dict[name].copyto(dst)
    return new_param
Example #20
Source File: bdk_demo.py From SNIPER-mxnet with Apache License 2.0 | 5 votes |
def run_synthetic_SGLD():
    """Sample the two-parameter synthetic posterior with SGLD and plot a
    2-D histogram of the collected draws.

    Fix over the original: ``xrange`` is Python-2-only; ``range`` lets the
    loop run under Python 3 (the surrounding code already uses print()).
    """
    theta1 = 0
    theta2 = 1
    sigma1 = numpy.sqrt(10)
    sigma2 = 1
    sigmax = numpy.sqrt(2)
    X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100)
    minibatch_size = 1
    total_iter_num = 1000000
    lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001,
                                 total_iter_num=total_iter_num, factor=0.55)
    # NOTE(review): learning_rate=None appears to rely on lr_scheduler
    # supplying the rate every step -- confirm against the sgld optimizer.
    optimizer = mx.optimizer.create('sgld',
                                    learning_rate=None,
                                    rescale_grad=1.0,
                                    lr_scheduler=lr_scheduler,
                                    wd=0)
    updater = mx.optimizer.get_updater(optimizer)
    theta = mx.random.normal(0, 1, (2,), mx.cpu())
    grad = nd.empty((2,), mx.cpu())
    samples = numpy.zeros((2, total_iter_num))
    start = time.time()
    for i in range(total_iter_num):
        if (i + 1) % 100000 == 0:
            end = time.time()
            print("Iter:%d, Time spent: %f" % (i + 1, end - start))
            start = time.time()
        # One-sample minibatch; rescale the gradient up to full-data scale.
        ind = numpy.random.randint(0, X.shape[0])
        synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax,
                       rescale_grad=X.shape[0] / float(minibatch_size),
                       grad=grad)
        updater('theta', grad, theta)
        samples[:, i] = theta.asnumpy()
    plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet)
    plt.colorbar()
    plt.show()
Example #21
Source File: base.py From SNIPER-mxnet with Apache License 2.0 | 5 votes |
def __init__(self, data_shapes, sym_gen, params=None, aux_states=None,
             default_bucket_kwargs=None, learn_init_keys=None,
             initializer=mx.init.Xavier(factor_type="in", rnd_type="gaussian",
                                        magnitude=2),
             ctx=mx.gpu(), name='Net'):
    """Construct a bucketing network wrapper and bind the initial bucket.

    data_shapes : dict input name -> shape, passed to switch_bucket.
    sym_gen : callable producing a symbol per bucket key.
    params / aux_states : optional arrays; copied onto *ctx* when given.
    default_bucket_kwargs : kwargs for the first bucket (copied, not aliased).
    learn_init_keys : inputs whose initial states are themselves learned.
    initializer / ctx / name : stored for later use.

    NOTE(review): the defaults ``mx.init.Xavier(...)`` and ``mx.gpu()`` are
    evaluated once at function-definition time and shared by all calls --
    confirm this is intended.
    """
    self.sym_gen = sym_gen
    # Copy so later mutation of bucket_kwargs cannot alter the caller's dict.
    bucket_kwargs = default_bucket_kwargs.copy() if \
        default_bucket_kwargs is not None else dict()
    self.curr_bucket_key = None
    self.ctx = ctx
    self.name = name
    self.initializer = initializer
    if params is None:
        self.params = None
        self.params_grad = None
    else:
        # Move the supplied parameters to the target device and allocate
        # matching (uninitialized) gradient buffers for them.
        self.params = OrderedDict([(k, v.copyto(ctx)) for k, v in params.items()])
        self.params_grad = OrderedDict([(n, nd.empty(v.shape, ctx=ctx))
                                        for n, v in self.params.items()])
    if aux_states is not None:
        self.aux_states = OrderedDict([(k, v.copyto(ctx))
                                       for k, v in aux_states.items()])
    else:
        self.aux_states = None
    self._buckets = dict()
    self.learn_init_keys = learn_init_keys if learn_init_keys is not None else []
    self.learn_init_key_shapes = {k: data_shapes[k] for k in self.learn_init_keys}
    # Bind the executor for the default bucket immediately.
    self.switch_bucket(bucket_kwargs=bucket_kwargs, data_shapes=data_shapes)
    self.acc_grad = None
Example #22
Source File: base.py From training_results_v0.6 with Apache License 2.0 | 5 votes |
def __init__(self, data_shapes, sym_gen, params=None, aux_states=None,
             default_bucket_kwargs=None, learn_init_keys=None,
             initializer=mx.init.Xavier(factor_type="in", rnd_type="gaussian",
                                        magnitude=2),
             ctx=mx.gpu(), name='Net'):
    """Construct a bucketing network wrapper and bind the initial bucket.

    data_shapes : dict input name -> shape, passed to switch_bucket.
    sym_gen : callable producing a symbol per bucket key.
    params / aux_states : optional arrays; copied onto *ctx* when given.
    default_bucket_kwargs : kwargs for the first bucket (copied, not aliased).
    learn_init_keys : inputs whose initial states are themselves learned.
    initializer / ctx / name : stored for later use.

    NOTE(review): the defaults ``mx.init.Xavier(...)`` and ``mx.gpu()`` are
    evaluated once at function-definition time and shared by all calls --
    confirm this is intended.
    """
    self.sym_gen = sym_gen
    # Copy so later mutation of bucket_kwargs cannot alter the caller's dict.
    bucket_kwargs = default_bucket_kwargs.copy() if \
        default_bucket_kwargs is not None else dict()
    self.curr_bucket_key = None
    self.ctx = ctx
    self.name = name
    self.initializer = initializer
    if params is None:
        self.params = None
        self.params_grad = None
    else:
        # Move the supplied parameters to the target device and allocate
        # matching (uninitialized) gradient buffers for them.
        self.params = OrderedDict([(k, v.copyto(ctx)) for k, v in params.items()])
        self.params_grad = OrderedDict([(n, nd.empty(v.shape, ctx=ctx))
                                        for n, v in self.params.items()])
    if aux_states is not None:
        self.aux_states = OrderedDict([(k, v.copyto(ctx))
                                       for k, v in aux_states.items()])
    else:
        self.aux_states = None
    self._buckets = dict()
    self.learn_init_keys = learn_init_keys if learn_init_keys is not None else []
    self.learn_init_key_shapes = {k: data_shapes[k] for k in self.learn_init_keys}
    # Bind the executor for the default bucket immediately.
    self.switch_bucket(bucket_kwargs=bucket_kwargs, data_shapes=data_shapes)
    self.acc_grad = None
Example #23
Source File: bdk_demo.py From training_results_v0.6 with Apache License 2.0 | 5 votes |
def run_synthetic_SGLD():
    """Sample the two-parameter synthetic posterior with SGLD and plot a
    2-D histogram of the collected draws.

    Fix over the original: ``xrange`` is Python-2-only; ``range`` lets the
    loop run under Python 3 (the surrounding code already uses print()).
    """
    theta1 = 0
    theta2 = 1
    sigma1 = numpy.sqrt(10)
    sigma2 = 1
    sigmax = numpy.sqrt(2)
    X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100)
    minibatch_size = 1
    total_iter_num = 1000000
    lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001,
                                 total_iter_num=total_iter_num, factor=0.55)
    # NOTE(review): learning_rate=None appears to rely on lr_scheduler
    # supplying the rate every step -- confirm against the sgld optimizer.
    optimizer = mx.optimizer.create('sgld',
                                    learning_rate=None,
                                    rescale_grad=1.0,
                                    lr_scheduler=lr_scheduler,
                                    wd=0)
    updater = mx.optimizer.get_updater(optimizer)
    theta = mx.random.normal(0, 1, (2,), mx.cpu())
    grad = nd.empty((2,), mx.cpu())
    samples = numpy.zeros((2, total_iter_num))
    start = time.time()
    for i in range(total_iter_num):
        if (i + 1) % 100000 == 0:
            end = time.time()
            print("Iter:%d, Time spent: %f" % (i + 1, end - start))
            start = time.time()
        # One-sample minibatch; rescale the gradient up to full-data scale.
        ind = numpy.random.randint(0, X.shape[0])
        synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax,
                       rescale_grad=X.shape[0] / float(minibatch_size),
                       grad=grad)
        updater('theta', grad, theta)
        samples[:, i] = theta.asnumpy()
    plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet)
    plt.colorbar()
    plt.show()
Example #24
Source File: tensor.py From dgl with Apache License 2.0 | 5 votes |
def backward(self, grad_out):
    """Backward pass of copy-reduce: route *grad_out* back to the input rows.

    Uses the tensors stashed by forward(); for the 'mean' reducer the
    gradient is rescaled by the saved degrees and the kernel runs as 'sum'.
    Returns the gradient w.r.t. the forward input.
    """
    in_data_nd, out_data_nd, degs = self.saved_tensors
    grad_in = nd.empty(in_data_nd.shape, ctx=grad_out.context,
                       dtype=grad_out.dtype)
    if self.reducer == 'mean':
        # forward computed mean as sum/degree, so undo the scaling here and
        # dispatch the backward kernel as a plain 'sum'.
        grad_out = grad_out / degs
    grad_out_nd = zerocopy_to_dgl_ndarray(grad_out)
    K.backward_copy_reduce(
        self.reducer if self.reducer != 'mean' else 'sum',
        self.graph, self.target, in_data_nd, out_data_nd,
        grad_out_nd, zerocopy_to_dgl_ndarray_for_write(grad_in),
        self.in_map[1], self.out_map[1])
    # clear saved tensors explicitly
    self.saved_tensors = None
    return grad_in
Example #25
Source File: tensor.py From dgl with Apache License 2.0 | 5 votes |
def forward(self, in_data):
    """Forward pass of copy-reduce: aggregate *in_data* rows per graph node.

    Allocates an (out_size, *feat) output, runs the DGL copy_reduce kernel,
    and for the 'mean' reducer divides the summed result by per-node degrees.
    Saves the tensors needed by backward() before returning.
    """
    feat_shape = in_data.shape[1:]
    out_data = nd.empty((self.out_size,) + feat_shape,
                        ctx=in_data.context, dtype=in_data.dtype)
    in_data_nd = zerocopy_to_dgl_ndarray(in_data)
    out_data_nd = zerocopy_to_dgl_ndarray_for_write(out_data)
    # 'mean' is computed as 'sum' followed by an explicit degree division.
    K.copy_reduce(
        self.reducer if self.reducer != 'mean' else 'sum',
        self.graph, self.target, in_data_nd, out_data_nd,
        self.in_map[0], self.out_map[0])
    # normalize if mean reducer
    # NOTE(zihao): this is a temporary hack and we should have better solution in the future.
    if self.reducer == 'mean':
        # Summing a vector of ones yields each node's incoming degree.
        in_ones = nd.ones((in_data.shape[0],),
                          ctx=in_data.context, dtype=in_data.dtype)
        degs = nd.empty((out_data.shape[0],),
                        ctx=out_data.context, dtype=out_data.dtype)
        in_ones_nd = zerocopy_to_dgl_ndarray(in_ones)
        degs_nd = zerocopy_to_dgl_ndarray(degs)
        K.copy_reduce(
            'sum', self.graph, self.target, in_ones_nd, degs_nd,
            self.in_map[0], self.out_map[0])
        # reshape for broadcasting; clip to 1 so isolated nodes don't divide by 0
        degs = degs.reshape((out_data.shape[0],) +
                            (1,) * (out_data.ndim - 1)).clip(1, float('inf'))
        out_data = out_data / degs
    else:
        degs = None
    self.save_for_backward(in_data_nd, out_data_nd, degs)
    return out_data
Example #26
Source File: tensor.py From dgl with Apache License 2.0 | 5 votes |
def backward(self, grad_out):
    """Backward pass of binary-op-reduce: gradients for both operands.

    Rescales *grad_out* by the saved degrees for the 'mean' reducer, then
    runs the lhs and rhs backward kernels and un-broadcasts each gradient to
    its operand's original shape.  Returns ``(grad_lhs, grad_rhs)``.
    """
    lhs_data_nd, rhs_data_nd, out_data_nd, feat_shape, degs = self.saved_tensors
    if self.reducer == 'mean':
        # forward computed mean as sum/degree; undo the scaling once here.
        grad_out = grad_out / degs
    grad_out_nd = zerocopy_to_dgl_ndarray(grad_out)
    grad_lhs = nd.empty((lhs_data_nd.shape[0],) + feat_shape,
                        ctx=grad_out.context, dtype=grad_out.dtype)
    K.backward_lhs_binary_op_reduce(
        self.reducer if self.reducer != 'mean' else 'sum',
        self.binary_op, self.graph, self.lhs, self.rhs,
        lhs_data_nd, rhs_data_nd, out_data_nd, grad_out_nd,
        zerocopy_to_dgl_ndarray_for_write(grad_lhs),
        self.lhs_map[1], self.rhs_map[1], self.out_map[1])
    # Collapse broadcast dimensions back to the lhs operand's shape.
    grad_lhs = _reduce_grad(grad_lhs, lhs_data_nd.shape)
    grad_rhs = nd.empty((rhs_data_nd.shape[0],) + feat_shape,
                        ctx=grad_out.context, dtype=grad_out.dtype)
    K.backward_rhs_binary_op_reduce(
        self.reducer if self.reducer != 'mean' else 'sum',
        self.binary_op, self.graph, self.lhs, self.rhs,
        lhs_data_nd, rhs_data_nd, out_data_nd, grad_out_nd,
        zerocopy_to_dgl_ndarray_for_write(grad_rhs),
        self.lhs_map[1], self.rhs_map[1], self.out_map[1])
    # Collapse broadcast dimensions back to the rhs operand's shape.
    grad_rhs = _reduce_grad(grad_rhs, rhs_data_nd.shape)
    # clear saved tensors explicitly
    self.saved_tensors = None
    return grad_lhs, grad_rhs
Example #27
Source File: tensor.py From dgl with Apache License 2.0 | 5 votes |
def gather_row(data, row_index):
    """Select rows of *data* by *row_index*.

    Handles the empty-index case explicitly (MXNet workaround) and supports
    both NDArray and plain-sequence indices.
    """
    # MXNet workaround for empty row index
    if len(row_index) == 0:
        # An already-empty tensor passes through; otherwise return an empty
        # slice that keeps dtype/context.
        return data if data.shape[0] == 0 else data[0:0]
    if isinstance(row_index, nd.NDArray):
        return nd.take(data, row_index)
    return data[row_index,]
Example #28
Source File: data.py From insightface with MIT License | 5 votes |
def next(self): """Returns the next batch of data.""" #print('next') batch_size = self.batch_size batch_data = nd.empty((batch_size,)+self.data_shape) batch_label = nd.empty((batch_size,)+self.label_shape) i = 0 #self.cutoff = random.randint(800,1280) try: while i < batch_size: #print('N', i) data, label = self.next_sample() data = nd.array(data) data = nd.transpose(data, axes=(2, 0, 1)) label = nd.array(label) label = nd.transpose(label, axes=(2, 0, 1)) batch_data[i][:] = data batch_label[i][:] = label i += 1 except StopIteration: if i<batch_size: raise StopIteration #return {self.data_name : batch_data, # self.label_name : batch_label} #print(batch_data.shape, batch_label.shape) return mx.io.DataBatch([batch_data], [batch_label, self.weight_mask], batch_size - i)
Example #29
Source File: base.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 5 votes |
def __init__(self, data_shapes, sym_gen, params=None, aux_states=None,
             default_bucket_kwargs=None, learn_init_keys=None,
             initializer=mx.init.Xavier(factor_type="in", rnd_type="gaussian",
                                        magnitude=2),
             ctx=mx.gpu(), name='Net'):
    """Construct a bucketing network wrapper and bind the initial bucket.

    data_shapes : dict input name -> shape, passed to switch_bucket.
    sym_gen : callable producing a symbol per bucket key.
    params / aux_states : optional arrays; copied onto *ctx* when given.
    default_bucket_kwargs : kwargs for the first bucket (copied, not aliased).
    learn_init_keys : inputs whose initial states are themselves learned.
    initializer / ctx / name : stored for later use.

    NOTE(review): the defaults ``mx.init.Xavier(...)`` and ``mx.gpu()`` are
    evaluated once at function-definition time and shared by all calls --
    confirm this is intended.
    """
    self.sym_gen = sym_gen
    # Copy so later mutation of bucket_kwargs cannot alter the caller's dict.
    bucket_kwargs = default_bucket_kwargs.copy() if \
        default_bucket_kwargs is not None else dict()
    self.curr_bucket_key = None
    self.ctx = ctx
    self.name = name
    self.initializer = initializer
    if params is None:
        self.params = None
        self.params_grad = None
    else:
        # Move the supplied parameters to the target device and allocate
        # matching (uninitialized) gradient buffers for them.
        self.params = OrderedDict([(k, v.copyto(ctx)) for k, v in params.items()])
        self.params_grad = OrderedDict([(n, nd.empty(v.shape, ctx=ctx))
                                        for n, v in self.params.items()])
    if aux_states is not None:
        self.aux_states = OrderedDict([(k, v.copyto(ctx))
                                       for k, v in aux_states.items()])
    else:
        self.aux_states = None
    self._buckets = dict()
    self.learn_init_keys = learn_init_keys if learn_init_keys is not None else []
    self.learn_init_key_shapes = {k: data_shapes[k] for k in self.learn_init_keys}
    # Bind the executor for the default bucket immediately.
    self.switch_bucket(bucket_kwargs=bucket_kwargs, data_shapes=data_shapes)
    self.acc_grad = None
Example #30
Source File: bdk_demo.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 5 votes |
def run_synthetic_SGLD():
    """Sample the two-parameter synthetic posterior with SGLD and plot a
    2-D histogram of the collected draws.

    Fix over the original: ``xrange`` is Python-2-only; ``range`` lets the
    loop run under Python 3 (the surrounding code already uses print()).
    """
    theta1 = 0
    theta2 = 1
    sigma1 = numpy.sqrt(10)
    sigma2 = 1
    sigmax = numpy.sqrt(2)
    X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100)
    minibatch_size = 1
    total_iter_num = 1000000
    lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001,
                                 total_iter_num=total_iter_num, factor=0.55)
    # NOTE(review): learning_rate=None appears to rely on lr_scheduler
    # supplying the rate every step -- confirm against the sgld optimizer.
    optimizer = mx.optimizer.create('sgld',
                                    learning_rate=None,
                                    rescale_grad=1.0,
                                    lr_scheduler=lr_scheduler,
                                    wd=0)
    updater = mx.optimizer.get_updater(optimizer)
    theta = mx.random.normal(0, 1, (2,), mx.cpu())
    grad = nd.empty((2,), mx.cpu())
    samples = numpy.zeros((2, total_iter_num))
    start = time.time()
    for i in range(total_iter_num):
        if (i + 1) % 100000 == 0:
            end = time.time()
            print("Iter:%d, Time spent: %f" % (i + 1, end - start))
            start = time.time()
        # One-sample minibatch; rescale the gradient up to full-data scale.
        ind = numpy.random.randint(0, X.shape[0])
        synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax,
                       rescale_grad=X.shape[0] / float(minibatch_size),
                       grad=grad)
        updater('theta', grad, theta)
        samples[:, i] = theta.asnumpy()
    plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet)
    plt.colorbar()
    plt.show()