Python mxnet.ndarray.array() Examples

The following are 30 code examples of mxnet.ndarray.array(), collected from open-source projects. The source file and originating project are listed above each example. You may also want to check out all available functions/classes of the module mxnet.ndarray, or try the search function.
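As a quick orientation before the examples: mxnet.ndarray.array() copies a Python list, tuple, or NumPy array into an MXNet NDArray, with optional ctx (device) and dtype arguments. A minimal sketch (the values are illustrative):

import numpy as np
import mxnet as mx
from mxnet import ndarray as nd

# From a Python list: the dtype defaults to float32
a = nd.array([[1, 2, 3], [4, 5, 6]])
print(a.shape, a.dtype)  # (2, 3) float32

# From a NumPy array, with an explicit context and dtype
b = nd.array(np.arange(6).reshape(2, 3), ctx=mx.cpu(0), dtype='int64')
print(b.context, b.dtype)  # cpu(0) int64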
Example #1
Source File: test_contrib_autograd.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_out_grads():
    x = nd.ones((3, 5))
    dx = nd.zeros_like(x)
    mark_variables([x], [dx])
    da = None
    db = nd.array([1,2,3,4,5])
    dc = nd.array([5,4,3,2,1])

    with train_section():
        a, b, c = nd.split(x, axis=0, num_outputs=3, squeeze_axis=True)
        backward([a, b, c], [da, db, dc])

    assert (dx.asnumpy() == np.array(
        [[1,1,1,1,1],
         [1,2,3,4,5],
         [5,4,3,2,1]])).all() 
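The da = None entry is the interesting part: when a head gradient is None, backward() substitutes an implicit all-ones gradient, which is why the first row of dx comes back as ones. The same default can be seen with the current autograd API (a minimal sketch, not part of the original test):

import mxnet as mx
from mxnet import autograd, nd

x = nd.ones((3,))
x.attach_grad()
with autograd.record():
    y = x * nd.array([1, 2, 3])
y.backward()     # no head gradient supplied -> implicit ones
print(x.grad)    # [1. 2. 3.]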
Example #2
Source File: data.py    From insightface with MIT License
def next_sample(self):
      """Helper function for reading in next sample."""
      if self.cur >= len(self.seq):
        raise StopIteration
      idx = self.seq[self.cur]
      self.cur += 1
      uv_path = self.uv_file_list[idx]
      image_path = self.image_file_list[idx]
      uvmap = np.load(uv_path)
      img = cv2.imread(image_path)[:, :, ::-1]  # BGR -> RGB
      hlabel = uvmap
      #print(hlabel.shape)
      #hlabel = np.array(header.label).reshape( (self.output_label_size, self.output_label_size, self.num_classes) )
      hlabel /= self.input_img_size

      return img, hlabel 
Example #3
Source File: iterators.py    From training_results_v0.6 with Apache License 2.0
def reset(self):
        """Resets the iterator to the beginning of the data."""
        self.curr_idx = 0
        #shuffle data in each bucket
        random.shuffle(self.idx)
        for i, buck in enumerate(self.sentences):
            self.indices[i], self.sentences[i], self.characters[i], self.label[i] = shuffle(self.indices[i],
                                                                                            self.sentences[i],
                                                                                            self.characters[i],
                                                                                            self.label[i])

        self.ndindex = []
        self.ndsent = []
        self.ndchar = []
        self.ndlabel = []

        #for each bucket of data
        for i, buck in enumerate(self.sentences):
            #append the lists with an array
            self.ndindex.append(ndarray.array(self.indices[i], dtype=self.dtype))
            self.ndsent.append(ndarray.array(self.sentences[i], dtype=self.dtype))
            self.ndchar.append(ndarray.array(self.characters[i], dtype=self.dtype))
            self.ndlabel.append(ndarray.array(self.label[i], dtype=self.dtype)) 
Example #4
Source File: faces_classer.py    From 1.FaceRecognition with MIT License
def face_create_lib(args, npz_embs, npz_emb_len, embedding, item, fd, max_nums):
    # Get the total number of IDs currently in the library
    id_sum = npz_embs.shape[0]

    # Create a new folder so the new face can be added to the library
    new_lib_dir = os.path.join(args.outdir, '%08d' % id_sum)
    os.mkdir(new_lib_dir)

    # Expand a dimension for consistency
    embedding = np.expand_dims(embedding, axis=0)

    # Vertically stack the feature vector and the per-ID image count
    npz_embs = np.vstack((npz_embs, embedding.reshape(1, -1)))
    npz_emb_len = np.vstack((npz_emb_len, np.array([[1]])))

    new_img_path = os.path.join(new_lib_dir, '00000' + args.encoding)
    old_img_path = os.path.join(args.indir, item[1])

    fd.write(old_img_path + '\t' + new_img_path + '\t' + str(max_nums) + '\n\n')
    shutil.copyfile(old_img_path, new_img_path)

    if args.delete:
        os.remove(old_img_path)
    return npz_embs, npz_emb_len
Example #5
Source File: data.py    From insightface with MIT License
def pairwise_dists(self, embeddings):
      nd_embedding_list = []
      for i in xrange(self.ctx_num):
        nd_embedding = mx.nd.array(embeddings, mx.gpu(i))
        nd_embedding_list.append(nd_embedding)
      nd_pdists = []
      pdists = []
      for idx in xrange(embeddings.shape[0]):
        emb_idx = idx%self.ctx_num
        nd_embedding = nd_embedding_list[emb_idx]
        a_embedding = nd_embedding[idx]
        body = mx.nd.broadcast_sub(a_embedding, nd_embedding)
        body = body*body
        body = mx.nd.sum_axis(body, axis=1)
        nd_pdists.append(body)
        if len(nd_pdists)==self.ctx_num or idx==embeddings.shape[0]-1:
          for x in nd_pdists:
            pdists.append(x.asnumpy())
          nd_pdists = []
      return pdists 
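Each body value computed above is the squared Euclidean distance between one embedding and every other embedding, with the work round-robined across ctx_num GPUs. The core computation, checked on CPU with NumPy (shapes made up):

import numpy as np

emb = np.random.rand(4, 8).astype('float32')
# Squared distances between row 0 and all rows; the self-distance is 0
d0 = ((emb[0] - emb) ** 2).sum(axis=1)
print(d0.shape, d0[0])  # (4,) 0.0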
Example #6
Source File: sampler.py    From training_results_v0.6 with Apache License 2.0
def draw(self, true_classes):
        """Draw samples from log uniform distribution and returns sampled candidates,
        expected count for true classes and sampled classes."""
        range_max = self.range_max
        num_sampled = self.num_sampled
        ctx = true_classes.context
        log_range = math.log(range_max + 1)
        num_tries = 0
        true_classes = true_classes.reshape((-1,))
        sampled_classes, num_tries = self.sampler.sample_unique(num_sampled)

        true_cls = true_classes.as_in_context(ctx).astype('float64')
        prob_true = ((true_cls + 2.0) / (true_cls + 1.0)).log() / log_range
        count_true = self._prob_helper(num_tries, num_sampled, prob_true)

        sampled_classes = ndarray.array(sampled_classes, ctx=ctx, dtype='int64')
        sampled_cls_fp64 = sampled_classes.astype('float64')
        prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range
        count_sampled = self._prob_helper(num_tries, num_sampled, prob_sampled)
        return [sampled_classes, count_true, count_sampled] 
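The probability expressions above implement the log-uniform (Zipfian) candidate-sampling distribution, P(c) = log((c + 2) / (c + 1)) / log(range_max + 1), under which small class ids are sampled most often. A self-contained check of the formula (the range_max value is arbitrary):

import math

range_max = 1000
log_range = math.log(range_max + 1)

def log_uniform_prob(c):
    # P(c) = log((c + 2) / (c + 1)) / log(range_max + 1)
    return math.log((c + 2.0) / (c + 1.0)) / log_range

print(log_uniform_prob(0) > log_uniform_prob(999))         # True
print(sum(log_uniform_prob(c) for c in range(range_max)))  # ~1.0 (telescoping sum)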
Example #7
Source File: data.py    From 1.FaceRecognition with MIT License
def next_sample(self):
      """Helper function for reading in next sample."""
      if self.cur >= len(self.seq):
        raise StopIteration
      idx = self.seq[self.cur]
      self.cur += 1
      uv_path = self.uv_file_list[idx]
      image_path = self.image_file_list[idx]
      uvmap = np.load(uv_path)
      img = cv2.imread(image_path)[:, :, ::-1]  # BGR -> RGB
      hlabel = uvmap
      #print(hlabel.shape)
      #hlabel = np.array(header.label).reshape( (self.output_label_size, self.output_label_size, self.num_classes) )
      hlabel /= self.input_img_size

      return img, hlabel 
Example #8
Source File: test_autograd.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_grad_with_stype():
    def check_grad_with_stype(array_stype, grad_stype, expected_stype):
        x = mx.nd.zeros((1, 1), stype=array_stype)
        x.attach_grad(stype=grad_stype)
        # check grad attached
        assert x.grad.stype == expected_stype
        y = x.detach()
        # check array detached
        assert y.stype == array_stype

    stypes = ['default', 'csr', 'row_sparse']
    for stype in stypes:
        # check the default stype of the gradient (same as the array stype)
        check_grad_with_stype(stype, None, stype)
        for grad_stype in stypes:
            # check the stype of the gradient when provided
            check_grad_with_stype(stype, grad_stype, grad_stype) 
Example #9
Source File: face_detection.py    From insightface with MIT License
def generate_anchors_fpn(cfg):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, 15, 15) window.
    """
    RPN_FEAT_STRIDE = []
    for k in cfg:
      RPN_FEAT_STRIDE.append( int(k) )
    RPN_FEAT_STRIDE = sorted(RPN_FEAT_STRIDE, reverse=True)
    anchors = []
    for k in RPN_FEAT_STRIDE:
      v = cfg[str(k)]
      bs = v['BASE_SIZE']
      __ratios = np.array(v['RATIOS'])
      __scales = np.array(v['SCALES'])
      stride = int(k)
      #print('anchors_fpn', bs, __ratios, __scales, file=sys.stderr)
      r = generate_anchors(bs, __ratios, __scales, stride)
      #print('anchors_fpn', r.shape, file=sys.stderr)
      anchors.append(r)

    return anchors 
Example #10
Source File: data.py    From insightface with MIT License
def next_sample(self):
      """Helper function for reading in next sample."""
      if self.cur >= len(self.seq):
        raise StopIteration
      idx = self.seq[self.cur]
      self.cur += 1
      s = self.imgrec.read_idx(idx)
      header, img = recordio.unpack(s)
      img = mx.image.imdecode(img).asnumpy()
      hlabel = np.array(header.label).reshape( (self.num_classes,2) )
      if not config.label_xfirst:
        hlabel = hlabel[:, ::-1]  # convert to X/W-first order
      annot = {'scale': config.base_scale}

      #ul = np.array( (50000,50000), dtype=np.int32)
      #br = np.array( (0,0), dtype=np.int32)
      #for i in range(hlabel.shape[0]):
      #  h = int(hlabel[i][0])
      #  w = int(hlabel[i][1])
      #  key = np.array((h,w))
      #  ul = np.minimum(key, ul)
      #  br = np.maximum(key, br)

      return img, hlabel, annot 
Example #11
Source File: sampler.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def draw(self, true_classes):
        """Draw samples from log uniform distribution and returns sampled candidates,
        expected count for true classes and sampled classes."""
        range_max = self.range_max
        num_sampled = self.num_sampled
        ctx = true_classes.context
        log_range = math.log(range_max + 1)
        num_tries = 0
        true_classes = true_classes.reshape((-1,))
        sampled_classes, num_tries = self.sampler.sample_unique(num_sampled)

        true_cls = true_classes.as_in_context(ctx).astype('float64')
        prob_true = ((true_cls + 2.0) / (true_cls + 1.0)).log() / log_range
        count_true = self._prob_helper(num_tries, num_sampled, prob_true)

        sampled_classes = ndarray.array(sampled_classes, ctx=ctx, dtype='int64')
        sampled_cls_fp64 = sampled_classes.astype('float64')
        prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range
        count_sampled = self._prob_helper(num_tries, num_sampled, prob_sampled)
        return [sampled_classes, count_true, count_sampled] 
Example #12
Source File: iterators.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def reset(self):
        """Resets the iterator to the beginning of the data."""
        self.curr_idx = 0
        #shuffle data in each bucket
        random.shuffle(self.idx)
        for i, buck in enumerate(self.sentences):
            self.indices[i], self.sentences[i], self.characters[i], self.label[i] = shuffle(self.indices[i],
                                                                                            self.sentences[i],
                                                                                            self.characters[i],
                                                                                            self.label[i])

        self.ndindex = []
        self.ndsent = []
        self.ndchar = []
        self.ndlabel = []

        #for each bucket of data
        for i, buck in enumerate(self.sentences):
            #append the lists with an array
            self.ndindex.append(ndarray.array(self.indices[i], dtype=self.dtype))
            self.ndsent.append(ndarray.array(self.sentences[i], dtype=self.dtype))
            self.ndchar.append(ndarray.array(self.characters[i], dtype=self.dtype))
            self.ndlabel.append(ndarray.array(self.label[i], dtype=self.dtype)) 
Example #13
Source File: tensor.py    From dgl with Apache License 2.0
def pad_packed_tensor(input, lengths, value, l_min=None):
    old_shape = input.shape
    if isinstance(lengths, nd.NDArray):
        max_len = as_scalar(lengths.max())  # longest sequence length
    else:
        max_len = builtins.max(lengths)

    if l_min is not None:
        max_len = builtins.max(max_len, l_min)

    batch_size = len(lengths)
    ctx = input.context
    dtype = input.dtype
    x = nd.full((batch_size * max_len, *old_shape[1:]), value, ctx=ctx, dtype=dtype)
    index = []
    for i, l in enumerate(lengths):
        index.extend(range(i * max_len, i * max_len + l))
    index = nd.array(index, ctx=ctx)
    return scatter_row(x, index, input).reshape(batch_size, max_len, *old_shape[1:]) 
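pad_packed_tensor depends on dgl backend helpers (as_scalar, scatter_row) that are not shown on this page. The sketch below reproduces the same pack-to-padded shape logic with plain mxnet slice assignment instead of scatter_row (the lengths and pad value are made up):

import mxnet.ndarray as nd

# A "packed" tensor: rows of two sequences with lengths 2 and 3, 4 features each
packed = nd.arange(20).reshape((5, 4))
lengths, value = [2, 3], -1.0
max_len = max(lengths)

# Fill the destination with the pad value, then copy each sequence into its slot
out = nd.full((len(lengths) * max_len, 4), value)
offset = 0
for i, l in enumerate(lengths):
    out[i * max_len:i * max_len + l] = packed[offset:offset + l]
    offset += l
print(out.reshape((len(lengths), max_len, 4)))  # shape (2, 3, 4), padded with -1.0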
Example #14
Source File: data.py    From 1.FaceRecognition with MIT License
def compress_aug(self, img):
      buf = BytesIO()
      img = Image.fromarray(img.asnumpy(), 'RGB')
      q = random.randint(2, 20)
      img.save(buf, format='JPEG', quality=q)
      buf = buf.getvalue()
      img = Image.open(BytesIO(buf))
      return nd.array(np.asarray(img, 'float32')) 
Example #15
Source File: data.py    From 1.FaceRecognition with MIT License
def saturation_aug(self, src, x):
      alpha = 1.0 + random.uniform(-x, x)
      coef = nd.array([[[0.299, 0.587, 0.114]]])
      gray = src * coef
      gray = nd.sum(gray, axis=2, keepdims=True)
      gray *= (1.0 - alpha)
      src *= alpha
      src += gray
      return src 
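The [0.299, 0.587, 0.114] coefficients are the standard ITU-R BT.601 luma weights, so gray holds a per-pixel grayscale intensity that the blend then shifts the image toward or away from. A tiny check on one made-up RGB pixel:

from mxnet import nd

pixel = nd.array([[[100.0, 150.0, 200.0]]])         # one RGB pixel, HWC layout
coef = nd.array([[[0.299, 0.587, 0.114]]])
print(nd.sum(pixel * coef, axis=2, keepdims=True))  # ~140.75, the luma value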
Example #16
Source File: sync_loader_helper.py    From gluon-cv with Apache License 2.0
def split_and_load(data, ctx_list, batch_axis=0, even_split=True, multiplier=1):
    """Splits an NDArray into `len(ctx_list)` slices along `batch_axis` and loads
    each slice to one context in `ctx_list`.

    Parameters
    ----------
    data : NDArray
        A batch of data.
    ctx_list : list of Context
        A list of Contexts.
    batch_axis : int, default 0
        The axis along which to slice.
    even_split : bool, default True
        Whether to force all slices to have the same number of elements.
    multiplier : int, default 1
        The batch size has to be a multiple of the channel multiplier. (Needs further investigation.)

    Returns
    -------
    list of NDArray
        Each corresponds to a context in `ctx_list`.
    """
    if not isinstance(data, ndarray.NDArray):
        data = ndarray.array(data, ctx=ctx_list[0])
    if len(ctx_list) == 1:
        return [data.as_in_context(ctx_list[0])]

    slices = split_data(data, len(ctx_list), batch_axis, even_split, multiplier)
    return [i.as_in_context(ctx) for i, ctx in zip(slices, ctx_list)] 
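This helper mirrors mxnet.gluon.utils.split_and_load while adding the multiplier divisibility constraint; the upstream version is enough to see the basic behavior (two CPU contexts stand in for GPUs):

import mxnet as mx
from mxnet.gluon.utils import split_and_load

batch = mx.nd.arange(16).reshape((8, 2))
ctx_list = [mx.cpu(0), mx.cpu(1)]        # e.g. [mx.gpu(0), mx.gpu(1)] in practice
slices = split_and_load(batch, ctx_list)
for s in slices:
    print(s.shape, s.context)            # two (4, 2) slices, one per context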
Example #17
Source File: data.py    From 1.FaceRecognition with MIT License
def contrast_aug(self, src, x):
      alpha = 1.0 + random.uniform(-x, x)
      coef = nd.array([[[0.299, 0.587, 0.114]]])
      gray = src * coef
      gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)
      src *= alpha
      src += gray
      return src 
Example #18
Source File: dqn_run_test.py    From training_results_v0.6 with Apache License 2.0
def calculate_avg_q(samples, qnet):
    total_q = 0.0
    for i in range(len(samples)):
        state = nd.array(samples[i:i + 1], ctx=qnet.ctx) / float(255.0)
        total_q += qnet.forward(is_train=False, data=state)[0].asnumpy().max(axis=1).sum()
    avg_q_score = total_q / float(len(samples))
    return avg_q_score 
Example #19
Source File: lstm_crf.py    From training_results_v0.6 with Apache License 2.0
def _score_sentence(self, feats, tags):
        # Gives the score of a provided tag sequence
        score = nd.array([0])
        tags = nd.concat(nd.array([self.tag2idx[START_TAG]]), *tags, dim=0)
        for i, feat in enumerate(feats):
            score = score + \
                self.transitions.data()[to_scalar(tags[i+1]), to_scalar(tags[i])] + feat[to_scalar(tags[i+1])]
        score = score + self.transitions.data()[self.tag2idx[STOP_TAG],
                                                to_scalar(tags[int(tags.shape[0] - 1)])]
        return score 
Example #20
Source File: data.py    From 1.FaceRecognition with MIT License
def _pairwise_dists(self, embeddings):
      nd_embedding = mx.nd.array(embeddings, mx.gpu(0))
      pdists = []
      for idx in xrange(embeddings.shape[0]):
        a_embedding = nd_embedding[idx]
        body = mx.nd.broadcast_sub(a_embedding, nd_embedding)
        body = body*body
        body = mx.nd.sum_axis(body, axis=1)
        ret = body.asnumpy()
        #print(ret.shape)
        pdists.append(ret)
      return pdists 
Example #21
Source File: lstm_crf.py    From training_results_v0.6 with Apache License 2.0
def prepare_sequence(seq, word2idx):
    return nd.array([word2idx[w] for w in seq])

# Computing log-sum-exp is numerically more stable than multiplying probabilities
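The log_sum_exp helper that comment refers to is not shown on this page; a common formulation in ndarray terms looks like the sketch below (an assumption about the helper, not the verbatim lstm_crf.py code):

from mxnet import nd

def log_sum_exp(vec):
    # Subtract the max before exponentiating so exp() cannot overflow:
    # log(sum(exp(v))) = max(v) + log(sum(exp(v - max(v))))
    max_score = nd.max(vec).asscalar()
    return max_score + nd.log(nd.sum(nd.exp(vec - max_score)))

print(log_sum_exp(nd.array([1000.0, 1000.0])))  # ~1000.693 instead of inf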
Example #22
Source File: gen_megaface.py    From 1.FaceRecognition with MIT License
def get_feature(imgs, nets):
  count = len(imgs)
  data = mx.nd.zeros(shape=(count * 2, 3, imgs[0].shape[0], imgs[0].shape[1]))
  for idx, img in enumerate(imgs):
    img = img[:, :, ::-1]  # BGR -> RGB
    img = np.transpose( img, (2,0,1) )
    for flipid in [0,1]:
      _img = np.copy(img)
      if flipid==1:
        _img = _img[:,:,::-1]
      _img = nd.array(_img)
      data[count*flipid+idx] = _img

  F = []
  for net in nets:
    db = mx.io.DataBatch(data=(data,))
    net.model.forward(db, is_train=False)
    x = net.model.get_outputs()[0].asnumpy()
    embedding = x[0:count,:] + x[count:,:]
    embedding = sklearn.preprocessing.normalize(embedding)
    #print('emb', embedding.shape)
    F.append(embedding)
  F = np.concatenate(F, axis=1)
  F = sklearn.preprocessing.normalize(F)
  #print('F', F.shape)
  return F 
Example #23
Source File: algos.py    From training_results_v0.6 with Apache License 2.0
def SGD(sym, data_inputs, X, Y, X_test, Y_test, total_iter_num,
        lr=None,
        lr_scheduler=None, prior_precision=1,
        out_grad_f=None,
        initializer=None,
        minibatch_size=100, dev=mx.gpu()):
    if out_grad_f is None:
        label_key = list(set(data_inputs.keys()) - set(['data']))[0]
    exe, params, params_grad, _ = get_executor(sym, dev, data_inputs, initializer)
    optimizer = mx.optimizer.create('sgd', learning_rate=lr,
                                    rescale_grad=X.shape[0] / minibatch_size,
                                    lr_scheduler=lr_scheduler,
                                    wd=prior_precision)
    updater = mx.optimizer.get_updater(optimizer)
    start = time.time()
    for i in range(total_iter_num):
        indices = numpy.random.randint(X.shape[0], size=minibatch_size)
        X_batch = X[indices]
        Y_batch = Y[indices]
        exe.arg_dict['data'][:] = X_batch
        if out_grad_f is None:
            exe.arg_dict[label_key][:] = Y_batch
            exe.forward(is_train=True)
            exe.backward()
        else:
            exe.forward(is_train=True)
            exe.backward(out_grad_f(exe.outputs, nd.array(Y_batch, ctx=dev)))
        for k in params:
            updater(k, params_grad[k], params[k])
        if (i + 1) % 500 == 0:
            end = time.time()
            print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start))
            sample_test_acc(exe, X=X_test, Y=Y_test, label_num=10, minibatch_size=100)
            start = time.time()
    return exe, params, params_grad 
Example #24
Source File: tensor.py    From dgl with Apache License 2.0
def nonzero_1d(input):
    # TODO: fallback to numpy is unfortunate
    tmp = input.asnumpy()
    tmp = np.nonzero(tmp)[0]
    return nd.array(tmp, ctx=input.context, dtype=input.dtype) 
Example #25
Source File: segmentation.py    From gluon-cv with Apache License 2.0
def _mask_transform(self, mask):
        target = np.array(mask).astype('int32')
        target[target == 255] = -1
        return F.array(target, cpu(0)) 
Example #26
Source File: segbase.py    From gluon-cv with Apache License 2.0
def _mask_transform(self, mask):
        return F.array(np.array(mask), cpu(0)).astype('int32') 
Example #27
Source File: segbase.py    From gluon-cv with Apache License 2.0
def _img_transform(self, img):
        return F.array(np.array(img), cpu(0)) 
Example #28
Source File: tensor_models.py    From dgl with Apache License 2.0
def load(self, path, name):
        """Load embeddings.

        Parameters
        ----------
        path : str
            Directory to load the embedding.
        name : str
            Embedding name.
        """
        emb_fname = os.path.join(path, name+'.npy')
        self.emb = nd.array(np.load(emb_fname)) 
Example #29
Source File: __init__.py    From dgl with Apache License 2.0
def is_cuda_available():
    # TODO: Does MXNet have a convenient function to test GPU availability/compilation?
    try:
        a = nd.array([1, 2, 3], ctx=mx.gpu())
        return True
    except mx.MXNetError:
        return False 
Example #30
Source File: tensor.py    From dgl with Apache License 2.0
def tensor(data, dtype=None):
    # MXNet always returns a float tensor regardless of type inside data.
    # This is a workaround.
    if dtype is None:
        if isinstance(data[0], numbers.Integral):
            dtype = np.int64
        else:
            dtype = np.float32
    return nd.array(data, dtype=dtype)
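A quick check of the behavior this workaround targets; without an explicit dtype, nd.array returns float32 even for an all-integer list:

import numpy as np
from mxnet import nd

print(nd.array([1, 2, 3]).dtype)                  # float32 (the MXNet default)
print(nd.array([1, 2, 3], dtype=np.int64).dtype)  # int64, what tensor() selects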