Python mxnet.ndarray.concat() Examples

The following are 30 code examples of mxnet.ndarray.concat(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module mxnet.ndarray, or try the search function.
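As a quick refresher before the project examples, here is a minimal standalone sketch of the call itself: mxnet.ndarray.concat() joins NDArrays along an existing axis, selected with the dim keyword (not axis, as in NumPy). The array names below are purely illustrative.

import mxnet.ndarray as nd

a = nd.ones((2, 3))
b = nd.zeros((2, 3))

rows = nd.concat(a, b, dim=0)  # shape (4, 3): stacked along the first axis
cols = nd.concat(a, b, dim=1)  # shape (2, 6): joined along the second axis
print(rows.shape, cols.shape)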
Example #1
Source File: train_cgan.py    From gluon-cv with Apache License 2.0
def query(self, images):
        if self.pool_size == 0:
            return images
        return_images = []
        for image in images:
            image = image.reshape(1, image.shape[0], image.shape[1], image.shape[2])
            if self.num_imgs < self.pool_size:
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                return_images.append(image)
            else:
                p = random.uniform(0, 1)
                if p > 0.5:
                    random_id = random.randint(0, self.pool_size - 1)  # randint is inclusive
                    tmp = self.images[random_id].copy()
                    self.images[random_id] = image
                    return_images.append(tmp)
                else:
                    return_images.append(image)
        image_array = return_images[0].copyto(images.context)
        for image in return_images[1:]:
            image_array = nd.concat(image_array, image.copyto(images.context), dim=0)
        return image_array 
Example #2
Source File: model.py    From NER_BiLSTM_CRF_Chinese with Apache License 2.0
def _forward_alg(self, feats):
        alphas = [[-10000.] * self.tagset_size]
        alphas[0][self.tag2idx[self.START_TAG]] = 0.
        alphas = nd.array(alphas, ctx=self.ctx)

        for feat in feats:
            alphas_t = [] 
            for next_tag in range(self.tagset_size):
                emit_score = feat[next_tag].reshape((1, -1))
                trans_score = self.transitions[next_tag].reshape((1, -1))
                next_tag_var = alphas + trans_score + emit_score
                alphas_t.append(log_sum_exp(next_tag_var))
            alphas = nd.concat(*alphas_t, dim=0).reshape((1, -1))
        terminal_var = alphas + self.transitions[self.tag2idx[self.STOP_TAG]]
        alpha = log_sum_exp(terminal_var)
        return alpha 
Example #3
Source File: face_detector.py    From faster-mobile-retinaface with GNU General Public License v3.0
def _retina_solve(self):
        out, res, anchors = iter(self.exec_group.execs[0].outputs), [], []

        for fpn in self._fpn_anchors:
            scores = next(out)[:, -fpn.scales_shape:,
                               :, :].transpose((0, 2, 3, 1))
            deltas = next(out).transpose((0, 2, 3, 1))

            res.append(concat(deltas.reshape((-1, 4)),
                              scores.reshape((-1, 1)), dim=1))

            anchors.append(self._get_runtime_anchors(*deltas.shape[1:3],
                                                     fpn.stride,
                                                     fpn.base_anchors))

        return concat(*res, dim=0), concatenate(anchors) 
Example #4
Source File: train_cgan.py    From panoptic-fpn-gluon with Apache License 2.0
def query(self, images):
        if self.pool_size == 0:
            return images
        return_images = []
        for image in images:
            image = image.reshape(1, image.shape[0], image.shape[1], image.shape[2])
            if self.num_imgs < self.pool_size:
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                return_images.append(image)
            else:
                p = random.uniform(0, 1)
                if p > 0.5:
                    random_id = random.randint(0, self.pool_size - 1)  # randint is inclusive
                    tmp = self.images[random_id].copy()
                    self.images[random_id] = image
                    return_images.append(tmp)
                else:
                    return_images.append(image)
        image_array = return_images[0].copyto(images.context)
        for image in return_images[1:]:
            image_array = nd.concat(image_array, image.copyto(images.context), dim=0)
        return image_array 
Example #5
Source File: pspnet.py    From panoptic-fpn-gluon with Apache License 2.0
def hybrid_forward(self, F, x):
        feat1 = self.upsample(F, self.conv1(self.pool(F, x, 1)))
        feat2 = self.upsample(F, self.conv2(self.pool(F, x, 2)))
        feat3 = self.upsample(F, self.conv3(self.pool(F, x, 3)))
        feat4 = self.upsample(F, self.conv4(self.pool(F, x, 6)))
        return F.concat(x, feat1, feat2, feat3, feat4, dim=1) 
Example #6
Source File: utils.py    From MXNet-Gluon-Style-Transfer with MIT License
def subtract_imagenet_mean_batch(batch):
    """Subtract ImageNet mean pixel-wise from a BGR image."""
    batch = F.swapaxes(batch, 0, 1)
    (r, g, b) = F.split(batch, num_outputs=3, axis=0)
    r = r - 123.680
    g = g - 116.779
    b = b - 103.939
    batch = F.concat(r, g, b, dim=0)
    batch = F.swapaxes(batch, 0, 1)
    return batch 
Example #7
Source File: utils.py    From MXNet-Gluon-Style-Transfer with MIT License
def tensor_save_bgrimage(tensor, filename, cuda=False):
    (b, g, r) = F.split(tensor, num_outputs=3, axis=0)
    tensor = F.concat(r, g, b, dim=0)
    tensor_save_rgbimage(tensor, filename, cuda) 
Example #8
Source File: CapsuleLayer.py    From CapsuleNet-Gluon with MIT License
def concact_vectors_in_list(self, vec_list, axis):
        concat_vec = vec_list[0]
        for i in range(1, len(vec_list)):
            concat_vec = nd.concat(concat_vec, vec_list[i], dim=axis)

        return concat_vec 
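Since nd.concat is variadic, the pairwise loop above can also be collapsed into a single call by unpacking the list. A minimal sketch under the same assumptions as the snippet (nd is mxnet.ndarray, vec_list holds two or more NDArrays of compatible shape):

def concat_vectors_in_list(vec_list, axis):
    # Single-call equivalent of the loop: unpack the list into nd.concat
    return nd.concat(*vec_list, dim=axis)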
Example #9
Source File: ssd_inference.py    From deeplearning-benchmark with Apache License 2.0
def get_batch_image_ndarray(input_image_path, input_shape, ctx, batchSize):
    img = mx.image.imread(input_image_path)
    channels = input_shape[0]
    input_height = input_shape[1]
    input_width = input_shape[2]

    img = mx.image.imresize(img, input_height, input_width)
    img = img.transpose((2, 0, 1))  # Channel first
    img = img.expand_dims(axis=0)  # Add a new axis

    result_img = img
    for i in range(1, batchSize):
        result_img = nd.concat(result_img, img, dim=0)

    return result_img.as_in_context(ctx) 
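For reference, the replication loop above can also be expressed as a single concat over a repeated list, since img already carries a leading batch axis of size 1. A hedged one-line sketch using the same names:

# Single-call equivalent of the replication loop
result_img = nd.concat(*([img] * batchSize), dim=0) if batchSize > 1 else img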
Example #10
Source File: pspnet.py    From panoptic-fpn-gluon with Apache License 2.0
def demo(self, x):
        self._up_kwargs['height'] = x.shape[2]
        self._up_kwargs['width'] = x.shape[3]
        import mxnet.ndarray as F
        feat1 = self.upsample(F, self.conv1(self.pool(F, x, 1)))
        feat2 = self.upsample(F, self.conv2(self.pool(F, x, 2)))
        feat3 = self.upsample(F, self.conv3(self.pool(F, x, 3)))
        feat4 = self.upsample(F, self.conv4(self.pool(F, x, 6)))
        return F.concat(x, feat1, feat2, feat3, feat4, dim=1) 
Example #11
Source File: utils.py    From MXNet-Gluon-Style-Transfer with MIT License
def subtract_imagenet_mean_preprocess_batch(batch):
    """Subtract ImageNet mean pixel-wise from a BGR image."""
    batch = F.swapaxes(batch, 0, 1)
    (r, g, b) = F.split(batch, num_outputs=3, axis=0)
    r = r - 123.680
    g = g - 116.779
    b = b - 103.939
    batch = F.concat(b, g, r, dim=0)
    batch = F.swapaxes(batch, 0, 1)
    return batch 
Example #12
Source File: train_cgan.py    From panoptic-fpn-gluon with Apache License 2.0
def hybrid_forward(self, F, x, *args, **kwargs):
        if self.outermost:
            return self.model(x)
        else:
            # Concatenate the input with the block's output along the channel
            # axis; calling concat through F keeps the block hybridizable.
            return F.concat(x, self.model(x), dim=1)

# Defines the PatchGAN discriminator with the specified arguments. 
Example #13
Source File: kaggle_k_fold_cross_validation.py    From training_results_v0.6 with Apache License 2.0
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
          weight_decay, batch_size):
    """Trains the model and predicts on the test data set."""
    net = get_net()
    _ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
              weight_decay, batch_size)
    # X_test is assumed to be defined at module level (test features as an NDArray).
    preds = net(X_test).asnumpy()
    test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
    submission.to_csv('submission.csv', index=False) 
Example #14
Source File: kaggle_k_fold_cross_validation.py    From training_results_v0.6 with Apache License 2.0
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
                       learning_rate, weight_decay, batch_size):
    """Conducts k-fold cross validation for the model."""
    assert k > 1
    fold_size = X_train.shape[0] // k

    train_loss_sum = 0.0
    test_loss_sum = 0.0
    for test_idx in range(k):
        X_val_test = X_train[test_idx * fold_size: (test_idx + 1) *
                                                   fold_size, :]
        y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
        val_train_defined = False
        for i in range(k):
            if i != test_idx:
                X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
                y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
                if not val_train_defined:
                    X_val_train = X_cur_fold
                    y_val_train = y_cur_fold
                    val_train_defined = True
                else:
                    X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
                    y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
        net = get_net()
        train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch,
                           learning_rate, weight_decay, batch_size)
        train_loss_sum += train_loss
        test_loss = get_rmse_log(net, X_val_test, y_val_test)
        print("Test loss: %f" % test_loss)
        test_loss_sum += test_loss
    return train_loss_sum / k, test_loss_sum / k

# The sets of parameters. Better results are obtained with modifications.
# These parameters can be fine-tuned with k-fold cross-validation. 
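An equivalent way to assemble the k-1 training folds in the k_fold_cross_valid function above is to collect the slices in lists and concatenate once, instead of growing X_val_train and y_val_train inside the loop. A sketch using the same names (assuming k > 2, so at least two folds are joined):

X_parts = [X_train[i * fold_size:(i + 1) * fold_size, :]
           for i in range(k) if i != test_idx]
y_parts = [y_train[i * fold_size:(i + 1) * fold_size]
           for i in range(k) if i != test_idx]
X_val_train = nd.concat(*X_parts, dim=0)
y_val_train = nd.concat(*y_parts, dim=0)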
Example #15
Source File: utils.py    From training_results_v0.6 with Apache License 2.0
def preprocess_batch(batch):
    batch = F.swapaxes(batch, 0, 1)
    (r, g, b) = F.split(batch, num_outputs=3, axis=0)
    batch = F.concat(b, g, r, dim=0)
    batch = F.swapaxes(batch, 0, 1)
    return batch 
Example #16
Source File: utils.py    From training_results_v0.6 with Apache License 2.0
def add_imagenet_mean_batch(batch):
    batch = F.swapaxes(batch, 0, 1)
    (b, g, r) = F.split(batch, num_outputs=3, axis=0)
    r = r + 123.680
    g = g + 116.779
    b = b + 103.939
    batch = F.concat(b, g, r, dim=0)
    batch = F.swapaxes(batch, 0, 1)
    """
    batch = denormalizer(batch)
    """
    return batch 
Example #17
Source File: utils.py    From training_results_v0.6 with Apache License 2.0
def subtract_imagenet_mean_batch(batch):
    """Subtract ImageNet mean pixel-wise from a BGR image."""
    batch = F.swapaxes(batch, 0, 1)
    (r, g, b) = F.split(batch, num_outputs=3, axis=0)
    r = r - 123.680
    g = g - 116.779
    b = b - 103.939
    batch = F.concat(r, g, b, dim=0)
    batch = F.swapaxes(batch, 0, 1)
    return batch 
Example #18
Source File: lstm_crf.py    From training_results_v0.6 with Apache License 2.0
def _viterbi_decode(self, feats):
        backpointers = []

        # Initialize the viterbi variables in log space
        vvars = nd.full((1, self.tagset_size), -10000.)
        vvars[0, self.tag2idx[START_TAG]] = 0

        for feat in feats:
            bptrs_t = []  # holds the backpointers for this step
            viterbivars_t = []  # holds the viterbi variables for this step

            for next_tag in range(self.tagset_size):
                # next_tag_var[i] holds the viterbi variable for tag i at the
                # previous step, plus the score of transitioning
                # from tag i to next_tag.
                # We don't include the emission scores here because the max
                # does not depend on them (we add them in below)
                next_tag_var = vvars + self.transitions.data()[next_tag]
                best_tag_id = argmax(next_tag_var)
                bptrs_t.append(best_tag_id)
                viterbivars_t.append(next_tag_var[0, best_tag_id])
            # Now add in the emission scores, and assign vvars to the set
            # of viterbi variables we just computed
            vvars = (nd.concat(*viterbivars_t, dim=0) + feat).reshape((1, -1))
            backpointers.append(bptrs_t)

        # Transition to STOP_TAG
        terminal_var = vvars + self.transitions.data()[self.tag2idx[STOP_TAG]]
        best_tag_id = argmax(terminal_var)
        path_score = terminal_var[0, best_tag_id]

        # Follow the back pointers to decode the best path.
        best_path = [best_tag_id]
        for bptrs_t in reversed(backpointers):
            best_tag_id = bptrs_t[best_tag_id]
            best_path.append(best_tag_id)
        # Pop off the start tag (we don't want to return that to the caller)
        start = best_path.pop()
        assert start == self.tag2idx[START_TAG]  # Sanity check
        best_path.reverse()
        return path_score, best_path 
Example #19
Source File: utils.py    From MXNet-Gluon-Style-Transfer with MIT License
def add_imagenet_mean_batch(batch):
    batch = F.swapaxes(batch, 0, 1)
    (b, g, r) = F.split(batch, num_outputs=3, axis=0)
    r = r + 123.680
    g = g + 116.779
    b = b + 103.939
    batch = F.concat(b, g, r, dim=0)
    batch = F.swapaxes(batch, 0, 1)
    """
    batch = denormalizer(batch)
    """
    return batch 
Example #20
Source File: utils.py    From MXNet-Gluon-Style-Transfer with MIT License
def preprocess_batch(batch):
    batch = F.swapaxes(batch, 0, 1)
    (r, g, b) = F.split(batch, num_outputs=3, axis=0)
    batch = F.concat(b, g, r, dim=0)
    batch = F.swapaxes(batch, 0, 1)
    return batch 
Example #21
Source File: model.py    From NER_BiLSTM_CRF_Chinese with Apache License 2.0
def _score_sentence(self, feats, tags):
        score = nd.array([0], ctx=self.ctx)
        tags = nd.concat(nd.array([self.tag2idx[self.START_TAG]], ctx=self.ctx), *tags, dim=0)
        for i, feat in enumerate(feats):
            score = score + \
                self.transitions[to_scalar(tags[i+1]), to_scalar(tags[i])] + feat[to_scalar(tags[i+1])]
        score = score + self.transitions[self.tag2idx[self.STOP_TAG],
                                         to_scalar(tags[int(tags.shape[0]-1)])]
        return score 
Example #22
Source File: model.py    From NER_BiLSTM_CRF_Chinese with Apache License 2.0
def _viterbi_decode(self, feats):
        backpointers = []

        vvars = nd.full((1, self.tagset_size), -10000., ctx=self.ctx)
        vvars[0, self.tag2idx[self.START_TAG]] = 0

        for feat in feats:
            bptrs_t = []
            viterbivars_t = []

            for next_tag in range(self.tagset_size):

                next_tag_var = vvars + self.transitions[next_tag]
                best_tag_id = argmax(next_tag_var)
                bptrs_t.append(best_tag_id)
                viterbivars_t.append(next_tag_var[0, best_tag_id])

            vvars = (nd.concat(*viterbivars_t, dim=0) + feat).reshape((1, -1))
            backpointers.append(bptrs_t)

        terminal_var = vvars + self.transitions[self.tag2idx[self.STOP_TAG]]
        best_tag_id = argmax(terminal_var)
        path_score = terminal_var[0, best_tag_id]

        best_path = [best_tag_id]
        for bptrs_t in reversed(backpointers):
            best_tag_id = bptrs_t[best_tag_id]
            best_path.append(best_tag_id)
        start = best_path.pop()
        assert start == self.tag2idx[self.START_TAG]
        best_path.reverse()
        return path_score, best_path 
Example #23
Source File: utils.py    From SNIPER-mxnet with Apache License 2.0
def tensor_save_bgrimage(tensor, filename, cuda=False):
    (b, g, r) = F.split(tensor, num_outputs=3, axis=0)
    tensor = F.concat(r, g, b, dim=0)
    tensor_save_rgbimage(tensor, filename, cuda) 
Example #24
Source File: utils.py    From SNIPER-mxnet with Apache License 2.0
def subtract_imagenet_mean_batch(batch):
    """Subtract ImageNet mean pixel-wise from a BGR image."""
    batch = F.swapaxes(batch, 0, 1)
    (r, g, b) = F.split(batch, num_outputs=3, axis=0)
    r = r - 123.680
    g = g - 116.779
    b = b - 103.939
    batch = F.concat(r, g, b, dim=0)
    batch = F.swapaxes(batch, 0, 1)
    return batch 
Example #25
Source File: utils.py    From SNIPER-mxnet with Apache License 2.0
def subtract_imagenet_mean_preprocess_batch(batch):
    """Subtract ImageNet mean pixel-wise from a BGR image."""
    batch = F.swapaxes(batch, 0, 1)
    (r, g, b) = F.split(batch, num_outputs=3, axis=0)
    r = r - 123.680
    g = g - 116.779
    b = b - 103.939
    batch = F.concat(b, g, r, dim=0)
    batch = F.swapaxes(batch, 0, 1)
    return batch 
Example #26
Source File: utils.py    From SNIPER-mxnet with Apache License 2.0
def add_imagenet_mean_batch(batch):
    batch = F.swapaxes(batch, 0, 1)
    (b, g, r) = F.split(batch, num_outputs=3, axis=0)
    r = r + 123.680
    g = g + 116.779
    b = b + 103.939
    batch = F.concat(b, g, r, dim=0)
    batch = F.swapaxes(batch, 0, 1)
    """
    batch = denormalizer(batch)
    """
    return batch 
Example #27
Source File: utils.py    From SNIPER-mxnet with Apache License 2.0
def preprocess_batch(batch):
    batch = F.swapaxes(batch, 0, 1)
    (r, g, b) = F.split(batch, num_outputs=3, axis=0)
    batch = F.concat(b, g, r, dim=0)
    batch = F.swapaxes(batch, 0, 1)
    return batch 
Example #28
Source File: lstm_crf.py    From SNIPER-mxnet with Apache License 2.0
def _score_sentence(self, feats, tags):
        # Gives the score of a provided tag sequence
        score = nd.array([0])
        tags = nd.concat(nd.array([self.tag2idx[START_TAG]]), *tags, dim=0)
        for i, feat in enumerate(feats):
            score = score + \
                self.transitions[to_scalar(tags[i+1]), to_scalar(tags[i])] + feat[to_scalar(tags[i+1])]
        score = score + self.transitions[self.tag2idx[STOP_TAG],
                                         to_scalar(tags[int(tags.shape[0]-1)])]
        return score 
Example #29
Source File: lstm_crf.py    From SNIPER-mxnet with Apache License 2.0
def _viterbi_decode(self, feats):
        backpointers = []

        # Initialize the viterbi variables in log space
        vvars = nd.full((1, self.tagset_size), -10000.)
        vvars[0, self.tag2idx[START_TAG]] = 0

        for feat in feats:
            bptrs_t = []  # holds the backpointers for this step
            viterbivars_t = []  # holds the viterbi variables for this step

            for next_tag in range(self.tagset_size):
                # next_tag_var[i] holds the viterbi variable for tag i at the
                # previous step, plus the score of transitioning
                # from tag i to next_tag.
                # We don't include the emission scores here because the max
                # does not depend on them (we add them in below)
                next_tag_var = vvars + self.transitions[next_tag]
                best_tag_id = argmax(next_tag_var)
                bptrs_t.append(best_tag_id)
                viterbivars_t.append(next_tag_var[0, best_tag_id])
            # Now add in the emission scores, and assign vvars to the set
            # of viterbi variables we just computed
            vvars = (nd.concat(*viterbivars_t, dim=0) + feat).reshape((1, -1))
            backpointers.append(bptrs_t)

        # Transition to STOP_TAG
        terminal_var = vvars + self.transitions[self.tag2idx[STOP_TAG]]
        best_tag_id = argmax(terminal_var)
        path_score = terminal_var[0, best_tag_id]

        # Follow the back pointers to decode the best path.
        best_path = [best_tag_id]
        for bptrs_t in reversed(backpointers):
            best_tag_id = bptrs_t[best_tag_id]
            best_path.append(best_tag_id)
        # Pop off the start tag (we don't want to return that to the caller)
        start = best_path.pop()
        assert start == self.tag2idx[START_TAG]  # Sanity check
        best_path.reverse()
        return path_score, best_path 
Example #30
Source File: kaggle_k_fold_cross_validation.py    From SNIPER-mxnet with Apache License 2.0
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
                       learning_rate, weight_decay, batch_size):
    """Conducts k-fold cross validation for the model."""
    assert k > 1
    fold_size = X_train.shape[0] // k

    train_loss_sum = 0.0
    test_loss_sum = 0.0
    for test_idx in range(k):
        X_val_test = X_train[test_idx * fold_size: (test_idx + 1) *
                                                   fold_size, :]
        y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
        val_train_defined = False
        for i in range(k):
            if i != test_idx:
                X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
                y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
                if not val_train_defined:
                    X_val_train = X_cur_fold
                    y_val_train = y_cur_fold
                    val_train_defined = True
                else:
                    X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
                    y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
        net = get_net()
        train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch,
                           learning_rate, weight_decay, batch_size)
        train_loss_sum += train_loss
        test_loss = get_rmse_log(net, X_val_test, y_val_test)
        print("Test loss: %f" % test_loss)
        test_loss_sum += test_loss
    return train_loss_sum / k, test_loss_sum / k

# The sets of parameters. Better results are obtained with modifications.
# These parameters can be fine-tuned with k-fold cross-validation.