Python utils.normalize() Examples

The following are 22 code examples of utils.normalize(), extracted from open-source projects. Each example notes its original project and source file. You may also want to check out all available functions/classes of the module utils, or try the search function.
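Note that different projects define utils.normalize() differently. In the retrieval examples below (deep-code-search, hfsoftmax, face_recognition_framework) it performs row-wise L2 normalization so that cosine similarity reduces to a dot product. A minimal sketch under that assumption (not any project's exact helper):

import numpy as np

def normalize(data):
    # scale each row vector to unit L2 length (assumed behavior)
    return data / np.linalg.norm(data, axis=1, keepdims=True)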
Example #1
Source File: search.py    From deep-code-search with MIT License
def search(config, model, vocab, query, n_results=10):
    model.eval()
    device = next(model.parameters()).device
    desc, desc_len = sent2indexes(query, vocab, config['desc_len'])  # convert the query into word indices
    desc = torch.from_numpy(desc).unsqueeze(0).to(device)
    desc_len = torch.from_numpy(desc_len).clamp(max=config['desc_len']).to(device)
    with torch.no_grad():
        desc_repr = model.desc_encoding(desc, desc_len).data.cpu().numpy().astype(np.float32) # [1 x dim]
    if config['sim_measure'] == 'cos':  # normalize the vector for fast cosine computation
        desc_repr = normalize(desc_repr)  # [1 x dim]
    results = []
    threads = []
    for i, codevecs_chunk in enumerate(codevecs):
        t = threading.Thread(target=search_thread,
                             args=(results, desc_repr, codevecs_chunk, i, n_results, config['sim_measure']))
        threads.append(t)
    for t in threads:
        t.start()
    for t in threads:  # wait until all sub-threads have completed
        t.join()
    return results
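The query vector is normalized above so that each search_thread can rank code vectors with a single matrix product. A minimal sketch of that ranking step, assuming L2-normalized inputs (rank_by_cosine is illustrative, not the project's actual search_thread):

import numpy as np

def rank_by_cosine(desc_repr, codevecs_chunk, n_results):
    # desc_repr: [1 x dim], codevecs_chunk: [n x dim]; with unit-length rows
    # on both sides, the dot product equals cosine similarity
    sims = codevecs_chunk.dot(desc_repr.T).ravel()
    top = np.argsort(-sims)[:n_results]  # indices of the best n_results
    return top, sims[top]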
Example #2
Source File: main.py    From deep-code-search with MIT License
def search(self, model, vocab, query, n_results=10):
        desc = [convert(vocab, query)]  # convert desc sentence to word indices
        padded_desc = pad(desc, self.data_params['desc_len'])
        desc_repr = model.repr_desc([padded_desc])
        desc_repr = desc_repr.astype(np.float32)
        desc_repr = normalize(desc_repr).T  # [dim x 1]
        codes, sims = [], []
        threads = []
        for i, code_reprs_chunk in enumerate(self._code_reprs):
            t = threading.Thread(target=self.search_thread,
                                 args=(codes, sims, desc_repr, code_reprs_chunk, i, n_results))
            threads.append(t)
        for t in threads:
            t.start()
        for t in threads:  # wait until all sub-threads finish
            t.join()
        return codes, sims
Example #3
Source File: eval.py    From hfsoftmax with MIT License
def main():
    global args
    args = parser.parse_args()

    if not os.path.exists(args.output_path):
        comm = 'python extract_feat.py \
                --arch {} \
                --batch-size {} \
                --input-size {} \
                --feature-dim {} \
                --load-path {} \
                --bin-file {} \
                --output-path {}'\
                .format(args.arch, args.batch_size, args.input_size, args.feature_dim,
                        args.load_path, args.bin_file, args.output_path)
        print(' '.join(comm.split()))
        os.system(comm)

    features = np.load(args.output_path).reshape(-1, args.feature_dim)
    _, lbs = bin_loader(args.bin_file)
    print('feature shape: {}'.format(features.shape))
    assert features.shape[0] == 2 * len(lbs), "{} vs {}".format(
        features.shape[0], 2 * len(lbs))

    features = normalize(features)
    _, _, acc, val, val_std, far = evaluate(features,
                                            lbs,
                                            nrof_folds=args.nfolds,
                                            distance_metric=0)
    print("accuracy: {:.4f}({:.4f})".format(acc.mean(), acc.std())) 
Example #4
Source File: train_search.py    From sgas with MIT License
def score_image(type, score, epoch):
  score_img = vutils.make_grid(
    torch.unsqueeze(torch.unsqueeze(torch.unsqueeze(score, 1), 2), 3),
    nrow=7,
    normalize=True,
    pad_value=0.5)
  writer.add_image(type + '_score', score_img, epoch) 
Example #5
Source File: train.py    From nnabla-examples with Apache License 2.0
def evaluate(model, pts_true, grid_size, volume_factor, monitor_distances,
             i, save_interval_epoch=1):
    if i % save_interval_epoch != 0:
        return
    pts, vol = utils.compute_pts_vol(model, grid_size, volume_factor)
    mesh = utils.create_mesh_from_volume(vol)
    pcd = mesh.sample_points_poisson_disk(len(pts_true), seed=412)
    pts_pred = np.asarray(pcd.points)
    pts_pred = utils.normalize(pts_pred)
    # Pair-wise distance
    cd0, cd1, cd, hd0, hd1, hd = utils.chamfer_hausdorff_dists(
        pts_pred, pts_true)
    for m, d in zip(monitor_distances, [cd0, cd1, cd, hd0, hd1, hd]):
        m.add(i, d) 
Example #6
Source File: evaluate.py    From nnabla-examples with Apache License 2.0
def main(args):
    # Context
    ctx = get_extension_context("cudnn", device_id=args.device_id)
    nn.set_default_context(ctx)

    # Dataset (input is normalized in [-1, 1])
    ds = point_cloud_data_source(args.fpath, knn=-1, test=True)
    pts_true = ds.points

    # Sample from mesh (unnormalized)
    mesh = utils.read_mesh(args.mesh_data_path)
    pcd = mesh.sample_points_poisson_disk(ds.size, seed=412)
    pts_pred = np.asarray(pcd.points)
    pts_pred = utils.normalize(pts_pred)

    # Pair-wise distance
    cd0, cd1, cd, hd0, hd1, hd = utils.chamfer_hausdorff_dists(
        pts_pred, pts_true)

    # Chamfer distance
    print("----- Chamfer distance -----")
    log = """
    One-sided Chamfer distance (Pred, True):   {}
    One-sided Chamfer distance (True, Pred):   {}
    Chamfer distance:                          {}
    """.format(cd0, cd1, cd)
    print(log)

    # Hausdorff distance
    print("----- Hausdorff distance -----")
    log = """
    One-sided Hausdorff distance (Pred, True): {}
    One-sided Hausdorff distance (True, Pred): {}
    Hausdorff distance:                        {}
    """.format(hd0, hd1, hd)
    print(log) 
Example #7
Source File: datasets.py    From nnabla-examples with Apache License 2.0
def _preprocess(self, points):
        return utils.normalize(points) 
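In the point-cloud examples (#5–#7), utils.normalize operates on an [n x 3] array of points rather than feature vectors; the evaluate script's comment states the input is normalized into [-1, 1]. A plausible sketch under that assumption (the repo's actual helper may center or scale differently):

import numpy as np

def normalize_points(points):
    points = points - points.mean(axis=0)  # center the cloud at the origin
    return points / np.abs(points).max()   # scale into [-1, 1]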
Example #8
Source File: repr_code.py    From deep-code-search with MIT License
def repr_code(args):

    device = torch.device(f"cuda:{args.gpu_id}" if torch.cuda.is_available() else "cpu")
    config = getattr(configs, 'config_' + args.model)()

    ##### Define model ######
    logger.info('Constructing Model..')
    model = getattr(models, args.model)(config)  # initialize the model
    if args.reload_from > 0:
        ckpt_path = f'./output/{args.model}/{args.dataset}/models/step{args.reload_from}.h5'
        model.load_state_dict(torch.load(ckpt_path, map_location=device))
    model = model.to(device)
    model.eval()

    data_path = args.data_path+args.dataset+'/'
    use_set = eval(config['dataset_name'])(data_path, config['use_names'], config['name_len'],
                              config['use_apis'], config['api_len'],
                              config['use_tokens'], config['tokens_len'])

    data_loader = torch.utils.data.DataLoader(dataset=use_set, batch_size=args.batch_size, 
                                  shuffle=False, drop_last=False, num_workers=1)

    chunk_id = 0
    vecs, n_processed = [], 0 
    for batch in tqdm(data_loader):
        batch_gpu = [tensor.to(device) for tensor in batch]
        with torch.no_grad():
            reprs = model.code_encoding(*batch_gpu).data.cpu().numpy()
        reprs = reprs.astype(np.float32) # [batch x dim]
        if config['sim_measure'] == 'cos':  # normalize for fast cosine computation
            reprs = normalize(reprs)
        vecs.append(reprs)
        n_processed += batch[0].size(0)
        if n_processed >= args.chunk_size:
            output_path = f"{data_path}{config['use_codevecs'][:-3]}_part{chunk_id}.h5"
            save_vecs(np.vstack(vecs), output_path)
            chunk_id += 1
            vecs, n_processed = [], 0
    # save the last chunk (probably incomplete)
    output_path = f"{data_path}{config['use_codevecs'][:-3]}_part{chunk_id}.h5"
    save_vecs(np.vstack(vecs), output_path) 
Example #9
Source File: main.py    From deep-code-search with MIT License
def repr_code(self, model):
        logger.info('Loading the use data ..')
        methnames = data_loader.load_hdf5(self.data_path + self.data_params['use_methname'], 0, -1)
        apiseqs = data_loader.load_hdf5(self.data_path + self.data_params['use_apiseq'], 0, -1)
        tokens = data_loader.load_hdf5(self.data_path + self.data_params['use_tokens'], 0, -1)
        methnames = pad(methnames, self.data_params['methname_len'])
        apiseqs = pad(apiseqs, self.data_params['apiseq_len'])
        tokens = pad(tokens, self.data_params['tokens_len'])

        logger.info('Representing code ..')
        vecs = model.repr_code([methnames, apiseqs, tokens], batch_size=10000)
        vecs = vecs.astype(np.float32)  # np.float was removed from NumPy; float32 matches the other examples
        vecs = normalize(vecs)
        return vecs 
Example #10
Source File: model_based_policy.py    From cs294-112_hws with MIT License
def _dynamics_func(self, state, action, reuse):
        """
            Takes as input a state and action, and predicts the next state

            returns:
                next_state_pred: predicted next state

            implementation details (in order):
                (a) Normalize both the state and action by using the statistics of self._init_dataset and
                    the utils.normalize function
                (b) Concatenate the normalized state and action
                (c) Pass the concatenated, normalized state-action tensor through a neural network with
                    self._nn_layers number of layers using the function utils.build_mlp. The resulting output
                    is the normalized predicted difference between the next state and the current state
                (d) Unnormalize the delta state prediction, and add it to the current state in order to produce
                    the predicted next state

        """
        ### PROBLEM 1
        ### YOUR CODE HERE
        state_norm = utils.normalize(state, self._init_dataset.state_mean, self._init_dataset.state_std)
        action_norm = utils.normalize(action, self._init_dataset.action_mean, self._init_dataset.action_std)
        input_layer = tf.concat([state_norm, action_norm], axis=1)
        delta_pred_norm = utils.build_mlp(
            input_layer, 
            self._state_dim, 
            scope='dynamics_func',
            n_layers=self._nn_layers,
            reuse=reuse
        )
        delta_pred = utils.unnormalize(delta_pred_norm, self._init_dataset.delta_state_mean, self._init_dataset.delta_state_std)
        next_state_pred = state + delta_pred

        return next_state_pred 
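The docstring above treats utils.normalize as standardization with dataset statistics. A minimal sketch of the normalize/unnormalize pair it assumes (illustrative, not the course-provided utils; the epsilon guards against zero std):

def normalize(x, mean, std, eps=1e-8):
    return (x - mean) / (std + eps)

def unnormalize(x, mean, std, eps=1e-8):
    return x * (std + eps) + mean

Both work elementwise on NumPy arrays and TensorFlow tensors alike via broadcasting.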
Example #11
Source File: maml.py    From MT-net with MIT License
def forward_conv_withT(self, inp, weights, reuse=False, scope=''):
        # reuse is for the normalization parameters.
        def conv_tout(inp, cweight, bweight, rweight, reuse, scope, activation=tf.nn.relu, max_pool_pad='VALID',
                       residual=False):
            stride, no_stride = [1, 2, 2, 1], [1, 1, 1, 1]
            if FLAGS.max_pool:
                conv_output = tf.nn.conv2d(inp, cweight, no_stride, 'SAME') + bweight
            else:
                conv_output = tf.nn.conv2d(inp, cweight, stride, 'SAME') + bweight
            conv_output = tf.nn.conv2d(conv_output, rweight, no_stride, 'SAME')
            normed = normalize(conv_output, activation, reuse, scope)
            if FLAGS.max_pool:
                normed = tf.nn.max_pool(normed, stride, stride, max_pool_pad)
            return normed

        channels = self.channels
        inp = tf.reshape(inp, [-1, self.img_size, self.img_size, channels])
        hidden1 = conv_tout(inp, weights['conv1'], weights['b1'], weights['conv1_f'], reuse, scope + '0')
        hidden2 = conv_tout(hidden1, weights['conv2'], weights['b2'], weights['conv2_f'], reuse, scope + '1')
        hidden3 = conv_tout(hidden2, weights['conv3'], weights['b3'], weights['conv3_f'], reuse, scope + '2')
        hidden4 = conv_tout(hidden3, weights['conv4'], weights['b4'], weights['conv4_f'], reuse, scope + '3')

        if FLAGS.datasource == 'miniimagenet':
            # last hidden layer is 6x6x64-ish, reshape to a vector
            hidden4 = tf.reshape(hidden4, [-1, np.prod([int(dim) for dim in hidden4.get_shape()[1:]])])
        else:
            hidden4 = tf.reduce_mean(hidden4, [1, 2])
        hidden5 = tf.matmul(hidden4, weights['w5']) + weights['b5']
        return tf.matmul(hidden5, weights['w5_f']) 
Example #12
Source File: maml.py    From MT-net with MIT License
def forward_fc_withT(self, inp, weights, reuse=False):
        hidden = tf.matmul(tf.matmul(inp, weights['w1']) + weights['b1'], weights['w1_f'])
        hidden = normalize(hidden, activation=tf.nn.relu, reuse=reuse, scope='1')
        hidden = tf.matmul(tf.matmul(hidden, weights['w2']) + weights['b2'], weights['w2_f'])
        hidden = normalize(hidden, activation=tf.nn.relu, reuse=reuse, scope='2')
        hidden = tf.matmul(tf.matmul(hidden, weights['w3']) + weights['b3'], weights['w3_f'])
        return hidden 
Example #13
Source File: maml.py    From MT-net with MIT License
def forward_fc(self, inp, weights, reuse=False):
        hidden = normalize(tf.matmul(inp, weights['w1']) + weights['b1'],
                           activation=tf.nn.relu, reuse=reuse, scope='0')
        for i in range(1, len(self.dim_hidden)):
            hidden = normalize(tf.matmul(hidden, weights['w' + str(i + 1)]) + weights['b' + str(i + 1)],
                               activation=tf.nn.relu, reuse=reuse, scope=str(i + 1))
        return tf.matmul(hidden, weights['w' + str(len(self.dim_hidden) + 1)]) + \
               weights['b' + str(len(self.dim_hidden) + 1)] 
Example #14
Source File: models.py    From cornerwise with MIT License
def save(self, *args, **kwargs):
        if not self.handle:
            self.handle = utils.normalize(self.name)
        super().save(*args, **kwargs) 
Example #15
Source File: models.py    From cornerwise with MIT License
def forgiving_dateparse(dt, tz=pytz.utc):
    if isinstance(dt, str):
        dt = dateparse.parse_datetime(dt)
    elif not isinstance(dt, datetime):
        return None

    if dt.tzinfo:
        try:
            return tz.normalize(dt)
        except AttributeError:
            return tz.localize(dt.replace(tzinfo=None))
    return tz.localize(dt) 
Example #16
Source File: models.py    From cornerwise with MIT License
def local_now(region_name=None):
    tz = region_tz(region_name)
    return tz.normalize(pytz.utc.localize(datetime.utcnow())) 
Example #17
Source File: models.py    From cornerwise with MIT License
def localize_dt(dt: datetime, region_name=None):
    tz = region_tz(region_name)
    return tz.normalize(dt) if dt.tzinfo else tz.localize(dt) 
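Note that Examples #15–#17 call pytz's tz.normalize(), not utils.normalize(): after arithmetic on an aware datetime, normalize() corrects the UTC offset across DST transitions. For instance:

from datetime import datetime, timedelta
import pytz

eastern = pytz.timezone('US/Eastern')
before = eastern.localize(datetime(2020, 3, 7, 12, 0))  # EST, day before the DST switch
after = eastern.normalize(before + timedelta(days=1))   # offset corrected to EDT
print(before.tzname(), after.tzname())                  # EST EDT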
Example #18
Source File: maml.py    From maml with MIT License
def forward_fc(self, inp, weights, reuse=False):
        hidden = normalize(tf.matmul(inp, weights['w1']) + weights['b1'],
                           activation=tf.nn.relu, reuse=reuse, scope='0')
        for i in range(1, len(self.dim_hidden)):
            hidden = normalize(tf.matmul(hidden, weights['w' + str(i + 1)]) + weights['b' + str(i + 1)],
                               activation=tf.nn.relu, reuse=reuse, scope=str(i + 1))
        return tf.matmul(hidden, weights['w' + str(len(self.dim_hidden) + 1)]) + \
               weights['b' + str(len(self.dim_hidden) + 1)]
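In the MAML-family examples (#11–#13, #18, #19), normalize() is a network-building helper rather than a vector scaler: it typically wraps batch norm or layer norm (selected by a flag) and then applies the activation. A sketch under that assumption, using the FLAGS object these repos already rely on:

import tensorflow as tf

def normalize(inp, activation, reuse, scope):
    # FLAGS.norm comes from the repo's command-line flags, as in Example #11
    if FLAGS.norm == 'batch_norm':
        return tf.contrib.layers.batch_norm(inp, activation_fn=activation,
                                            reuse=reuse, scope=scope)
    elif FLAGS.norm == 'layer_norm':
        return tf.contrib.layers.layer_norm(inp, activation_fn=activation,
                                            reuse=reuse, scope=scope)
    else:  # no normalization
        return activation(inp) if activation is not None else inp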
Example #19
Source File: maml.py    From cactus-maml with MIT License
def forward_fc(self, inp, weights, prefix, reuse=False):
        hidden = normalize(tf.matmul(inp, weights['w1']) + weights['b1'],
                           activation=tf.nn.relu, reuse=reuse, scope='0')
        for i in range(1, len(self.dim_hidden)):
            hidden = normalize(tf.matmul(hidden, weights['w' + str(i + 1)]) + weights['b' + str(i + 1)],
                               activation=tf.nn.relu, reuse=reuse, scope=str(i + 1))
        logits = tf.matmul(hidden, weights['w' + str(len(self.dim_hidden) + 1)]) + \
                 weights['b' + str(len(self.dim_hidden) + 1)]
        if 'val' in prefix:
            logits = tf.gather(logits, tf.range(self.dim_output_val), axis=1)
        return logits 
Example #20
Source File: model_based_policy.py    From cs294-112_hws with MIT License
def _setup_training(self, state_ph, next_state_ph, next_state_pred):
        """
            Takes as input the current state, next state, and predicted next state, and returns
            the loss and optimizer for training the dynamics model

            returns:
                loss: Scalar loss tensor
                optimizer: Operation used to perform gradient descent

            implementation details (in order):
                (a) Compute both the actual state difference and the predicted state difference
                (b) Normalize both of these state differences by using the statistics of self._init_dataset and
                    the utils.normalize function
                (c) The loss function is the mean-squared-error between the normalized state difference and
                    normalized predicted state difference
                (d) Create the optimizer by minimizing the loss using the Adam optimizer with self._learning_rate

        """
        ### PROBLEM 1
        ### YOUR CODE HERE
        diff = next_state_ph - state_ph
        diff_pred = next_state_pred - state_ph
        diff_norm = utils.normalize(diff, self._init_dataset.delta_state_mean, self._init_dataset.delta_state_std)
        diff_pred_norm = utils.normalize(diff_pred, self._init_dataset.delta_state_mean, self._init_dataset.delta_state_std)
        loss = tf.losses.mean_squared_error(diff_norm, diff_pred_norm)
        optimizer = tf.train.AdamOptimizer(self._learning_rate).minimize(loss)

        return loss, optimizer 
Example #21
Source File: main.py    From face_recognition_framework with MIT License
def evaluation(test_loader, model, num, outfeat_fn, benchmark):
    load_feat = True
    if not os.path.isfile(outfeat_fn) or not load_feat:
        features = extract(test_loader, model, num, outfeat_fn, silent=True)
    else:
        print("loading from: {}".format(outfeat_fn))
        features = np.fromfile(outfeat_fn, dtype=np.float32).reshape(-1, args.model.feature_dim)

    if benchmark == "megaface":
        r = test_megaface(features)
        log(' * Megaface: 1e-6 [{}], 1e-5 [{}], 1e-4 [{}]'.format(r[-1], r[-2], r[-3]))
        return r[-1]
    else:
        features = normalize(features)
        _, lbs = bin_loader("{}/{}.bin".format(args.test.test_root, benchmark))
        _, _, acc, val, val_std, far = evaluate(
            features, lbs, nrof_folds=args.test.nfolds, distance_metric=0)
    
        log(" * {}: accuracy: {:.4f}({:.4f})".format(benchmark, acc.mean(), acc.std()))
        return acc.mean()
Example #22
Source File: vis.py    From VisualizationCNN with MIT License
def conv_filter(model, layer_name, img):
    """Get the filter of conv layer.

    Args:
           model: keras model.
           layer_name: name of layer in the model.
           img: processed input image.

    Returns:
           filters.
    """
    # this is the placeholder for the input images
    input_img = model.input

    # get the symbolic outputs of each "key" layer (we gave them unique names).
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])

    try:
        layer_output = layer_dict[layer_name].output
    except KeyError:
        raise Exception('No layer named {}!'.format(layer_name))

    kept_filters = []
    for i in range(int(layer_output.shape[-1])):  # one pass per output filter
        loss = K.mean(layer_output[:, :, :, i])
        # compute the gradient of the input picture with this loss
        grads = K.gradients(loss, input_img)[0]

        # normalization trick: we normalize the gradient
        grads = utils.normalize(grads)

        # this function returns the loss and grads given the input picture
        iterate = K.function([input_img], [loss, grads])

        # step size for gradient ascent
        step = 1.
        # run gradient ascent for 40 steps
        fimg = img.copy()

        for j in range(40):
            loss_value, grads_value = iterate([fimg])
            fimg += grads_value * step

        # decode the resulting input image
        fimg = utils.deprocess_image(fimg[0])
        kept_filters.append((fimg, loss_value))

    # sort filter results by loss, best first
    kept_filters.sort(key=lambda x: x[1], reverse=True)

    return np.array([f[0] for f in kept_filters])
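The "normalization trick" here is commonly the RMS gradient normalization from the classic Keras filter-visualization tutorial, which keeps the gradient-ascent step size stable. A sketch under that assumption:

from keras import backend as K

def normalize(x):
    # scale the gradient tensor to unit RMS
    return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())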