Python numpy.float32() Examples

The following code examples show how to use numpy.float32(). They are taken from open-source Python projects.
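As a quick orientation before the project examples, here is a minimal, self-contained sketch of what numpy.float32() itself does (construct a single-precision scalar, cast an array); it assumes nothing beyond an installed NumPy.

import numpy as np

x = np.float32(0.1)                           # single-precision scalar
print(x, x.dtype)                             # 0.1 float32
print(np.float64(x) - 0.1)                    # small error introduced by float32 rounding

a = np.array([1, 2, 3], dtype=np.float32)     # array created directly with float32 dtype
b = a.astype(np.float32, copy=False)          # already float32, so no copy is made
print(a.dtype, b is a)                        # float32 True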

Example 1
Project: UR5_Controller   Author: tsinghua-rll   File: quaternion.py    MIT License   7 votes
def from_vector_to_q(v1_c1, v1_c2, v2_c1, v2_c2, v3_c1=(0, 0, 0), v3_c2=(0, 0, 0)):
    """
    Note: v1 and v2 must not point in the same direction!
    :param v1_c1, v1_c2: vector 1 in coordinate frames 1 and 2
    :param v2_c1, v2_c2: vector 2 in coordinate frames 1 and 2
    :param v3_c1, v3_c2: optional origin point in coordinate frames 1 and 2
    :return: rotation quaternion from frame 1 to frame 2, and the translation:
            (qw, qi, qj, qk), (x, y, z)
    """
    v1_c1 = np.asarray(v1_c1, dtype=np.float32)
    v2_c1 = np.asarray(v2_c1, dtype=np.float32)
    v3_c1 = np.asarray(v3_c1, dtype=np.float32)
    v1_c2 = np.asarray(v1_c2, dtype=np.float32)
    v2_c2 = np.asarray(v2_c2, dtype=np.float32)
    v3_c2 = np.asarray(v3_c2, dtype=np.float32)
    c1 = np.asarray((v1_c1 - v3_c1, v2_c1 - v3_c1, np.cross(v1_c1 - v3_c1, v2_c1 - v3_c1)), dtype=np.float32).T
    c2 = np.asarray((v1_c2 - v3_c2, v2_c2 - v3_c2, np.cross(v1_c2 - v3_c2, v2_c2 - v3_c2)), dtype=np.float32).T
    mat = c2.dot(c1.T).dot(np.linalg.pinv(c1.dot(c1.T)))
    return from_matrix_to_q(mat), (v1_c2 - mat.dot(v1_c1) + v2_c2 - mat.dot(v2_c1) + v3_c2 - mat.dot(v3_c1)) / 3.0 
Example 2
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License   6 votes
def __init__(self, input_wave_file, output_wave_file, target_phrase):
        self.pop_size = 100
        self.elite_size = 10
        self.mutation_p = 0.005
        self.noise_stdev = 40
        self.noise_threshold = 1
        self.mu = 0.9
        self.alpha = 0.001
        self.max_iters = 3000
        self.num_points_estimate = 100
        self.delta_for_gradient = 100
        self.delta_for_perturbation = 1e3
        self.input_audio = load_wav(input_wave_file).astype(np.float32)
        self.pop = np.expand_dims(self.input_audio, axis=0)
        self.pop = np.tile(self.pop, (self.pop_size, 1))
        self.output_wave_file = output_wave_file
        self.target_phrase = target_phrase
        self.funcs = self.setup_graph(self.pop, np.array([toks.index(x) for x in target_phrase])) 
Example 3
Project: cat-bbs   Author: aleju   File: bbs.py    MIT License   6 votes
def draw_on_image(self, img, color=[0, 255, 0], alpha=1.0, copy=True, from_img=None):
        if copy:
            img = np.copy(img)

        orig_dtype = img.dtype
        if alpha != 1.0 and img.dtype != np.float32:
            img = img.astype(np.float32, copy=False)

        for rect in self:
            if from_img is not None:
                rect.resize(from_img, img).draw_on_image(img, color=color, alpha=alpha, copy=False)
            else:
                rect.draw_on_image(img, color=color, alpha=alpha, copy=False)

        if orig_dtype != img.dtype:
            img = img.astype(orig_dtype, copy=False)

        return img 
Example 4
Project: cat-bbs   Author: aleju   File: bbs.py    MIT License   6 votes
def draw_on_image(self, img, color=[0, 255, 0], alpha=1.0, copy=True):
        if copy:
            img = np.copy(img)

        orig_dtype = img.dtype
        if alpha != 1.0 and img.dtype != np.float32:
            img = img.astype(np.float32, copy=False)

        for rectimg in self:
            if rectimg is not None:
                rectimg.draw_on_image(img, color=color, alpha=alpha, copy=False)

        if orig_dtype != img.dtype:
            img = img.astype(orig_dtype, copy=False)

        return img 
Example 5
Project: UR5_Controller   Author: tsinghua-rll   File: quaternion.py    MIT License   6 votes
def to_euler(q):
    # roll, pitch, yaw (rpy)
    sinr = 2.0 * (q[0] * q[1] + q[2] * q[3])
    cosr = 1.0 - 2.0 * (q[1] * q[1] + q[2] * q[2])
    roll = math.atan2(sinr, cosr)

    sinp = 2.0 * (q[0] * q[2] - q[3] * q[1])
    if math.fabs(sinp) >= 1.:
        pitch = math.copysign(np.pi / 2., sinp)
    else:
        pitch = math.asin(sinp)

    siny = 2.0 * (q[0] * q[3] + q[1] * q[2])
    cosy = 1.0 - 2.0 * (q[2] * q[2] + q[3] * q[3])
    yaw = math.atan2(siny, cosy)

    return np.asarray((roll, pitch, yaw), np.float32) 
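A short usage sketch for to_euler (my addition, assuming the function above plus math and numpy are in scope): the identity quaternion maps to zero roll/pitch/yaw, and a 90-degree rotation about the z axis yields yaw = pi/2.

import math

q_identity = (1.0, 0.0, 0.0, 0.0)          # (qw, qi, qj, qk)
print(to_euler(q_identity))                # [0. 0. 0.]

half = math.sqrt(0.5)
q_yaw90 = (half, 0.0, 0.0, half)           # 90-degree rotation about z
print(to_euler(q_yaw90))                   # roughly [0. 0. 1.5707964]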
Example 6
Project: UR5_Controller   Author: tsinghua-rll   File: quaternion.py    MIT License   6 votes
def from_vector_array_to_q(v_c1, v_c2):
    """
    Calculate the transform quaternion and translation vector from vector pairs given in two coordinate frames
    :param v_c1: list or tuple of vectors in the source coordinate frame
    :param v_c2: list or tuple of vectors in the target coordinate frame
    :return: rotation quaternion from frame 1 to frame 2, and the translation:
            (qw, qi, qj, qk), (x, y, z)
    """
    if len(v_c1) != len(v_c2) or len(v_c1) <= 3:
        print("Error! Not enough vector pairs, or the two arrays differ in length")
        return (1, 0, 0, 0), (0, 0, 0)

    v_c1 = np.asarray(v_c1, dtype=np.float32).T
    v_c2 = np.asarray(v_c2, dtype=np.float32).T
    # center each vector set on its own mean (keepdims keeps shape (3, 1) so the subtraction broadcasts per column)
    mean_c1 = np.mean(v_c1, axis=1, keepdims=True)
    mean_c2 = np.mean(v_c2, axis=1, keepdims=True)
    v_c1 -= mean_c1
    v_c2 -= mean_c2

    mat = v_c2.dot(v_c1.T).dot(np.linalg.pinv(v_c1.dot(v_c1.T)))
    return from_matrix_to_q(mat), np.mean(v_c2 - mat.dot(v_c1), 1) 
Example 7
Project: aospy   Author: spencerahill   File: data_loader.py    Apache License 2.0   6 votes
def _maybe_cast_to_float64(da):
    """Cast DataArrays to np.float64 if they are of type np.float32.

    Parameters
    ----------
    da : xr.DataArray
        Input DataArray

    Returns
    -------
    DataArray

    """
    if da.dtype == np.float32:
        logging.warning('Datapoints were stored using the np.float32 datatype. '
                        'For accurate reduction operations using bottleneck, '
                        'datapoints are being cast to the np.float64 datatype.'
                        ' For more information see: https://github.com/pydata/'
                        'xarray/issues/1346')
        return da.astype(np.float64)
    else:
        return da 
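A small usage sketch for _maybe_cast_to_float64 (my own illustration, not from the aospy sources; it assumes xarray is installed alongside numpy and logging):

import logging
import numpy as np
import xarray as xr

da32 = xr.DataArray(np.arange(4, dtype=np.float32))
da64 = _maybe_cast_to_float64(da32)       # logs the warning and returns a float64 copy
print(da32.dtype, da64.dtype)             # float32 float64

da_already_64 = xr.DataArray(np.arange(4, dtype=np.float64))
print(_maybe_cast_to_float64(da_already_64) is da_already_64)   # True: returned unchanged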
Example 8
Project: Att-ChemdNER   Author: lingluodlut   File: utils.py    Apache License 2.0   6 votes
def set_values(name, param, pretrained):
#{{{
    """
    Initialize a network parameter with pretrained values.
    We check that sizes are compatible.
    """
    param_value = param.get_value()
    if pretrained.size != param_value.size:
        raise Exception(
            "Size mismatch for parameter %s. Expected %i, found %i."
            % (name, param_value.size, pretrained.size)
        )
    param.set_value(np.reshape(
        pretrained, param_value.shape
    ).astype(np.float32))
#}}} 
Example 9
Project: Att-ChemdNER   Author: lingluodlut   File: optimization.py    Apache License 2.0   6 votes
def sgd(self, cost, params,constraints={}, lr=0.01):
#{{{
        """
        Stochastic gradient descent.
        """
        updates = []
        
        lr = theano.shared(np.float32(lr).astype(floatX))
        gradients = self.get_gradients(cost, params)
        
        for p, g in zip(params, gradients):
            v = -lr * g
            new_p = p + v
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            updates.append((p, new_p))

        return updates
#}}} 
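The np.float32(lr).astype(floatX) idiom used throughout these optimizers simply produces a scalar in the backend float type before it is wrapped in a Theano shared variable. A NumPy-only sketch of the cast, assuming floatX is 'float32' as it usually is in these projects:

import numpy as np

floatX = 'float32'                        # stands in for theano.config.floatX
lr0 = np.float32(0.01).astype(floatX)     # float32 scalar, shape ()
print(lr0, lr0.dtype, lr0.shape)          # 0.01 float32 ()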
Example 10
Project: Att-ChemdNER   Author: lingluodlut   File: optimization.py    Apache License 2.0   6 votes
def sgdmomentum(self, cost, params,constraints={}, lr=0.01,consider_constant=None, momentum=0.):
        """
        Stochastic gradient descent with momentum. Momentum has to be in [0, 1).
        """
        # Check that the momentum is a correct value
        assert 0 <= momentum < 1

        lr = theano.shared(np.float32(lr).astype(floatX))
        momentum = theano.shared(np.float32(momentum).astype(floatX))

        gradients = self.get_gradients(cost, params)
        velocities = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

        updates = []
        for param, gradient, velocity in zip(params, gradients, velocities):
            new_velocity = momentum * velocity - lr * gradient
            updates.append((velocity, new_velocity))
            new_p = param + new_velocity
            # apply constraints
            if param in constraints:
                c = constraints[param]
                new_p = c(new_p)
            updates.append((param, new_p))
        return updates 
Example 11
Project: Att-ChemdNER   Author: lingluodlut   File: optimization.py    Apache License 2.0   6 votes
def adagrad(self, cost, params, lr=1.0, epsilon=1e-6,consider_constant=None):
        """
        Adagrad. Based on http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf
        """
        lr = theano.shared(np.float32(lr).astype(floatX))
        epsilon = theano.shared(np.float32(epsilon).astype(floatX))

        gradients = self.get_gradients(cost, params,consider_constant)
        gsums = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

        updates = []
        for param, gradient, gsum in zip(params, gradients, gsums):
            new_gsum = gsum + gradient ** 2.
            updates.append((gsum, new_gsum))
            updates.append((param, param - lr * gradient / (T.sqrt(gsum + epsilon))))
        return updates 
Example 12
Project: Att-ChemdNER   Author: lingluodlut   File: optimization.py    Apache License 2.0   6 votes
def adadelta(self, cost, params, rho=0.95, epsilon=1e-6,consider_constant=None):
        """
        Adadelta. Based on:
        http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf
        """
        rho = theano.shared(np.float32(rho).astype(floatX))
        epsilon = theano.shared(np.float32(epsilon).astype(floatX))

        gradients = self.get_gradients(cost, params,consider_constant)
        accu_gradients = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]
        accu_deltas = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

        updates = []
        for param, gradient, accu_gradient, accu_delta in zip(params, gradients, accu_gradients, accu_deltas):
            new_accu_gradient = rho * accu_gradient + (1. - rho) * gradient ** 2.
            delta_x = - T.sqrt((accu_delta + epsilon) / (new_accu_gradient + epsilon)) * gradient
            new_accu_delta = rho * accu_delta + (1. - rho) * delta_x ** 2.
            updates.append((accu_gradient, new_accu_gradient))
            updates.append((accu_delta, new_accu_delta))
            updates.append((param, param + delta_x))
        return updates 
Example 13
Project: Att-ChemdNER   Author: lingluodlut   File: optimization.py    Apache License 2.0   6 votes
def rmsprop(self, cost, params, lr=0.001, rho=0.9, eps=1e-6,consider_constant=None):
        """
        RMSProp.
        """
        lr = theano.shared(np.float32(lr).astype(floatX))

        gradients = self.get_gradients(cost, params,consider_constant)
        accumulators = [theano.shared(np.zeros_like(p.get_value()).astype(np.float32)) for p in params]

        updates = []

        for param, gradient, accumulator in zip(params, gradients, accumulators):
            new_accumulator = rho * accumulator + (1 - rho) * gradient ** 2
            updates.append((accumulator, new_accumulator))

            new_param = param - lr * gradient / T.sqrt(new_accumulator + eps)
            updates.append((param, new_param))

        return updates 
Example 14
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    Apache License 2.0   6 votes
def in_top_k(predictions, targets, k):
    '''Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type int. output_i is 1 if
        targets_i is within top-k values of predictions_i
    '''
    predictions_top_k = T.argsort(predictions)[:, -k:]
    result, _ = theano.map(lambda prediction, target: any(equal(prediction, target)), sequences=[predictions_top_k, targets])
    return result


# CONVOLUTIONS 
Example 15
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    Apache License 2.0   6 votes
def ctc_path_probs(predict, Y, alpha=1e-4):
    smoothed_predict = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0]
    L = T.log(smoothed_predict)
    zeros = T.zeros_like(L[0])
    log_first = zeros

    f_skip_idxs = ctc_create_skip_idxs(Y)
    b_skip_idxs = ctc_create_skip_idxs(Y[::-1])  # there should be a shortcut to calculating this

    def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev):
        f_active_next, log_f_next = ctc_update_log_p(f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev)
        b_active_next, log_b_next = ctc_update_log_p(b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev)
        return f_active_next, log_f_next, b_active_next, log_b_next

    [f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan(
        step, sequences=[L, L[::-1, ::-1]], outputs_info=[np.int32(1), log_first, np.int32(1), log_first])

    idxs = T.arange(L.shape[1]).dimshuffle('x', 0)
    mask = (idxs < f_active.dimshuffle(0, 'x')) & (idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1]
    log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L
    return log_probs, mask 
Example 16
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: test.py    MIT License   6 votes
def _get_rois_blob(im_rois, im_scale_factors):
    """Converts RoIs into network inputs.
    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale_factors (list): scale factors as returned by _get_image_blob
    Returns:
        blob (ndarray): R x 5 matrix of RoIs in the image pyramid
    """
    rois_blob_real = []

    for i in range(len(im_scale_factors)):
        rois, levels = _project_im_rois(im_rois, np.array([im_scale_factors[i]]))
        rois_blob = np.hstack((levels, rois))
        rois_blob_real.append(rois_blob.astype(np.float32, copy=False))

    return rois_blob_real 
Example 17
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: test_train.py    MIT License   6 votes
def _get_rois_blob(im_rois, im_scale_factors):
    """Converts RoIs into network inputs.
    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale_factors (list): scale factors as returned by _get_image_blob
    Returns:
        blob (ndarray): R x 5 matrix of RoIs in the image pyramid
    """
    rois_blob_real = []

    for i in range(len(im_scale_factors)):
        rois, levels = _project_im_rois(im_rois, np.array([im_scale_factors[i]]))
        rois_blob = np.hstack((levels, rois))
        rois_blob_real.append(rois_blob.astype(np.float32, copy=False))

    return rois_blob_real 
Example 18
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License   5 votes
def setup_graph(self, input_audio_batch, target_phrase): 
        batch_size = input_audio_batch.shape[0]
        weird = (input_audio_batch.shape[1] - 1) // 320 
        logits_arg2 = np.tile(weird, batch_size)
        dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
        dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
        
        pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
        seq_len = np.tile(weird, batch_size).astype(np.int32)
        
        with tf.variable_scope('', reuse=tf.AUTO_REUSE):
            
            inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
            len_batch = tf.placeholder(tf.float32, name='b')
            arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape, name='c')
            arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape, name='d')
            arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape, name='e')
            len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')
            
            logits = get_logits(inputs, arg2_logits)
            target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
            ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32), inputs=logits, sequence_length=len_seq)
            decoded, _ = tf.nn.ctc_greedy_decoder(logits, arg2_logits, merge_repeated=True)
            
            sess = tf.Session()
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, "models/session_dump")
            
        func1 = lambda a, b, c, d, e, f: sess.run(ctcloss, 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        func2 = lambda a, b, c, d, e, f: sess.run([ctcloss, decoded], 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        return (func1, func2) 
Example 19
Project: Black-Box-Audio   Author: rtaori   File: tf_logits.py    MIT License   5 votes
def compute_mfcc(audio, **kwargs):
    """
    Compute the MFCC for a given audio waveform. This is
    identical to how DeepSpeech does it, but does it all in
    TensorFlow so that we can differentiate through it.
    """

    batch_size, size = audio.get_shape().as_list()
    audio = tf.cast(audio, tf.float32)

    # 1. Pre-emphasizer, a high-pass filter
    audio = tf.concat((audio[:, :1], audio[:, 1:] - 0.97*audio[:, :-1], np.zeros((batch_size,1000),dtype=np.float32)), 1)

    # 2. windowing into frames of 320 samples, overlapping
    windowed = tf.stack([audio[:, i:i+400] for i in range(0,size-320,160)],1)

    # 3. Take the FFT to convert to frequency space
    ffted = tf.spectral.rfft(windowed, [512])
    ffted = 1.0 / 512 * tf.square(tf.abs(ffted))

    # 4. Compute the Mel windowing of the FFT
    energy = tf.reduce_sum(ffted,axis=2)+1e-30
    filters = np.load("filterbanks.npy").T
    feat = tf.matmul(ffted, np.array([filters]*batch_size,dtype=np.float32))+1e-30

    # 5. Take the DCT again, because why not
    feat = tf.log(feat)
    feat = tf.spectral.dct(feat, type=2, norm='ortho')[:,:,:26]

    # 6. Amplify high frequencies for some reason
    _,nframes,ncoeff = feat.get_shape().as_list()
    n = np.arange(ncoeff)
    lift = 1 + (22/2.)*np.sin(np.pi*n/22)
    feat = lift*feat
    width = feat.get_shape().as_list()[1]

    # 7. And now stick the energy next to the features
    feat = tf.concat((tf.reshape(tf.log(energy),(-1,width,1)), feat[:, :, 1:]), axis=2)
    
    return feat 
Example 20
Project: Black-Box-Audio   Author: rtaori   File: tf_logits.py    MIT License   5 votes
def get_logits(new_input, length, first=[]):
    """
    Compute the logits for a given waveform.

    First, preprocess with the TF version of MFC above,
    and then call DeepSpeech on the features.
    """
    # new_input = tf.Print(new_input, [tf.shape(new_input)])

    # We need to init DeepSpeech the first time we're called
    if first == []:
        first.append(False)
        # Okay, so this is ugly again.
        # We just want it to not crash.
        tf.app.flags.FLAGS.alphabet_config_path = "DeepSpeech/data/alphabet.txt"
        DeepSpeech.initialize_globals()
        print('initialized deepspeech globals')

    batch_size = new_input.get_shape()[0]

    # 1. Compute the MFCCs for the input audio
    # (this is differentiable with our implementation above)
    empty_context = np.zeros((batch_size, 9, 26), dtype=np.float32)
    new_input_to_mfcc = compute_mfcc(new_input)[:, ::2]
    features = tf.concat((empty_context, new_input_to_mfcc, empty_context), 1)

    # 2. We get to see 9 frames at a time to make our decision,
    # so concatenate them together.
    features = tf.reshape(features, [new_input.get_shape()[0], -1])
    features = tf.stack([features[:, i:i+19*26] for i in range(0,features.shape[1]-19*26+1,26)],1)
    features = tf.reshape(features, [batch_size, -1, 19*26])

    # 3. Whiten the data
    mean, var = tf.nn.moments(features, axes=[0,1,2])
    features = (features-mean)/(var**.5)

    # 4. Finally we process it with DeepSpeech
    logits = DeepSpeech.BiRNN(features, length, [0]*10)

    return logits 
Example 21
Project: multi-embedding-cws   Author: wangjksjtu   File: pw_lstm_crf_train.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
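A usage sketch for load_w2v (my own example, not part of the project): it reads a word2vec-style text file whose header line is '<count> <dim>', requires an '<UNK>' entry somewhere in the file, and appends one extra row holding the mean vector.

import numpy as np

with open("toy_w2v.txt", "w") as f:       # build a tiny embeddings file
    f.write("3 2\n")
    f.write("the 0.1 0.2\n")
    f.write("<UNK> 0.0 0.0\n")
    f.write("cat 0.3 0.4\n")

emb = load_w2v("toy_w2v.txt", 2)
print(emb.shape, emb.dtype)               # (4, 2) float32 -- 3 words plus the appended mean row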
Example 22
Project: multi-embedding-cws   Author: wangjksjtu   File: pw_lstm3_crf_train.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 23
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_cnn_train.py    MIT License   5 votes
def initialization(c2vPath):
    c2v = load_w2v(c2vPath, FLAGS.embedding_size)

    global WORDS
    WORDS = tf.Variable(c2v, name = "words")

    inp = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder")

    with tf.variable_scope('Softmax') as scope:
        hidden_W = tf.get_variable(
            shape = [FLAGS.num_hidden * 2, FLAGS.num_tags],
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            name = "weights",
            regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b = tf.Variable(tf.zeros([FLAGS.num_tags], name = "bias"))

    global cfilter
    with tf.variable_scope('CNN_Layer') as scope:
        cfilter = tf.get_variable(
            "cfilter",
            shape = [FLAGS.mrank + 1, 2 * FLAGS.num_hidden, 1, 2 * FLAGS.num_hidden],
            regularizer = tf.contrib.layers.l2_regularizer(0.0001),
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            dtype = tf.float32)

    return inp, hidden_W, hidden_b 
Example 24
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_cnn_train.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 25
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_train.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 26
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm4_crf_train.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 27
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_time_paper.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 28
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_crf_train.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 29
Project: multi-embedding-cws   Author: wangjksjtu   File: nopy_fc_lstm3_crf_train.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 30
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm_crf_train_paper.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 31
Project: multi-embedding-cws   Author: wangjksjtu   File: nowubi_fc_lstm3_crf_train.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 32
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm_crf_train.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 33
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_train_paper.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 34
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm3_crf_train.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 35
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm_crf_train.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 36
Project: multi-embedding-cws   Author: wangjksjtu   File: nowubi_share_lstm3_crf_train.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 37
Project: multi-embedding-cws   Author: wangjksjtu   File: pw_lstm3_crf_time.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 38
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_time.py    MIT License   5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 39
Project: speed_estimation   Author: NeilNie   File: helper.py    MIT License   5 votes
def random_translate(image, steering_angle, range_x, range_y):
    """
    Randomly shift the image vertically and horizontally (translation).
    """
    trans_x = range_x * (np.random.rand() - 0.5)
    trans_y = range_y * (np.random.rand() - 0.5)
    steering_angle += trans_x * 0.002
    trans_m = np.float32([[1, 0, trans_x], [0, 1, trans_y]])
    height, width = image.shape[:2]
    image = cv2.warpAffine(image, trans_m, (width, height))
    return image, steering_angle 
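A usage sketch for random_translate (assuming OpenCV and NumPy are available); the 2x3 affine matrix is built with np.float32 because cv2.warpAffine expects a floating-point transform matrix.

import cv2
import numpy as np

np.random.seed(0)
image = np.zeros((66, 200, 3), dtype=np.uint8)          # dummy camera frame (H x W x 3)
shifted, angle = random_translate(image, steering_angle=0.0, range_x=100, range_y=10)
print(shifted.shape, shifted.dtype, round(angle, 4))    # (66, 200, 3) uint8 plus the adjusted angle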
Example 40
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: SampleIO.py    BSD 2-Clause "Simplified" License   5 votes
def extract_sample(img, image_mean=None, resize=-1):
    """Extract image content from image string or from file
    TAKE:
    input - either file content as string or numpy array
    image_mean - numpy array of the image mean, or values of size (1, 3)
    resize - to resize image, set resize > 0; otherwise, don't resize
    """
    try:
        # if input is a file name, then read image; otherwise decode_imgstr
        if type(img) is np.ndarray:
            img_data = img
        else:
            img_data = decode_imgstr(img)
        if type(resize) in [tuple, list]:
            # resize in two dimensions
            img_data = scipy.misc.imresize(img_data, (resize[0], resize[1]))
        elif resize > 0:
            img_data = scipy.misc.imresize(img_data, (resize, resize))
        img_data = img_data.astype(np.float32, copy=False)
        img_data = img_data[:, :, ::-1]
        # change channel for caffe:
        img_data = img_data.transpose(2, 0, 1)  # to CxHxW
        # substract_mean
        if image_mean is not None:
            img_data = substract_mean(img_data, image_mean)
        return img_data
    except:
        print sys.exc_info()[0], sys.exc_info()[1]
        return 
Example 41
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: BasePythonDataLayer.py    BSD 2-Clause "Simplified" License   5 votes
def forward(self, bottom, top):
        blob = self.get_next_minibatch()
        for i in range(len(blob)):
            top[i].reshape(*(blob[i].shape))
            top[i].data[...] = blob[i].astype(np.float32, copy=False) 
Example 42
Project: chainer-openai-transformer-lm   Author: soskek   File: utils.py    MIT License   5 votes
def stsb_label_encoding(labels, nclass=6):
    """
    Label encoding from Tree LSTM paper (Tai, Socher, Manning)
    """
    Y = np.zeros((len(labels), nclass)).astype(np.float32)
    for j, y in enumerate(labels):
        for i in range(nclass):
            if i == np.floor(y) + 1:
                Y[j, i] = y - np.floor(y)
            if i == np.floor(y):
                Y[j, i] = np.floor(y) - y + 1
    return Y 
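A usage sketch (my addition): a real-valued STS-B similarity score such as 3.6 becomes a float32 row with weight 0.4 on class 3 and 0.6 on class 4.

import numpy as np

Y = stsb_label_encoding([3.6, 5.0])
print(Y.dtype)      # float32
print(Y[0])         # weight 0.4 on class 3, 0.6 on class 4
print(Y[1])         # all weight on class 5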
Example 43
Project: chainer-openai-transformer-lm   Author: soskek   File: utils.py    MIT License   5 votes
def _identity_init(shape, dtype, partition_info, scale):
    n = shape[-1]
    w = np.eye(n) * scale
    if len([s for s in shape if s != 1]) == 2:
        w = w.reshape(shape)
    return w.astype(np.float32) 
Example 44
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License   5 votes
def transform_roc(X1, X2, X3):
    n_batch = len(X1)
    xmb = np.zeros((n_batch, 2, n_ctx, 2), dtype=np.int32)
    mmb = np.zeros((n_batch, 2, n_ctx), dtype=np.float32)
    start = encoder['_start_']
    delimiter = encoder['_delimiter_']
    for i, (x1, x2, x3) in enumerate(zip(X1, X2, X3)):
        x12 = [start] + x1[:max_len] + [delimiter] + x2[:max_len] + [clf_token]
        x13 = [start] + x1[:max_len] + [delimiter] + x3[:max_len] + [clf_token]
        l12 = len(x12)
        l13 = len(x13)
        xmb[i, 0, :l12, 0] = x12
        xmb[i, 1, :l13, 0] = x13
        mmb[i, 0, :l12] = 1
        mmb[i, 1, :l13] = 1
    xmb[:, :, :, 1] = np.arange(
        n_vocab + n_special, n_vocab + n_special + n_ctx)
    return xmb, mmb 
Example 45
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License   5 votes
def transform_sst(X1):
    n_batch = len(X1)
    xmb = np.zeros((n_batch, 1, n_ctx, 2), dtype=np.int32)
    mmb = np.zeros((n_batch, 1, n_ctx), dtype=np.float32)
    start = encoder['_start_']
    delimiter = encoder['_delimiter_']
    for i, x1 in enumerate(X1):
        x1 = [start] + x1[:max_len] + [clf_token]
        l1 = len(x1)
        xmb[i, 0, :l1, 0] = x1
        mmb[i, 0, :l1] = 1
    xmb[:, :, :, 1] = np.arange(
        n_vocab + n_special, n_vocab + n_special + n_ctx)
    return xmb, mmb 
Example 46
Project: cat-bbs   Author: aleju   File: bbs.py    MIT License   5 votes
def draw_on_image(self, img, color=[0, 255, 0], alpha=1.0, thickness=1, copy=copy):
        assert img.dtype in [np.uint8, np.float32, np.int32, np.int64]

        result = np.copy(img) if copy else img
        for i in range(thickness):
            y = [self.y1-i, self.y1-i, self.y2+i, self.y2+i]
            x = [self.x1-i, self.x2+i, self.x2+i, self.x1-i]
            rr, cc = draw.polygon_perimeter(y, x, shape=img.shape)
            if alpha >= 0.99:
                result[rr, cc, 0] = color[0]
                result[rr, cc, 1] = color[1]
                result[rr, cc, 2] = color[2]
            else:
                if result.dtype == np.float32:
                    result[rr, cc, 0] = (1 - alpha) * result[rr, cc, 0] + alpha * color[0]
                    result[rr, cc, 1] = (1 - alpha) * result[rr, cc, 1] + alpha * color[1]
                    result[rr, cc, 2] = (1 - alpha) * result[rr, cc, 2] + alpha * color[2]
                    result = np.clip(result, 0, 255)
                else:
                    result = result.astype(np.float32)
                    result[rr, cc, 0] = (1 - alpha) * result[rr, cc, 0] + alpha * color[0]
                    result[rr, cc, 1] = (1 - alpha) * result[rr, cc, 1] + alpha * color[1]
                    result[rr, cc, 2] = (1 - alpha) * result[rr, cc, 2] + alpha * color[2]
                    result = np.clip(result, 0, 255).astype(np.uint8)

        return result 
Example 47
Project: cat-bbs   Author: aleju   File: common.py    MIT License   5 votes
def draw_heatmap(img, heatmap, alpha=0.5):
    """Draw a heatmap overlay over an image."""
    assert len(heatmap.shape) == 2 or \
        (len(heatmap.shape) == 3 and heatmap.shape[2] == 1)
    assert img.dtype in [np.uint8, np.int32, np.int64]
    assert heatmap.dtype in [np.float32, np.float64]

    if img.shape[0:2] != heatmap.shape[0:2]:
        heatmap_rs = np.clip(heatmap * 255, 0, 255).astype(np.uint8)
        heatmap_rs = ia.imresize_single_image(
            heatmap_rs[..., np.newaxis],
            img.shape[0:2],
            interpolation="nearest"
        )
        heatmap = np.squeeze(heatmap_rs) / 255.0

    cmap = plt.get_cmap('jet')
    heatmap_cmapped = cmap(heatmap)
    heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
    heatmap_cmapped = heatmap_cmapped * 255
    mix = (1-alpha) * img + alpha * heatmap_cmapped
    mix = np.clip(mix, 0, 255).astype(np.uint8)
    return mix 
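A usage sketch for draw_heatmap with a heatmap that already matches the image size, which avoids the imgaug resize branch; only NumPy and matplotlib are assumed.

import numpy as np
import matplotlib.pyplot as plt           # draw_heatmap relies on plt.get_cmap('jet')

img = np.zeros((64, 64, 3), dtype=np.uint8)
heatmap = np.zeros((64, 64), dtype=np.float32)
heatmap[16:48, 16:48] = 1.0               # a hot square in the middle

overlay = draw_heatmap(img, heatmap, alpha=0.5)
print(overlay.shape, overlay.dtype)       # (64, 64, 3) uint8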
Example 48
Project: cat-bbs   Author: aleju   File: predict_video.py    MIT License   5 votes
def find_bbs(img, model, conf_threshold, input_size):
    """Find bounding boxes in an image."""
    # pad image so that it's square
    img_pad, (pad_top, pad_right, pad_bottom, pad_left) = to_aspect_ratio_add(img, 1.0, return_paddings=True)

    # resize padded image to desired input size
    # "linear" interpolation seems to be enough here for 400x400 or larger images
    # change to "area" or "cubic" for marginally better quality
    img_rs = ia.imresize_single_image(img_pad, (input_size, input_size), interpolation="linear")

    # convert to torch-ready input variable
    inputs_np = (np.array([img_rs])/255.0).astype(np.float32).transpose(0, 3, 1, 2)
    inputs = torch.from_numpy(inputs_np)
    inputs = Variable(inputs, volatile=True)
    if GPU >= 0:
        inputs = inputs.cuda(GPU)

    # apply model and measure the model's time
    time_start = time.time()
    outputs_pred = model(inputs)
    time_req = time.time() - time_start

    # process the model's output (i.e. convert heatmaps to BBs)
    result = ModelResult(
        outputs_pred,
        inputs_np,
        img,
        (pad_top, pad_right, pad_bottom, pad_left)
    )
    bbs = result.get_bbs()

    return bbs, time_req 
Example 49
Project: explirefit   Author: codogogo   File: data_helper.py    Apache License 2.0   5 votes
def load_vocabulary_embeddings(vocabulary_inv, embeddings, emb_size, padding = "<PAD/>"):
	voc_embs = []
	for i in range(len(vocabulary_inv)):
		if i not in vocabulary_inv:
			raise Exception("Index not in index vocabulary!" + " Index: " + str(i))
		word = vocabulary_inv[i]
		if word == padding:
			voc_embs.append(np.random.uniform(-1.0, 1.0, size = [emb_size]))
		elif word not in embeddings:
			raise Exception("Word not found in embeddings! " + word)
		else:
			 voc_embs.append(embeddings[word])
	return np.array(voc_embs, dtype = np.float32) 
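A usage sketch (vocabulary and values are made up for illustration): the inverse vocabulary maps indices to words, the padding token gets a random vector, and every other word must be present in the embeddings dict.

import numpy as np

vocabulary_inv = {0: "<PAD/>", 1: "hello", 2: "world"}
embeddings = {"hello": [0.1, 0.2, 0.3], "world": [0.4, 0.5, 0.6]}

voc_embs = load_vocabulary_embeddings(vocabulary_inv, embeddings, emb_size=3)
print(voc_embs.shape, voc_embs.dtype)     # (3, 3) float32; row 0 is the random padding vector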
Example 50
Project: explirefit   Author: codogogo   File: confusion_matrix.py    Apache License 2.0   5 votes
def compute_all_scores(self):
		self.class_performances = {}
		for i in range(len(self.labels)):
			tp = np.float32(self.matrix[i][i])
			fp_plus_tp = np.float32(np.sum(self.matrix, axis = 0)[i])
			fn_plus_tp = np.float32(np.sum(self.matrix, axis = 1)[i])
			p = tp / fp_plus_tp
			r = tp / fn_plus_tp
			self.class_performances[self.labels[i]] = (p, r, 2*p*r/(p+r))

		self.microf1 = np.float32(np.trace(self.matrix)) / np.sum(self.matrix)
		self.macrof1 = float(sum([x[2] for x in self.class_performances.values()])) / float(len(self.labels))
		self.macroP = float(sum([x[0] for x in self.class_performances.values()])) / float(len(self.labels))
		self.macroR = float(sum([x[1] for x in self.class_performances.values()])) / float(len(self.labels))
		self.accuracy = float(sum([self.matrix[i, i] for i in range(len(self.labels))])) / float(np.sum(self.matrix))