Python numpy.float32() Examples

The following code examples show how to use numpy.float32(). They are drawn from open source Python projects.
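
Before diving into the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of the two ways numpy.float32 typically shows up in the code that follows: as a scalar constructor and as a dtype used to build or cast arrays.

import numpy as np

# np.float32 as a scalar constructor: produces a single-precision scalar
lr = np.float32(0.01)
print(type(lr), lr.dtype)          # <class 'numpy.float32'> float32

# np.float32 as a dtype: create or cast arrays in single precision
a = np.asarray([1.0, 2.0, 3.0], dtype=np.float32)
b = np.zeros((2, 2), dtype=np.float32)
c = a.astype(np.float64)           # upcast when more precision is needed
print(a.dtype, b.dtype, c.dtype)   # float32 float32 float64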

Example 1
Project: UR5_Controller   Author: tsinghua-rll   File: quaternion.py    MIT License 8 votes
def from_vector_to_q(v1_c1, v1_c2, v2_c1, v2_c2, v3_c1=(0, 0, 0), v3_c2=(0, 0, 0)):
    """
    Notice: v1 and v2 must not point in the same direction!
    :param v1_c1, v1_c2: vector 1 in coordinates 1 and 2
    :param v2_c1, v2_c2: vector 2 in coordinates 1 and 2
    :param v3_c1, v3_c2: optional origin point in coordinates 1 and 2
    :return: coordinate rotation quaternion from c1 to c2, translation from v1 to v2
            (qw, qi, qj, qk), (x, y, z)
    """
    v1_c1 = np.asarray(v1_c1, dtype=np.float32)
    v2_c1 = np.asarray(v2_c1, dtype=np.float32)
    v3_c1 = np.asarray(v3_c1, dtype=np.float32)
    v1_c2 = np.asarray(v1_c2, dtype=np.float32)
    v2_c2 = np.asarray(v2_c2, dtype=np.float32)
    v3_c2 = np.asarray(v3_c2, dtype=np.float32)
    c1 = np.asarray((v1_c1 - v3_c1, v2_c1 - v3_c1, np.cross(v1_c1 - v3_c1, v2_c1 - v3_c1)), dtype=np.float32).T
    c2 = np.asarray((v1_c2 - v3_c2, v2_c2 - v3_c2, np.cross(v1_c2 - v3_c2, v2_c2 - v3_c2)), dtype=np.float32).T
    mat = c2.dot(c1.T).dot(np.linalg.pinv(c1.dot(c1.T)))
    return from_matrix_to_q(mat), (v1_c2 - mat.dot(v1_c1) + v2_c2 - mat.dot(v2_c1) + v3_c2 - mat.dot(v3_c1)) / 3.0 
Example 2
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License 7 votes
def __init__(self, input_wave_file, output_wave_file, target_phrase):
        self.pop_size = 100
        self.elite_size = 10
        self.mutation_p = 0.005
        self.noise_stdev = 40
        self.noise_threshold = 1
        self.mu = 0.9
        self.alpha = 0.001
        self.max_iters = 3000
        self.num_points_estimate = 100
        self.delta_for_gradient = 100
        self.delta_for_perturbation = 1e3
        self.input_audio = load_wav(input_wave_file).astype(np.float32)
        self.pop = np.expand_dims(self.input_audio, axis=0)
        self.pop = np.tile(self.pop, (self.pop_size, 1))
        self.output_wave_file = output_wave_file
        self.target_phrase = target_phrase
        self.funcs = self.setup_graph(self.pop, np.array([toks.index(x) for x in target_phrase])) 
Example 3
Project: cat-bbs   Author: aleju   File: bbs.py    MIT License 6 votes
def draw_on_image(self, img, color=[0, 255, 0], alpha=1.0, copy=True, from_img=None):
        if copy:
            img = np.copy(img)

        orig_dtype = img.dtype
        if alpha != 1.0 and img.dtype != np.float32:
            img = img.astype(np.float32, copy=False)

        for rect in self:
            if from_img is not None:
                rect.resize(from_img, img).draw_on_image(img, color=color, alpha=alpha, copy=False)
            else:
                rect.draw_on_image(img, color=color, alpha=alpha, copy=False)

        if orig_dtype != img.dtype:
            img = img.astype(orig_dtype, copy=False)

        return img 
Example 4
Project: cat-bbs   Author: aleju   File: bbs.py    MIT License 6 votes
def draw_on_image(self, img, color=[0, 255, 0], alpha=1.0, copy=True):
        if copy:
            img = np.copy(img)

        orig_dtype = img.dtype
        if alpha != 1.0 and img.dtype != np.float32:
            img = img.astype(np.float32, copy=False)

        for rectimg in self:
            if rectimg is not None:
                rectimg.draw_on_image(img, color=color, alpha=alpha, copy=False)

        if orig_dtype != img.dtype:
            img = img.astype(orig_dtype, copy=False)

        return img 
Example 5
Project: UR5_Controller   Author: tsinghua-rll   File: quaternion.py    MIT License 6 votes
def to_euler(q):
    # rpy
    sinr = 2.0 * (q[0] * q[1] + q[2] * q[3])
    cosr = 1.0 - 2.0 * (q[1] * q[1] + q[2] * q[2])
    roll = math.atan2(sinr, cosr)

    sinp = 2.0 * (q[0] * q[2] - q[3] * q[1])
    if math.fabs(sinp) >= 1.:
        pitch = math.copysign(np.pi / 2., sinp)
    else:
        pitch = math.asin(sinp)

    siny = 2.0 * (q[0] * q[3] + q[1] * q[2])
    cosy = 1.0 - 2.0 * (q[2] * q[2] + q[3] * q[3])
    yaw = math.atan2(siny, cosy)

    return np.asarray((roll, pitch, yaw), np.float32) 
Example 6
Project: UR5_Controller   Author: tsinghua-rll   File: quaternion.py    MIT License 6 votes
def from_vector_array_to_q(v_c1, v_c2):
    """
    Calculate the transform quaternion and translation vector from vector pairs in two coordinate frames
    :param v_c1: list or tuple of vectors in the source coordinate frame
    :param v_c2: list or tuple of vectors in the target coordinate frame
    :return: coordinate rotation quaternion from c1 to c2, translation from v1 to v2
            (qw, qi, qj, qk), (x, y, z)
    """
    if len(v_c1) != len(v_c2) or len(v_c1) <= 3:
        print ("Error! on enough vector pair or length of two array is different")
        return (1, 0, 0, 0), (0, 0, 0)

    v_c1 = np.asarray(v_c1, dtype=np.float32).T
    v_c2 = np.asarray(v_c2, dtype=np.float32).T
    mean_c1 = np.mean(v_c1, axis=1, keepdims=True)
    mean_c2 = np.mean(v_c2, axis=1, keepdims=True)
    v_c1 -= mean_c1
    v_c2 -= mean_c2

    mat = v_c2.dot(v_c1.T).dot(np.linalg.pinv(v_c1.dot(v_c1.T)))
    return from_matrix_to_q(mat), np.mean(v_c2 - mat.dot(v_c1), 1) 
Example 7
Project: aospy   Author: spencerahill   File: data_loader.py    Apache License 2.0 6 votes
def _maybe_cast_to_float64(da):
    """Cast DataArrays to np.float64 if they are of type np.float32.

    Parameters
    ----------
    da : xr.DataArray
        Input DataArray

    Returns
    -------
    DataArray

    """
    if da.dtype == np.float32:
        logging.warning('Datapoints were stored using the np.float32 datatype. '
                        'For accurate reduction operations using bottleneck, '
                        'datapoints are being cast to the np.float64 datatype.'
                        ' For more information see: https://github.com/pydata/'
                        'xarray/issues/1346')
        return da.astype(np.float64)
    else:
        return da 
Example 8
Project: Att-ChemdNER   Author: lingluodlut   File: utils.py    Apache License 2.0 6 votes
def set_values(name, param, pretrained):
#{{{
    """
    Initialize a network parameter with pretrained values.
    We check that sizes are compatible.
    """
    param_value = param.get_value()
    if pretrained.size != param_value.size:
        raise Exception(
            "Size mismatch for parameter %s. Expected %i, found %i."
            % (name, param_value.size, pretrained.size)
        )
    param.set_value(np.reshape(
        pretrained, param_value.shape
    ).astype(np.float32))
#}}} 
Example 9
Project: Att-ChemdNER   Author: lingluodlut   File: optimization.py    Apache License 2.0 6 votes
def sgd(self, cost, params,constraints={}, lr=0.01):
#{{{
        """
        Stochastic gradient descent.
        """
        updates = []
        
        lr = theano.shared(np.float32(lr).astype(floatX))
        gradients = self.get_gradients(cost, params)
        
        for p, g in zip(params, gradients):
            v=-lr*g;
            new_p=p+v;
            # apply constraints
            if p in constraints:
                c=constraints[p];
                new_p=c(new_p);
            updates.append((p, new_p))

        return updates
#}}} 
Example 10
Project: Att-ChemdNER   Author: lingluodlut   File: optimization.py    Apache License 2.0 6 votes
def sgdmomentum(self, cost, params,constraints={}, lr=0.01,consider_constant=None, momentum=0.):
        """
        Stochastic gradient descent with momentum. Momentum has to be in [0, 1)
        """
        # Check that the momentum is a correct value
        assert 0 <= momentum < 1

        lr = theano.shared(np.float32(lr).astype(floatX))
        momentum = theano.shared(np.float32(momentum).astype(floatX))

        gradients = self.get_gradients(cost, params)
        velocities = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

        updates = []
        for param, gradient, velocity in zip(params, gradients, velocities):
            new_velocity = momentum * velocity - lr * gradient
            updates.append((velocity, new_velocity))
            new_p=param+new_velocity;
            # apply constraints
            if param in constraints:
                c=constraints[param];
                new_p=c(new_p);
            updates.append((param, new_p))
        return updates 
Example 11
Project: Att-ChemdNER   Author: lingluodlut   File: optimization.py    Apache License 2.0 6 votes
def adagrad(self, cost, params, lr=1.0, epsilon=1e-6,consider_constant=None):
        """
        Adagrad. Based on http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf
        """
        lr = theano.shared(np.float32(lr).astype(floatX))
        epsilon = theano.shared(np.float32(epsilon).astype(floatX))

        gradients = self.get_gradients(cost, params,consider_constant)
        gsums = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

        updates = []
        for param, gradient, gsum in zip(params, gradients, gsums):
            new_gsum = gsum + gradient ** 2.
            updates.append((gsum, new_gsum))
            updates.append((param, param - lr * gradient / (T.sqrt(gsum + epsilon))))
        return updates 
Example 12
Project: Att-ChemdNER   Author: lingluodlut   File: optimization.py    Apache License 2.0 6 votes
def adadelta(self, cost, params, rho=0.95, epsilon=1e-6,consider_constant=None):
        """
        Adadelta. Based on:
        http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf
        """
        rho = theano.shared(np.float32(rho).astype(floatX))
        epsilon = theano.shared(np.float32(epsilon).astype(floatX))

        gradients = self.get_gradients(cost, params,consider_constant)
        accu_gradients = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]
        accu_deltas = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

        updates = []
        for param, gradient, accu_gradient, accu_delta in zip(params, gradients, accu_gradients, accu_deltas):
            new_accu_gradient = rho * accu_gradient + (1. - rho) * gradient ** 2.
            delta_x = - T.sqrt((accu_delta + epsilon) / (new_accu_gradient + epsilon)) * gradient
            new_accu_delta = rho * accu_delta + (1. - rho) * delta_x ** 2.
            updates.append((accu_gradient, new_accu_gradient))
            updates.append((accu_delta, new_accu_delta))
            updates.append((param, param + delta_x))
        return updates 
Example 13
Project: Att-ChemdNER   Author: lingluodlut   File: optimization.py    Apache License 2.0 6 votes
def rmsprop(self, cost, params, lr=0.001, rho=0.9, eps=1e-6,consider_constant=None):
        """
        RMSProp.
        """
        lr = theano.shared(np.float32(lr).astype(floatX))

        gradients = self.get_gradients(cost, params,consider_constant)
        accumulators = [theano.shared(np.zeros_like(p.get_value()).astype(np.float32)) for p in params]

        updates = []

        for param, gradient, accumulator in zip(params, gradients, accumulators):
            new_accumulator = rho * accumulator + (1 - rho) * gradient ** 2
            updates.append((accumulator, new_accumulator))

            new_param = param - lr * gradient / T.sqrt(new_accumulator + eps)
            updates.append((param, new_param))

        return updates 
Example 14
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    Apache License 2.0 6 votes
def in_top_k(predictions, targets, k):
    '''Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type int. output_i is 1 if
        targets_i is within top-k values of predictions_i
    '''
    predictions_top_k = T.argsort(predictions)[:, -k:]
    result, _ = theano.map(lambda prediction, target: any(equal(prediction, target)), sequences=[predictions_top_k, targets])
    return result


# CONVOLUTIONS 
Example 15
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    Apache License 2.0 6 votes
def ctc_path_probs(predict, Y, alpha=1e-4):
    smoothed_predict = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0]
    L = T.log(smoothed_predict)
    zeros = T.zeros_like(L[0])
    log_first = zeros

    f_skip_idxs = ctc_create_skip_idxs(Y)
    b_skip_idxs = ctc_create_skip_idxs(Y[::-1])  # there should be a shortcut to calculating this

    def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev):
        f_active_next, log_f_next = ctc_update_log_p(f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev)
        b_active_next, log_b_next = ctc_update_log_p(b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev)
        return f_active_next, log_f_next, b_active_next, log_b_next

    [f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan(
        step, sequences=[L, L[::-1, ::-1]], outputs_info=[np.int32(1), log_first, np.int32(1), log_first])

    idxs = T.arange(L.shape[1]).dimshuffle('x', 0)
    mask = (idxs < f_active.dimshuffle(0, 'x')) & (idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1]
    log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L
    return log_probs, mask 
Example 16
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: test.py    MIT License 6 votes
def _get_rois_blob(im_rois, im_scale_factors):
    """Converts RoIs into network inputs.
    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale_factors (list): scale factors as returned by _get_image_blob
    Returns:
        blob (ndarray): R x 5 matrix of RoIs in the image pyramid
    """
    rois_blob_real = []

    for i in range(len(im_scale_factors)):
        rois, levels = _project_im_rois(im_rois, np.array([im_scale_factors[i]]))
        rois_blob = np.hstack((levels, rois))
        rois_blob_real.append(rois_blob.astype(np.float32, copy=False))

    return rois_blob_real 
Example 17
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: test_train.py    MIT License 6 votes
def _get_rois_blob(im_rois, im_scale_factors):
    """Converts RoIs into network inputs.
    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale_factors (list): scale factors as returned by _get_image_blob
    Returns:
        blob (ndarray): R x 5 matrix of RoIs in the image pyramid
    """
    rois_blob_real = []

    for i in range(len(im_scale_factors)):
        rois, levels = _project_im_rois(im_rois, np.array([im_scale_factors[i]]))
        rois_blob = np.hstack((levels, rois))
        rois_blob_real.append(rois_blob.astype(np.float32, copy=False))

    return rois_blob_real 
Example 18
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License 5 votes
def setup_graph(self, input_audio_batch, target_phrase): 
        batch_size = input_audio_batch.shape[0]
        weird = (input_audio_batch.shape[1] - 1) // 320 
        logits_arg2 = np.tile(weird, batch_size)
        dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
        dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
        
        pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
        seq_len = np.tile(weird, batch_size).astype(np.int32)
        
        with tf.variable_scope('', reuse=tf.AUTO_REUSE):
            
            inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
            len_batch = tf.placeholder(tf.float32, name='b')
            arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape, name='c')
            arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape, name='d')
            arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape, name='e')
            len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')
            
            logits = get_logits(inputs, arg2_logits)
            target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
            ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32), inputs=logits, sequence_length=len_seq)
            decoded, _ = tf.nn.ctc_greedy_decoder(logits, arg2_logits, merge_repeated=True)
            
            sess = tf.Session()
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, "models/session_dump")
            
        func1 = lambda a, b, c, d, e, f: sess.run(ctcloss, 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        func2 = lambda a, b, c, d, e, f: sess.run([ctcloss, decoded], 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        return (func1, func2) 
Example 19
Project: Black-Box-Audio   Author: rtaori   File: tf_logits.py    MIT License 5 votes
def compute_mfcc(audio, **kwargs):
    """
    Compute the MFCC for a given audio waveform. This is
    identical to how DeepSpeech does it, but does it all in
    TensorFlow so that we can differentiate through it.
    """

    batch_size, size = audio.get_shape().as_list()
    audio = tf.cast(audio, tf.float32)

    # 1. Pre-emphasizer, a high-pass filter
    audio = tf.concat((audio[:, :1], audio[:, 1:] - 0.97*audio[:, :-1], np.zeros((batch_size,1000),dtype=np.float32)), 1)

    # 2. windowing into frames of 320 samples, overlapping
    windowed = tf.stack([audio[:, i:i+400] for i in range(0,size-320,160)],1)

    # 3. Take the FFT to convert to frequency space
    ffted = tf.spectral.rfft(windowed, [512])
    ffted = 1.0 / 512 * tf.square(tf.abs(ffted))

    # 4. Compute the Mel windowing of the FFT
    energy = tf.reduce_sum(ffted,axis=2)+1e-30
    filters = np.load("filterbanks.npy").T
    feat = tf.matmul(ffted, np.array([filters]*batch_size,dtype=np.float32))+1e-30

    # 5. Take the DCT again, because why not
    feat = tf.log(feat)
    feat = tf.spectral.dct(feat, type=2, norm='ortho')[:,:,:26]

    # 6. Amplify high frequencies for some reason
    _,nframes,ncoeff = feat.get_shape().as_list()
    n = np.arange(ncoeff)
    lift = 1 + (22/2.)*np.sin(np.pi*n/22)
    feat = lift*feat
    width = feat.get_shape().as_list()[1]

    # 7. And now stick the energy next to the features
    feat = tf.concat((tf.reshape(tf.log(energy),(-1,width,1)), feat[:, :, 1:]), axis=2)
    
    return feat 
Example 20
Project: Black-Box-Audio   Author: rtaori   File: tf_logits.py    MIT License 5 votes
def get_logits(new_input, length, first=[]):
    """
    Compute the logits for a given waveform.

    First, preprocess with the TF version of MFC above,
    and then call DeepSpeech on the features.
    """
    # new_input = tf.Print(new_input, [tf.shape(new_input)])

    # We need to init DeepSpeech the first time we're called
    if first == []:
        first.append(False)
        # Okay, so this is ugly again.
        # We just want it to not crash.
        tf.app.flags.FLAGS.alphabet_config_path = "DeepSpeech/data/alphabet.txt"
        DeepSpeech.initialize_globals()
        print('initialized deepspeech globals')

    batch_size = new_input.get_shape()[0]

    # 1. Compute the MFCCs for the input audio
    # (this is differentable with our implementation above)
    empty_context = np.zeros((batch_size, 9, 26), dtype=np.float32)
    new_input_to_mfcc = compute_mfcc(new_input)[:, ::2]
    features = tf.concat((empty_context, new_input_to_mfcc, empty_context), 1)

    # 2. We get to see 9 frames at a time to make our decision,
    # so concatenate them together.
    features = tf.reshape(features, [new_input.get_shape()[0], -1])
    features = tf.stack([features[:, i:i+19*26] for i in range(0,features.shape[1]-19*26+1,26)],1)
    features = tf.reshape(features, [batch_size, -1, 19*26])

    # 3. Whiten the data
    mean, var = tf.nn.moments(features, axes=[0,1,2])
    features = (features-mean)/(var**.5)

    # 4. Finally we process it with DeepSpeech
    logits = DeepSpeech.BiRNN(features, length, [0]*10)

    return logits 
Example 21
Project: multi-embedding-cws   Author: wangjksjtu   File: pw_lstm_crf_train.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 22
Project: multi-embedding-cws   Author: wangjksjtu   File: pw_lstm3_crf_train.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 23
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_cnn_train.py    MIT License 5 votes
def initialization(c2vPath):
    c2v = load_w2v(c2vPath, FLAGS.embedding_size)

    global WORDS
    WORDS = tf.Variable(c2v, name = "words")

    inp = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder")

    with tf.variable_scope('Softmax') as scope:
        hidden_W = tf.get_variable(
            shape = [FLAGS.num_hidden * 2, FLAGS.num_tags],
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            name = "weights",
            regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b = tf.Variable(tf.zeros([FLAGS.num_tags], name = "bias"))

    global cfilter
    with tf.variable_scope('CNN_Layer') as scope:
        cfilter = tf.get_variable(
            "cfilter",
            shape = [FLAGS.mrank + 1, 2 * FLAGS.num_hidden, 1, 2 * FLAGS.num_hidden],
            regularizer = tf.contrib.layers.l2_regularizer(0.0001),
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            dtype = tf.float32)

    return inp, hidden_W, hidden_b 
Example 24
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_cnn_train.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 25
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_train.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 26
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm4_crf_train.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 27
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_time_paper.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 28
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_crf_train.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 29
Project: multi-embedding-cws   Author: wangjksjtu   File: nopy_fc_lstm3_crf_train.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 30
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm_crf_train_paper.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 31
Project: multi-embedding-cws   Author: wangjksjtu   File: nowubi_fc_lstm3_crf_train.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 32
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm_crf_train.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 33
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_train_paper.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 34
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm3_crf_train.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 35
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm_crf_train.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 36
Project: multi-embedding-cws   Author: wangjksjtu   File: nowubi_share_lstm3_crf_train.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 37
Project: multi-embedding-cws   Author: wangjksjtu   File: pw_lstm3_crf_time.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 38
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_time.py    MIT License 5 votes
def load_w2v(path, expectDim):
    fp = open(path, "r")
    print("load data from:", path)
    line = fp.readline().strip()
    ss = line.split(" ")
    total = int(ss[0])
    dim = int(ss[1])
    assert (dim == expectDim)
    ws = []
    mv = [0 for i in range(dim)]
    second = -1
    for t in range(total):
        if ss[0] == '<UNK>':
            second = t
        line = fp.readline().strip()
        ss = line.split(" ")
        assert (len(ss) == (dim + 1))
        vals = []
        for i in range(1, dim + 1):
            fv = float(ss[i])
            mv[i - 1] += fv
            vals.append(fv)
        ws.append(vals)
    for i in range(dim):
        mv[i] = mv[i] / total
    assert (second != -1)
    # append one more token , maybe useless
    ws.append(mv)
    if second != 1:
        t = ws[1]
        ws[1] = ws[second]
        ws[second] = t
    fp.close()
    return np.asarray(ws, dtype=np.float32) 
Example 39
Project: speed_estimation   Author: NeilNie   File: helper.py    MIT License 5 votes
def random_translate(image, steering_angle, range_x, range_y):
    """
    Randomly shift the image vertically and horizontally (translation).
    """
    trans_x = range_x * (np.random.rand() - 0.5)
    trans_y = range_y * (np.random.rand() - 0.5)
    steering_angle += trans_x * 0.002
    trans_m = np.float32([[1, 0, trans_x], [0, 1, trans_y]])
    height, width = image.shape[:2]
    image = cv2.warpAffine(image, trans_m, (width, height))
    return image, steering_angle 
Example 40
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: SampleIO.py    BSD 2-Clause "Simplified" License 5 votes
def extract_sample(img, image_mean=None, resize=-1):
    """Extract image content from image string or from file
    TAKE:
    input - either file content as string or numpy array
    image_mean - numpy array of image mean or a value of size (1, 3)
    resize - to resize image, set resize > 0; otherwise, don't resize
    """
    try:
        # if input is a file name, then read image; otherwise decode_imgstr
        if type(img) is np.ndarray:
            img_data = img
        else:
            img_data = decode_imgstr(img)
        if type(resize) in [tuple, list]:
            # resize in two dimensions
            img_data = scipy.misc.imresize(img_data, (resize[0], resize[1]))
        elif resize > 0:
            img_data = scipy.misc.imresize(img_data, (resize, resize))
        img_data = img_data.astype(np.float32, copy=False)
        img_data = img_data[:, :, ::-1]
        # change channel for caffe:
        img_data = img_data.transpose(2, 0, 1)  # to CxHxW
        # substract_mean
        if image_mean is not None:
            img_data = substract_mean(img_data, image_mean)
        return img_data
    except:
        print sys.exc_info()[0], sys.exc_info()[1]
        return 
Example 41
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: BasePythonDataLayer.py    BSD 2-Clause "Simplified" License 5 votes
def forward(self, bottom, top):
        blob = self.get_next_minibatch()
        for i in range(len(blob)):
            top[i].reshape(*(blob[i].shape))
            top[i].data[...] = blob[i].astype(np.float32, copy=False) 
Example 42
Project: chainer-openai-transformer-lm   Author: soskek   File: utils.py    MIT License 5 votes
def stsb_label_encoding(labels, nclass=6):
    """
    Label encoding from Tree LSTM paper (Tai, Socher, Manning)
    """
    Y = np.zeros((len(labels), nclass)).astype(np.float32)
    for j, y in enumerate(labels):
        for i in range(nclass):
            if i == np.floor(y) + 1:
                Y[j, i] = y - np.floor(y)
            if i == np.floor(y):
                Y[j, i] = np.floor(y) - y + 1
    return Y 
Example 43
Project: chainer-openai-transformer-lm   Author: soskek   File: utils.py    MIT License 5 votes
def _identity_init(shape, dtype, partition_info, scale):
    n = shape[-1]
    w = np.eye(n) * scale
    if len([s for s in shape if s != 1]) == 2:
        w = w.reshape(shape)
    return w.astype(np.float32) 
Example 44
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License 5 votes
def transform_roc(X1, X2, X3):
    n_batch = len(X1)
    xmb = np.zeros((n_batch, 2, n_ctx, 2), dtype=np.int32)
    mmb = np.zeros((n_batch, 2, n_ctx), dtype=np.float32)
    start = encoder['_start_']
    delimiter = encoder['_delimiter_']
    for i, (x1, x2, x3), in enumerate(zip(X1, X2, X3)):
        x12 = [start] + x1[:max_len] + [delimiter] + x2[:max_len] + [clf_token]
        x13 = [start] + x1[:max_len] + [delimiter] + x3[:max_len] + [clf_token]
        l12 = len(x12)
        l13 = len(x13)
        xmb[i, 0, :l12, 0] = x12
        xmb[i, 1, :l13, 0] = x13
        mmb[i, 0, :l12] = 1
        mmb[i, 1, :l13] = 1
    xmb[:, :, :, 1] = np.arange(
        n_vocab + n_special, n_vocab + n_special + n_ctx)
    return xmb, mmb 
Example 45
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License 5 votes
def transform_sst(X1):
    n_batch = len(X1)
    xmb = np.zeros((n_batch, 1, n_ctx, 2), dtype=np.int32)
    mmb = np.zeros((n_batch, 1, n_ctx), dtype=np.float32)
    start = encoder['_start_']
    delimiter = encoder['_delimiter_']
    for i, x1, in enumerate(X1):
        x1 = [start] + x1[:max_len] + [clf_token]
        l1 = len(x1)
        xmb[i, 0, :l1, 0] = x1
        mmb[i, 0, :l1] = 1
    xmb[:, :, :, 1] = np.arange(
        n_vocab + n_special, n_vocab + n_special + n_ctx)
    return xmb, mmb 
Example 46
Project: cat-bbs   Author: aleju   File: bbs.py    MIT License 5 votes
def draw_on_image(self, img, color=[0, 255, 0], alpha=1.0, thickness=1, copy=copy):
        assert img.dtype in [np.uint8, np.float32, np.int32, np.int64]

        result = np.copy(img) if copy else img
        for i in range(thickness):
            y = [self.y1-i, self.y1-i, self.y2+i, self.y2+i]
            x = [self.x1-i, self.x2+i, self.x2+i, self.x1-i]
            rr, cc = draw.polygon_perimeter(y, x, shape=img.shape)
            if alpha >= 0.99:
                result[rr, cc, 0] = color[0]
                result[rr, cc, 1] = color[1]
                result[rr, cc, 2] = color[2]
            else:
                if result.dtype == np.float32:
                    result[rr, cc, 0] = (1 - alpha) * result[rr, cc, 0] + alpha * color[0]
                    result[rr, cc, 1] = (1 - alpha) * result[rr, cc, 1] + alpha * color[1]
                    result[rr, cc, 2] = (1 - alpha) * result[rr, cc, 2] + alpha * color[2]
                    result = np.clip(result, 0, 255)
                else:
                    result = result.astype(np.float32)
                    result[rr, cc, 0] = (1 - alpha) * result[rr, cc, 0] + alpha * color[0]
                    result[rr, cc, 1] = (1 - alpha) * result[rr, cc, 1] + alpha * color[1]
                    result[rr, cc, 2] = (1 - alpha) * result[rr, cc, 2] + alpha * color[2]
                    result = np.clip(result, 0, 255).astype(np.uint8)

        return result 
Example 47
Project: cat-bbs   Author: aleju   File: common.py    MIT License 5 votes
def draw_heatmap(img, heatmap, alpha=0.5):
    """Draw a heatmap overlay over an image."""
    assert len(heatmap.shape) == 2 or \
        (len(heatmap.shape) == 3 and heatmap.shape[2] == 1)
    assert img.dtype in [np.uint8, np.int32, np.int64]
    assert heatmap.dtype in [np.float32, np.float64]

    if img.shape[0:2] != heatmap.shape[0:2]:
        heatmap_rs = np.clip(heatmap * 255, 0, 255).astype(np.uint8)
        heatmap_rs = ia.imresize_single_image(
            heatmap_rs[..., np.newaxis],
            img.shape[0:2],
            interpolation="nearest"
        )
        heatmap = np.squeeze(heatmap_rs) / 255.0

    cmap = plt.get_cmap('jet')
    heatmap_cmapped = cmap(heatmap)
    heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
    heatmap_cmapped = heatmap_cmapped * 255
    mix = (1-alpha) * img + alpha * heatmap_cmapped
    mix = np.clip(mix, 0, 255).astype(np.uint8)
    return mix 
Example 48
Project: cat-bbs   Author: aleju   File: predict_video.py    MIT License 5 votes
def find_bbs(img, model, conf_threshold, input_size):
    """Find bounding boxes in an image."""
    # pad image so that it's square
    img_pad, (pad_top, pad_right, pad_bottom, pad_left) = to_aspect_ratio_add(img, 1.0, return_paddings=True)

    # resize padded image to desired input size
    # "linear" interpolation seems to be enough here for 400x400 or larger images
    # change to "area" or "cubic" for marginally better quality
    img_rs = ia.imresize_single_image(img_pad, (input_size, input_size), interpolation="linear")

    # convert to torch-ready input variable
    inputs_np = (np.array([img_rs])/255.0).astype(np.float32).transpose(0, 3, 1, 2)
    inputs = torch.from_numpy(inputs_np)
    inputs = Variable(inputs, volatile=True)
    if GPU >= 0:
        inputs = inputs.cuda(GPU)

    # apply model and measure the model's time
    time_start = time.time()
    outputs_pred = model(inputs)
    time_req = time.time() - time_start

    # process the model's output (i.e. convert heatmaps to BBs)
    result = ModelResult(
        outputs_pred,
        inputs_np,
        img,
        (pad_top, pad_right, pad_bottom, pad_left)
    )
    bbs = result.get_bbs()

    return bbs, time_req 
Example 49
Project: explirefit   Author: codogogo   File: data_helper.py    Apache License 2.0 5 votes
def load_vocabulary_embeddings(vocabulary_inv, embeddings, emb_size, padding = "<PAD/>"):
	voc_embs = []
	for i in range(len(vocabulary_inv)):
		if i not in vocabulary_inv:
			raise Exception("Index not in index vocabulary!" + " Index: " + str(i))
		word = vocabulary_inv[i]
		if word == padding:
			voc_embs.append(np.random.uniform(-1.0, 1.0, size = [emb_size]))
		elif word not in embeddings:
			raise Exception("Word not found in embeddings! " + word)
		else:
			 voc_embs.append(embeddings[word])
	return np.array(voc_embs, dtype = np.float32) 
Example 50
Project: explirefit   Author: codogogo   File: confusion_matrix.py    Apache License 2.0 5 votes
def compute_all_scores(self):
		self.class_performances = {}
		for i in range(len(self.labels)):
			tp = np.float32(self.matrix[i][i])
			fp_plus_tp = np.float32(np.sum(self.matrix, axis = 0)[i])
			fn_plus_tp = np.float32(np.sum(self.matrix, axis = 1)[i])
			p = tp / fp_plus_tp
			r = tp / fn_plus_tp
			self.class_performances[self.labels[i]] = (p, r, 2*p*r/(p+r))

		self.microf1 = np.float32(np.trace(self.matrix)) / np.sum(self.matrix)
		self.macrof1 = float(sum([x[2] for x in self.class_performances.values()])) / float(len(self.labels))
		self.macroP = float(sum([x[0] for x in self.class_performances.values()])) / float(len(self.labels))
		self.macroR = float(sum([x[1] for x in self.class_performances.values()])) / float(len(self.labels))
		self.accuracy = float(sum([self.matrix[i, i] for i in range(len(self.labels))])) / float(np.sum(self.matrix)) 
Example 51
Project: UR5_Controller   Author: tsinghua-rll   File: HAPI.py    MIT License 5 votes
def __init__(self, IP_ADDRESS):
        super(HAPI, self).__init__(IP_ADDRESS)
        self.__base_t = np.asarray((0, 0, 0), dtype=np.float32)
        self.__base_q = np.asarray((1, 0, 0, 0), dtype=np.float32) 
Example 52
Project: UR5_Controller   Author: tsinghua-rll   File: HAPI.py    MIT License 5 votes
def isLastMovementEnd(self, select=(1, 1, 1, 1, 1, 1)):
        """
        :param select: 1 if dimension is selected
        :return: boolean
        """
        select = np.asarray(select, dtype=np.float32)
        data = self.rtif.receive()
        tar_rad = np.asarray(data["Target Joint Positions"], dtype=np.float32)
        cur_rad = np.asarray(data["Actual Joint Positions"], dtype=np.float32)
        speed = np.asarray(data["Actual Joint Velocities"])
        return np.max(np.abs((tar_rad - cur_rad) * select)) < 1e-4 and np.max(np.abs(speed * select)) < 1e-1 
Example 53
Project: UR5_Controller   Author: tsinghua-rll   File: HAPI.py    MIT License 5 votes
def set_coordinate_origin(self, ori=None):
        """
        Set the coordinate origin point. If ori is None, teach mode is used automatically.
        The operating coordinate frame is: +X from the robot center to the end tool, +Z toward the sky.
        The tool coordinate frame is: the tool connector points toward +X and the tool face toward -Z.
        :param ori: 3d tuple (x, y, z) or ((x, y, z), (w, i, j, k)), or None for teach mode;
                    conversion is done automatically
        :return: basic transform (x, y, z), (w, i, j, k)
        """
        if ori is None:
            self.switch_mode(True)
            self.TeachMode()
            print ("Please move robot arm to origin point")
            print ("Notice: the external sensor connector points to +X, and tool towards -Z")
            print ("And press Enter key >>>")
            if raw_input() == '':
                ori = self.GetCurrentEndPos()
                print ("New origin point is (%f, %f, %f)" % ori)
            else:
                print ("Cancel without changing coordinate")
                return

        if len(ori) != 2:
            if len(ori) != 3:
                print ("Error value! ori should be (x, y, z)")
                return
            # convert coordinate
            unix = ori / np.linalg.norm(ori[:2])
            unix[2] = 0
            uniz = np.asarray((0, 0, 1), dtype=np.float32)
            unio = ori

            q, t = quat.from_vector_to_q((1, 0, 0), unio + unix, (0, 0, 1), unio + uniz, (0, 0, 0), unio)
            ori = (t, q)

        elif len(ori[0]) != 3 or len(ori[1]) != 4:
            print ("Error value! ori should be (x, y, z), (w, i, j, k)")
            return

        self.__base_t = np.asarray(ori[0], dtype=np.float32)
        self.__base_q = np.asarray(ori[1], dtype=np.float32) 
Example 54
Project: UR5_Controller   Author: tsinghua-rll   File: HAPI.py    MIT License 5 votes
def GetCurrentEndForce(self):
        f = super(HAPI, self).GetCurrentEndForce()
        f, t = f[:3], f[3:]
        f = quat.qrotote_v(quat.qconj(self.__base_q), f)
        t = quat.qrotote_v(quat.qconj(self.__base_q), t)
        return np.asarray((f[0], f[1], f[2], t[0], t[1], t[2]), dtype=np.float32) 
Example 55
Project: UR5_Controller   Author: tsinghua-rll   File: HAPI.py    MIT License 5 votes
def GetTargetEndPos(self, RAW=False):
        p, q = super(HAPI, self).GetTargetEndPos()
        p = quat.qrotote_v(quat.qconj(self.__base_q), p - self.__base_t)
        q = quat.qmul(quat.qconj(self.__base_q), q)
        if RAW:
            q = self.from_q_to_rad_axis(q)
            return np.asarray((p[0], p[1], p[2], q[0], q[1], q[2]), dtype=np.float32)
        else:
            return p, q 
Example 56
Project: UR5_Controller   Author: tsinghua-rll   File: API.py    MIT License 5 votes
def from_rad_axis_to_q(r):
        """
        :param r: rotation axis (x, y, z) scaled by the angle in radians
        :return: quaternion in w-i-j-k format
        """
        norm_axis = math.sqrt(r[0] ** 2 + r[1] ** 2 + r[2] ** 2)
        if norm_axis < 1e-5:
            return np.asarray((1.0, 0., 0., 0.), dtype=np.float32)
        else:
            return np.asarray((math.cos(0.5 * norm_axis),
                               math.sin(0.5 * norm_axis) * r[0] / norm_axis,
                               math.sin(0.5 * norm_axis) * r[1] / norm_axis,
                               math.sin(0.5 * norm_axis) * r[2] / norm_axis), dtype=np.float32) 
Example 57
Project: UR5_Controller   Author: tsinghua-rll   File: API.py    MIT License 5 votes
def GetCurrentJointRad(self):
        """
        :return: q in 6-double tuple
        """
        if not self.__direct_control_mode:
            print ("Warning: in buffered mode, Get operation will be ignored!")
        return np.asarray(self.rtif.receive()["Actual Joint Positions"], dtype=np.float32) 
Example 58
Project: UR5_Controller   Author: tsinghua-rll   File: API.py    MIT License 5 votes
def GetTargetJointRad(self):
        """
        :return: q in 6-double tuple
        """
        if not self.__direct_control_mode:
            print ("Warning: in buffered mode, Get operation will be ignored!")
        return np.asarray(self.rtif.receive()["Target Joint Positions"], dtype=np.float32) 
Example 59
Project: UR5_Controller   Author: tsinghua-rll   File: API.py    MIT License 5 votes
def GetCurrentEndPos(self):
        """
        :return: (x,y,z), (w,i,j,k)
        """
        if not self.__direct_control_mode:
            print ("Warning: in buffered mode, Get operation will be ignored!")
        ret = self.rtif.receive()["Actual Tool Coordinates"]
        return np.asarray(ret[:3], dtype=np.float32), self.from_rad_axis_to_q(ret[3:]) 
Example 60
Project: UR5_Controller   Author: tsinghua-rll   File: API.py    MIT License 5 votes
def GetTargetEndPos(self, RAW=False):
        """
        :return: (x,y,z), (w,i,j,k)
        """
        if not self.__direct_control_mode:
            print ("Warning: in buffered mode, Get operation will be ignored!")
        ret = self.rtif.receive()["Target Tool Coordinates"]
        if RAW:
            return ret
        return np.asarray(ret[:3], dtype=np.float32), self.from_rad_axis_to_q(ret[3:]) 
Example 61
Project: UR5_Controller   Author: tsinghua-rll   File: quaternion.py    MIT License 5 votes
def qmul(q0, q1):
    w0, x0, y0, z0 = q0
    w1, x1, y1, z1 = q1
    return np.array([-x0 * x1 - y0 * y1 - z0 * z1 + w0 * w1,
                     x0 * w1 + y0 * z1 - z0 * y1 + w0 * x1,
                     -x0 * z1 + y0 * w1 + z0 * x1 + w0 * y1,
                     x0 * y1 - y0 * x1 + z0 * w1 + w0 * z1], dtype=np.float32) 
Example 62
Project: UR5_Controller   Author: tsinghua-rll   File: quaternion.py    MIT License 5 votes vote down vote up
def qconj(q0):
    w0, x0, y0, z0 = q0
    return np.array([w0, -x0, -y0, -z0], dtype=np.float32) 
Example 63
Project: UR5_Controller   Author: tsinghua-rll   File: quaternion.py    MIT License 5 votes vote down vote up
def qrotote_v(q0, v1, trans=(0, 0, 0)):
    """
    Rotate the vector v1 by quaternion q0. To rotate the coordinate frame instead, use qrotote_v(qconj(q0), v1).
    """
    q1 = np.zeros_like(q0)
    q1[1:] = v1
    return qmul(qmul(q0, q1), qconj(q0))[1:] + np.asarray(trans, dtype=np.float32) 
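A usage sketch, assuming the helpers above are saved locally as quaternion.py: rotating the x axis by 90 degrees about z should yield the y axis.

import math
import numpy as np
from quaternion import qrotote_v   # assumes a local copy of the module above

# 90-degree rotation about z, in w-i-j-k order.
q = np.array([math.cos(math.pi / 4), 0.0, 0.0, math.sin(math.pi / 4)], dtype=np.float32)
v = (1.0, 0.0, 0.0)
print(qrotote_v(q, v))   # ~ [0. 1. 0.]: the x axis rotated onto the y axis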
Example 64
Project: UR5_Controller   Author: tsinghua-rll   File: quaternion.py    MIT License 5 votes vote down vote up
def to_angle_axis(q0):
    rad = math.atan2(np.linalg.norm(q0[1:]), q0[0])
    sin_x = math.sin(rad)
    if abs(sin_x) < 1e-5:
        return np.array([sin_x * 2.0, q0[1], q0[2], q0[3]], dtype=np.float32)
    return np.array([rad * 2.0, q0[1] / sin_x, q0[2] / sin_x, q0[3] / sin_x], dtype=np.float32) 
Example 65
Project: UR5_Controller   Author: tsinghua-rll   File: quaternion.py    MIT License 5 votes vote down vote up
def from_matrix_to_q(mat):
    qw = math.sqrt(max(0, 1 + mat[0][0] + mat[1][1] + mat[2][2])) / 2.0
    qi = math.copysign(math.sqrt(max(0, 1 + mat[0][0] - mat[1][1] - mat[2][2])) / 2.0, mat[2][1] - mat[1][2])
    qj = math.copysign(math.sqrt(max(0, 1 - mat[0][0] + mat[1][1] - mat[2][2])) / 2.0, mat[0][2] - mat[2][0])
    qk = math.copysign(math.sqrt(max(0, 1 - mat[0][0] - mat[1][1] + mat[2][2])) / 2.0, mat[1][0] - mat[0][1])
    return np.asarray((qw, qi, qj, qk), dtype=np.float32) 
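A usage sketch, again assuming the file above is importable as quaternion.py: the 90-degree rotation about z expressed as a matrix should map to the same quaternion as in the previous sketch.

import numpy as np
from quaternion import from_matrix_to_q   # assumes a local copy of the module above

# Rotation matrix for 90 degrees about the z axis.
mat = np.array([[0.0, -1.0, 0.0],
                [1.0,  0.0, 0.0],
                [0.0,  0.0, 1.0]], dtype=np.float32)
print(from_matrix_to_q(mat))   # ~ [0.7071 0. 0. 0.7071] in w-i-j-k order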
Example 66
Project: UR5_Controller   Author: tsinghua-rll   File: quaternion.py    MIT License 5 votes vote down vote up
def normalize(q):
    q = np.asarray(q, dtype=np.float32) / np.linalg.norm(q)
    return q 
Example 67
Project: aospy   Author: spencerahill   File: data_loader.py    Apache License 2.0 5 votes vote down vote up
def _sel_var(ds, var, upcast_float32=True):
    """Select the specified variable by trying all possible alternative names.

    Parameters
    ----------
    ds : Dataset
        Dataset possibly containing var
    var : aospy.Var
        Variable to find data for
    upcast_float32 : bool (default True)
        Whether to cast a float32 DataArray up to float64

    Returns
    -------
    DataArray

    Raises
    ------
    LookupError
        If the variable is not found in the Dataset under any of its names

    """
    for name in var.names:
        try:
            da = ds[name].rename(var.name)
            if upcast_float32:
                return _maybe_cast_to_float64(da)
            else:
                return da
        except KeyError:
            pass
    msg = '{0} not found among names: {1} in\n{2}'.format(var, var.names, ds)
    raise LookupError(msg) 
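A toy illustration of the alias-lookup pattern above using plain xarray (not aospy's actual API): try each candidate name until one is present in the Dataset, otherwise raise.

import numpy as np
import xarray as xr

ds = xr.Dataset({"precip": ("time", np.zeros(3, dtype=np.float32))})
candidate_names = ["pr", "precip", "PRECT"]   # hypothetical aliases

for name in candidate_names:
    try:
        da = ds[name].rename("precipitation")
        break
    except KeyError:
        pass
else:
    raise LookupError("none of {0} found in\n{1}".format(candidate_names, ds))

print(da.dtype)   # float32 unless explicitly upcast to float64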
Example 68
Project: aospy   Author: spencerahill   File: test_data_loader.py    Apache License 2.0 5 votes vote down vote up
def test_load_variable_float32_to_float64(load_variable_data_loader,
                                          start_date, end_date):
    def preprocess(ds, **kwargs):
        # This function converts testing data to the float32 datatype
        return ds.astype(np.float32)
    load_variable_data_loader.upcast_float32 = True
    load_variable_data_loader.preprocess_func = preprocess
    result = load_variable_data_loader.load_variable(
        condensation_rain, start_date,
        end_date,
        intvl_in='monthly').dtype
    expected = np.float64
    assert result == expected 
Example 69
Project: aospy   Author: spencerahill   File: test_data_loader.py    Apache License 2.0 5 votes vote down vote up
def test_load_variable_maintain_float32(load_variable_data_loader,
                                        start_date, end_date):
    def preprocess(ds, **kwargs):
        # This function converts testing data to the float32 datatype
        return ds.astype(np.float32)
    load_variable_data_loader.preprocess_func = preprocess
    load_variable_data_loader.upcast_float32 = False
    result = load_variable_data_loader.load_variable(
        condensation_rain, start_date,
        end_date,
        intvl_in='monthly').dtype
    expected = np.float32
    assert result == expected 
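The two tests above exercise the optional float32-to-float64 upcast. A hypothetical stand-in for the internal helper they rely on (a sketch, not aospy's actual _maybe_cast_to_float64) could look like this:

import numpy as np
import xarray as xr

def maybe_cast_to_float64(da):
    # Upcast float32 data to float64; leave every other dtype untouched.
    return da.astype(np.float64) if da.dtype == np.float32 else da

da32 = xr.DataArray(np.zeros(4, dtype=np.float32))
print(maybe_cast_to_float64(da32).dtype)   # float64, as in the first test
print(da32.dtype)                          # float32 is kept when upcasting is off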
Example 70
Project: RF-Monitor   Author: EarToEarOak   File: receive.py    GNU General Public License v2.0 5 votes vote down vote up
def __stream_to_complex(self, stream):
        bytes_np = numpy.ctypeslib.as_array(stream)
        iq = bytes_np.astype(numpy.float32).view(numpy.complex64)
        iq /= 255 / 2
        iq -= 1 + 1j

        return iq 
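A self-contained sketch of the same unsigned 8-bit IQ to complex64 conversion (the method above reads the raw buffer from the SDR callback instead of a hand-made array):

import numpy as np

raw = np.array([0, 255, 127, 128], dtype=np.uint8)   # interleaved I, Q, I, Q ...
iq = raw.astype(np.float32).view(np.complex64)       # pair up floats as complex samples
iq /= 255 / 2                                        # scale to roughly [-1, 1]
iq -= 1 + 1j                                         # remove the DC offset
print(iq)   # samples centred near 0+0j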
Example 71
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: blob.py    MIT License 5 votes vote down vote up
def im_list_to_blob(ims):
  """Convert a list of images into a network input.

  Assumes images are already prepared (means subtracted, BGR order, ...).
  """
  max_shape = np.array([im.shape for im in ims]).max(axis=0)
  num_images = len(ims)
  blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),
                  dtype=np.float32)
  for i in range(num_images):
    im = ims[i]
    blob[i, 0:im.shape[0], 0:im.shape[1], :] = im

  return blob 
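A usage sketch, assuming im_list_to_blob above is available (e.g. from a local copy of blob.py): two images of different sizes are padded into one fixed-shape batch.

import numpy as np
from blob import im_list_to_blob   # hypothetical local copy of the module above

ims = [np.random.rand(480, 640, 3).astype(np.float32),
       np.random.rand(600, 800, 3).astype(np.float32)]
blob = im_list_to_blob(ims)
print(blob.shape)   # (2, 600, 800, 3); the smaller image is zero-padded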
Example 72
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: blob.py    MIT License 5 votes vote down vote up
def prep_im_for_blob(im, pixel_means, target_size, max_size):
  """Mean subtract and scale an image for use in a blob."""
  im = im.astype(np.float32, copy=False)
  im -= pixel_means
  im_shape = im.shape
  im_size_min = np.min(im_shape[0:2])
  im_size_max = np.max(im_shape[0:2])
  im_scale = float(target_size) / float(im_size_min)
  # Prevent the biggest axis from being more than MAX_SIZE
  if np.round(im_scale * im_size_max) > max_size:
    im_scale = float(max_size) / float(im_size_max)
  im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                  interpolation=cv2.INTER_LINEAR)

  return im, im_scale 
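A usage sketch, assuming prep_im_for_blob above is available: mean-subtract a BGR image and rescale its shorter side to 600 px, capping the longer side at 1000 px.

import numpy as np
from blob import prep_im_for_blob   # hypothetical local copy of the module above

im = (np.random.rand(375, 500, 3) * 255).astype(np.uint8)
pixel_means = np.array([[[102.9801, 115.9465, 122.7717]]])   # typical Caffe-style BGR means
im_prepped, im_scale = prep_im_for_blob(im, pixel_means, target_size=600, max_size=1000)
print(im_prepped.shape, im_scale)   # ~ (600, 800, 3) and 1.6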
Example 73
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: test.py    MIT License 5 votes vote down vote up
def _get_image_blob(im):
  """Converts an image into a network input.
  Arguments:
    im (ndarray): a color image in BGR order
  Returns:
    blob (ndarray): a data blob holding an image pyramid
    im_scale_factors (list): list of image scales (relative to im) used
      in the image pyramid
  """
  im_orig = im.astype(np.float32, copy=True)
  im_orig -= cfg.PIXEL_MEANS

  im_shape = im_orig.shape
  im_size_min = np.min(im_shape[0:2])
  im_size_max = np.max(im_shape[0:2])

  processed_ims = []
  im_scale_factors = []

  for target_size in cfg.TEST.SCALES:
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
      im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
    im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
            interpolation=cv2.INTER_LINEAR)
    im_scale_factors.append(im_scale)
    processed_ims.append(im)

  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)

  return blob, np.array(im_scale_factors) 
Example 74
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_crf_train.py    MIT License 4 votes vote down vote up
def inference(X, weights, bias, reuse = None, trainMode = True):
    word_vectors = tf.nn.embedding_lookup(WORDS, X)
    # [batch_size, 80, 50]

    length = GetLength(X)
    length_64 = tf.cast(length, tf.int64)
    reuse = None if trainMode else True

    #if trainMode:
    #  word_vectors = tf.nn.dropout(word_vectors, 0.5)
    with tf.variable_scope("rnn_fwbw", reuse = reuse) as scope:
        forward_output, _ = tf.nn.dynamic_rnn(
            tf.contrib.rnn.LSTMCell(FLAGS.num_hidden, reuse = reuse),
            word_vectors,
            dtype = tf.float32,
            sequence_length = length,
            scope = "RNN_forward")
        backward_output_, _ = tf.nn.dynamic_rnn(
            tf.contrib.rnn.LSTMCell(FLAGS.num_hidden, reuse = reuse),
            inputs = tf.reverse_sequence(word_vectors,
                                       length_64,
                                       seq_dim = 1),
            dtype = tf.float32,
            sequence_length = length,
            scope = "RNN_backword")

    backward_output = tf.reverse_sequence(backward_output_,
                                          length_64,
                                          seq_dim = 1)

    output = tf.concat([forward_output, backward_output], 2)
    # [batch_size, 80, 200]
    output = tf.reshape(output, [-1, FLAGS.num_hidden * 2])
    if trainMode:
        output = tf.nn.dropout(output, 0.5)

    matricized_unary_scores = tf.matmul(output, weights) + bias
    # [batch_size, 80, 4]
    unary_scores = tf.reshape(
        matricized_unary_scores,
        [-1, FLAGS.max_sentence_len, FLAGS.num_tags])

    return unary_scores, length 
Example 75
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm_crf_train.py    MIT License 4 votes vote down vote up
def inference(X, weights, bias, reuse = None, trainMode = True, type="char"):
    if type == "char":
        word_vectors = tf.nn.embedding_lookup(WORDS_char, X)
    elif type == "pinyin":
        word_vectors = tf.nn.embedding_lookup(WORDS_pinyin, X)
    elif type == "wubi":
        word_vectors = tf.nn.embedding_lookup(WORDS_wubi, X)
    else:
        raise ValueError("unknown embedding type: %s" % type)
    # [batch_size, 80, 100]

    length = GetLength(X)
    length_64 = tf.cast(length, tf.int64)
    reuse = None if trainMode else True

    #if trainMode:
    #  word_vectors = tf.nn.dropout(word_vectors, 0.5)

    with tf.variable_scope("rnn_fwbw", reuse = reuse) as scope:
        forward_output, _ = tf.nn.dynamic_rnn(
            tf.contrib.rnn.LSTMCell(FLAGS.num_hidden, reuse = reuse),
            word_vectors,
            dtype = tf.float32,
            sequence_length = length,
            scope = "RNN_forward")
        backward_output_, _ = tf.nn.dynamic_rnn(
            tf.contrib.rnn.LSTMCell(FLAGS.num_hidden, reuse = reuse),
            inputs = tf.reverse_sequence(word_vectors,
                                       length_64,
                                       seq_dim = 1),
            dtype = tf.float32,
            sequence_length = length,
            scope = "RNN_backward")

    backward_output = tf.reverse_sequence(backward_output_,
                                          length_64,
                                          seq_dim = 1)
    output = tf.concat([forward_output, backward_output], 2)
    # [batch_size, 80, 200]
    output = tf.reshape(output, [-1, FLAGS.num_hidden * 2])
    if trainMode:
        output = tf.nn.dropout(output, 0.5)

    matricized_unary_scores = tf.matmul(output, weights) + bias
    # [batch_size, 80, 4]
    unary_scores = tf.reshape(
        matricized_unary_scores,
        [-1, FLAGS.max_sentence_len, FLAGS.num_tags])

    return unary_scores, length 
Example 76
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm_crf_train.py    MIT License 4 votes vote down vote up
def inference(X, final_vectors, weights, bias, reuse = None, trainMode = True):
    #word_vectors = tf.nn.embedding_lookup(WORDS, X)
    # [batch_size, 80, 100]

    length = GetLength(X)
    length_64 = tf.cast(length, tf.int64)
    reuse = None if trainMode else True

    #if trainMode:
    #  word_vectors = tf.nn.dropout(word_vectors, 0.5)
    with tf.variable_scope("rnn_fwbw", reuse = reuse) as scope:
        forward_output, _ = tf.nn.dynamic_rnn(
            tf.contrib.rnn.LSTMCell(FLAGS.num_hidden, reuse = reuse),
            final_vectors,
            dtype = tf.float32,
            sequence_length = length,
            scope = "RNN_forward")
        backward_output_, _ = tf.nn.dynamic_rnn(
            tf.contrib.rnn.LSTMCell(FLAGS.num_hidden, reuse = reuse),
            inputs = tf.reverse_sequence(final_vectors,
                                       length_64,
                                       seq_dim = 1),
            dtype = tf.float32,
            sequence_length = length,
            scope = "RNN_backword")

    backward_output = tf.reverse_sequence(backward_output_,
                                          length_64,
                                          seq_dim = 1)

    output = tf.concat([forward_output, backward_output], 2)
    # [batch_size, 80, 200]
    output = tf.reshape(output, [-1, FLAGS.num_hidden * 2])
    if trainMode:
        output = tf.nn.dropout(output, 0.5)

    matricized_unary_scores = tf.matmul(output, weights) + bias
    # [batch_size, 80, 4]
    unary_scores = tf.reshape(
        matricized_unary_scores,
        [-1, FLAGS.max_sentence_len, FLAGS.num_tags])

    return unary_scores, length 
Example 77
Project: cat-bbs   Author: aleju   File: train.py    MIT License 4 votes vote down vote up
def bb_coords_to_grid(bb_coords_one, img_shape, grid_size):
    """Convert bounding box coordinates (corners) to ground truth heatmaps."""
    if isinstance(bb_coords_one, ia.KeypointsOnImage):
        bb_coords_one = bb_coords_one.keypoints

    # bb edges after augmentation
    x1b = min([kp.x for kp in bb_coords_one])
    x2b = max([kp.x for kp in bb_coords_one])
    y1b = min([kp.y for kp in bb_coords_one])
    y2b = max([kp.y for kp in bb_coords_one])

    # clip
    x1c = np.clip(x1b, 0, img_shape[1]-1)
    y1c = np.clip(y1b, 0, img_shape[0]-1)
    x2c = np.clip(x2b, 0, img_shape[1]-1)
    y2c = np.clip(y2b, 0, img_shape[0]-1)

    # project
    x1d = int((x1c / img_shape[1]) * grid_size)
    y1d = int((y1c / img_shape[0]) * grid_size)
    x2d = int((x2c / img_shape[1]) * grid_size)
    y2d = int((y2c / img_shape[0]) * grid_size)

    assert 0 <= x1d < grid_size
    assert 0 <= y1d < grid_size
    assert 0 <= x2d < grid_size
    assert 0 <= y2d < grid_size

    # output ground truth:
    # - 1 heatmap that is 1 everywhere where there is a bounding box
    # - 9 position sensitive heatmaps,
    #   e.g. the first one is 1 everywhere where there is the _top left corner_
    #        of a bounding box,
    #        the second one is 1 for the top center cell,
    #        the third one is 1 for the top right corner,
    #        ...
    grids = np.zeros((grid_size, grid_size, 1+9), dtype=np.float32)
    # first heatmap
    grids[y1d:y2d+1, x1d:x2d+1, 0] = 1
    # position sensitive heatmaps
    nb_cells_x = 3
    nb_cells_y = 3
    cell_width = (x2d - x1d) / nb_cells_x
    cell_height = (y2d - y1d) / nb_cells_y
    cell_counter = 0
    for j in range(nb_cells_y):
        cell_y1 = y1d + cell_height * j
        cell_y2 = cell_y1 + cell_height
        cell_y1_int = np.clip(int(math.floor(cell_y1)), 0, img_shape[0]-1)
        cell_y2_int = np.clip(int(math.floor(cell_y2)), 0, img_shape[0]-1)
        for i in range(nb_cells_x):
            cell_x1 = x1d + cell_width * i
            cell_x2 = cell_x1 + cell_width
            cell_x1_int = np.clip(int(math.floor(cell_x1)), 0, img_shape[1]-1)
            cell_x2_int = np.clip(int(math.floor(cell_x2)), 0, img_shape[1]-1)
            grids[cell_y1_int:cell_y2_int+1, cell_x1_int:cell_x2_int+1, 1+cell_counter] = 1
            cell_counter += 1
    return grids 
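A usage sketch, assuming bb_coords_to_grid above is available and imgaug is installed: one bounding box given by its four corner keypoints on a 128x128 image, mapped to an 8x8 grid.

import imgaug as ia
from train import bb_coords_to_grid   # hypothetical local copy of the module above

corners = [ia.Keypoint(x=32, y=32), ia.Keypoint(x=96, y=32),
           ia.Keypoint(x=96, y=96), ia.Keypoint(x=32, y=96)]
grids = bb_coords_to_grid(corners, img_shape=(128, 128, 3), grid_size=8)
print(grids.shape)            # (8, 8, 10): one full-box heatmap plus 9 position-sensitive maps
print(grids[:, :, 0].sum())   # number of grid cells covered by the box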
Example 78
Project: explirefit   Author: codogogo   File: io_helper.py    Apache License 2.0 4 votes vote down vote up
def load_embeddings_dict_with_norms(filepath, limit = None, special_tokens = None, print_load_progress = False, min_one_letter = False, skip_first_line = False):
	norms = []
	vocabulary = {}
	embeddings = []
	cnt = 0
	cnt_dict = 0
	emb_size = -1

	with codecs.open(filepath,'r',encoding='utf8', errors='replace') as f:
		for line in f:
			try:
				cnt += 1
				if limit and cnt > limit: 
					break
				if print_load_progress and (cnt % 1000 == 0): 
					print("Loading embeddings: " + str(cnt))
				if cnt > 1 or not skip_first_line:
					splt = line.split()
					word = splt[0]
					if word.startswith("en_"):
						word = word.replace("en_", "").strip()	
					if min_one_letter and not any(c.isalpha() for c in word):
						continue

					vec = [np.float32(x) for x in splt[1:]]
					if emb_size < 0 and len(vec) > 10:
						emb_size = len(vec)

					if emb_size > 0 and len(vec) == emb_size:
						vocabulary[word] = cnt_dict
						cnt_dict += 1
						norms.append(np.linalg.norm(vec, 2))
						embeddings.append(vec)			
			except(ValueError,IndexError,UnicodeEncodeError):
				print("Incorrect format line!")
	
	if special_tokens is not None:
		for st in special_tokens:
			vocabulary[st] = cnt_dict
			cnt_dict += 1
			vec = np.array([0.1 * (special_tokens.index(st) + 1)] * emb_size) #np.random.uniform(-1.0, 1.0, size = [emb_size])
			norms.append(np.linalg.norm(vec, 2))
			embeddings.append(vec)

	return vocabulary, np.array(embeddings, dtype = np.float32), norms 

############################################################################################################################ 
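A usage sketch, assuming the function above is importable from io_helper: write a tiny whitespace-separated embeddings file, then load it with two extra special tokens.

import numpy as np
from io_helper import load_embeddings_dict_with_norms   # the function above

with open("toy_vectors.txt", "w", encoding="utf8") as f:
    for word in ("cat", "dog", "fish"):
        vec = np.random.uniform(-1.0, 1.0, size=50)
        f.write(word + " " + " ".join("%.5f" % x for x in vec) + "\n")

vocab, embeddings, norms = load_embeddings_dict_with_norms(
    "toy_vectors.txt", special_tokens=["<PAD>", "<UNK>"])
print(len(vocab), embeddings.shape, embeddings.dtype)   # 5 (5, 50) float32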
Example 79
Project: TensorFlow-TransX   Author: thunlp   File: transR.py    MIT License 4 votes vote down vote up
def __init__(self, config, ent_init = None, rel_init = None):

		entity_total = config.entity
		relation_total = config.relation
		batch_size = config.batch_size
		sizeE = config.hidden_sizeE
		sizeR = config.hidden_sizeR
		margin = config.margin

		with tf.name_scope("read_inputs"):
			self.pos_h = tf.placeholder(tf.int32, [batch_size])
			self.pos_t = tf.placeholder(tf.int32, [batch_size])
			self.pos_r = tf.placeholder(tf.int32, [batch_size])
			self.neg_h = tf.placeholder(tf.int32, [batch_size])
			self.neg_t = tf.placeholder(tf.int32, [batch_size])
			self.neg_r = tf.placeholder(tf.int32, [batch_size])

		with tf.name_scope("embedding"):
			if ent_init != None:
				self.ent_embeddings = tf.Variable(np.loadtxt(ent_init), name = "ent_embedding", dtype = np.float32)
			else:
				self.ent_embeddings = tf.get_variable(name = "ent_embedding", shape = [entity_total, sizeE], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
			if rel_init != None:
				self.rel_embeddings = tf.Variable(np.loadtxt(rel_init), name = "rel_embedding", dtype = np.float32)
			else:
				self.rel_embeddings = tf.get_variable(name = "rel_embedding", shape = [relation_total, sizeR], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
			
			rel_matrix = np.zeros([relation_total, sizeR * sizeE], dtype = np.float32)
			for i in range(relation_total):
				for j in range(sizeR):
					for k in range(sizeE):
						if j == k:
							rel_matrix[i][j * sizeE + k] = 1.0
			self.rel_matrix = tf.Variable(rel_matrix, name = "rel_matrix")

		with tf.name_scope('lookup_embeddings'):
			pos_h_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, self.pos_h), [-1, sizeE, 1])
			pos_t_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, self.pos_t), [-1, sizeE, 1])
			pos_r_e = tf.reshape(tf.nn.embedding_lookup(self.rel_embeddings, self.pos_r), [-1, sizeR])
			neg_h_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, self.neg_h), [-1, sizeE, 1])
			neg_t_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, self.neg_t), [-1, sizeE, 1])
			neg_r_e = tf.reshape(tf.nn.embedding_lookup(self.rel_embeddings, self.neg_r), [-1, sizeR])			
			pos_matrix = tf.reshape(tf.nn.embedding_lookup(self.rel_matrix, self.pos_r), [-1, sizeR, sizeE])
			neg_matrix = tf.reshape(tf.nn.embedding_lookup(self.rel_matrix, self.neg_r), [-1, sizeR, sizeE])

			pos_h_e = tf.nn.l2_normalize(tf.reshape(tf.matmul(pos_matrix, pos_h_e), [-1, sizeR]), 1)
			pos_t_e = tf.nn.l2_normalize(tf.reshape(tf.matmul(pos_matrix, pos_t_e), [-1, sizeR]), 1)
			neg_h_e = tf.nn.l2_normalize(tf.reshape(tf.matmul(neg_matrix, neg_h_e), [-1, sizeR]), 1)
			neg_t_e = tf.nn.l2_normalize(tf.reshape(tf.matmul(neg_matrix, neg_t_e), [-1, sizeR]), 1)

		if config.L1_flag:
			pos = tf.reduce_sum(abs(pos_h_e + pos_r_e - pos_t_e), 1, keep_dims = True)
			neg = tf.reduce_sum(abs(neg_h_e + neg_r_e - neg_t_e), 1, keep_dims = True)
			self.predict = pos
		else:
			pos = tf.reduce_sum((pos_h_e + pos_r_e - pos_t_e) ** 2, 1, keep_dims = True)
			neg = tf.reduce_sum((neg_h_e + neg_r_e - neg_t_e) ** 2, 1, keep_dims = True)
			self.predict = pos

		with tf.name_scope("output"):
			self.loss = tf.reduce_sum(tf.maximum(pos - neg + margin, 0)) 
Example 80
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: minibatch.py    MIT License 4 votes vote down vote up
def get_minibatch(roidb, num_classes):
  """Given a roidb, construct a minibatch sampled from it."""
  num_images = len(roidb)
  # Sample random scales to use for each image in this batch
  random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
                  size=num_images)
  assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
    'num_images ({}) must divide BATCH_SIZE ({})'. \
    format(num_images, cfg.TRAIN.BATCH_SIZE)

  # Get the input image blob, formatted for caffe
  im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)

  blobs = {'data': im_blob}

  assert len(im_scales) == 1, "Single batch only"
  assert len(roidb) == 1, "Single batch only"
  
  # gt boxes: (x1, y1, x2, y2, cls)
  #if cfg.TRAIN.USE_ALL_GT:
    # Include all ground truth boxes
  #  gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
  #else:
    # For the COCO ground truth boxes, exclude the ones that are ''iscrowd'' 
  #  gt_inds = np.where(roidb[0]['gt_classes'] != 0 & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
  #gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
  #gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
  #gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
  boxes = roidb[0]['boxes'] * im_scales[0]
  batch_ind = 0 * np.ones((boxes.shape[0], 1))
  boxes = np.hstack((batch_ind, boxes))
  DEDUP_BOXES = 1. / 16.
  if DEDUP_BOXES > 0:
    v = np.array([1,1e3, 1e6, 1e9, 1e12])
    hashes = np.round(boxes * DEDUP_BOXES).dot(v)
    _, index, inv_index = np.unique(hashes, return_index=True,
                                    return_inverse=True)
    boxes = boxes[index, :]
  
  blobs['boxes'] = boxes
  blobs['im_info'] = np.array(
    [im_blob.shape[1], im_blob.shape[2], im_scales[0]],
    dtype=np.float32)
  blobs['labels'] = roidb[0]['labels']

  return blobs
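A self-contained sketch of the box de-duplication trick used above: quantize box coordinates to a 16-pixel grid and hash each row, so boxes that land on the same grid cells collapse to one entry.

import numpy as np

boxes = np.array([[0, 10.0, 20.0, 50.0, 80.0],
                  [0, 10.0, 20.0, 50.0, 80.0],       # exact duplicate of the first box
                  [0, 100.0, 200.0, 300.0, 400.0]], dtype=np.float32)
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(boxes * (1. / 16.)).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True, return_inverse=True)
print(boxes[index, :])   # two unique boxes; inv_index maps rows back to the originals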