Python numpy.clip() Examples

The following code examples show how to use numpy.clip(). They are drawn from open-source Python projects.
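
As a quick reference before the project examples, here is a minimal sketch of numpy.clip()'s behavior (the arrays and bounds are illustrative):

import numpy as np

a = np.arange(10)           # [0 1 2 3 4 5 6 7 8 9]
print(np.clip(a, 1, 8))     # [1 1 2 3 4 5 6 7 8 8] -- both bounds applied
print(np.clip(a, 3, None))  # [3 3 3 3 4 5 6 7 8 9] -- lower bound only
print(np.clip(a, None, 4))  # [0 1 2 3 4 4 4 4 4 4] -- upper bound only
np.clip(a, 1, 8, out=a)     # clips in place instead of allocating a new array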

Example 1
Project: Tacotron   Author: ElwynWang   File: signal_process.py    License: GNU General Public License v3.0
def spectrogrom2wav(mag):
    '''Generate a wave file from a spectrogram.'''
    # transpose
    mag = mag.T

    # de-normalize
    mag = (np.clip(mag, 0, 1) * Hp.max_db) - Hp.max_db + Hp.ref_db

    # to amplitude
    mag = np.power(10.0, mag * 0.05)

    # wav reconstruction
    wav = griffin_lim(mag)

    # de-preemphasis
    wav = signal.lfilter([1], [1, -Hp.preemphasis], wav)

    # trim
    wav, _ = librosa.effects.trim(wav)

    return wav.astype(np.float32) 
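
The clip in the de-normalization step forces the predicted magnitudes into [0, 1] before they are mapped back to decibels. A standalone sketch of that step, with hypothetical max_db and ref_db constants:

import numpy as np

max_db, ref_db = 100.0, 20.0   # hypothetical normalization constants

def denormalize(mag):
    # squash stray predictions into [0, 1], then map back to the dB range
    return np.clip(mag, 0, 1) * max_db - max_db + ref_db

print(denormalize(np.array([-0.1, 0.5, 1.2])))   # [-80. -30.  20.]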
Example 2
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    License: MIT License
def add_image(self, img):
        if self.print_progress and self.cur_images % self.progress_interval == 0:
            print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True)
            sys.stdout.flush()
        if self.shape is None:
            self.shape = img.shape
            self.resolution_log2 = int(np.log2(self.shape[1]))
            assert self.shape[0] in [1, 3]
            assert self.shape[1] == self.shape[2]
            assert self.shape[1] == 2**self.resolution_log2
            tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
            for lod in range(self.resolution_log2 - 1):
                tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod)
                self.tfr_writers.append(tf.python_io.TFRecordWriter(tfr_file, tfr_opt))
        assert img.shape == self.shape
        for lod, tfr_writer in enumerate(self.tfr_writers):
            if lod:
                img = img.astype(np.float32)
                img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25
            quant = np.rint(img).clip(0, 255).astype(np.uint8)
            ex = tf.train.Example(features=tf.train.Features(feature={
                'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)),
                'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))}))
            tfr_writer.write(ex.SerializeToString())
        self.cur_images += 1 
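
Note that clip() also exists as an ndarray method, which this example chains with np.rint() to quantize float pixels into valid uint8 values. The pattern in isolation:

import numpy as np

img = np.array([-3.7, 0.4, 128.0, 300.2])
quant = np.rint(img).clip(0, 255).astype(np.uint8)
print(quant)   # [  0   0 128 255]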
Example 3
Project: MODS_ConvNet   Author: santiagolopezg   File: filter_visualize.py    License: MIT License
def deprocess_image(x):
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    if K.image_dim_ordering() == 'th':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x



# build the network with best weights 
Example 4
Project: neural-fingerprinting   Author: StephanZheng   File: pgd_cw_whitebox.py    License: BSD 3-Clause "New" or "Revised" License
def perturb(self, x_nat, y, sess):
    """Given a set of examples (x_nat, y), returns a set of adversarial
       examples within epsilon of x_nat in l_infinity norm."""
    if self.rand:
      x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
    else:
      x = np.copy(x_nat)

    for i in range(self.k):
      grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
                                            self.model.y_input: y})

      x += self.a * np.sign(grad)

      x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
      x = np.clip(x, 0, 1) # ensure valid pixel range

    return x 
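
The two clips implement the projection step of projected gradient descent: the first keeps the iterate inside the l_infinity ball of radius epsilon around x_nat (np.clip broadcasts array-valued bounds elementwise), and the second keeps pixels in the valid [0, 1] range. A toy projection with a made-up epsilon:

import numpy as np

epsilon = 0.1
x_nat = np.array([0.0, 0.5, 0.95])
x = np.array([0.2, 0.3, 1.1])                     # after a gradient step
x = np.clip(x, x_nat - epsilon, x_nat + epsilon)  # project onto the eps-ball
x = np.clip(x, 0, 1)                              # project onto valid pixels
print(x)                                          # [0.1 0.4 1. ]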
Example 5
Project: neural-fingerprinting   Author: StephanZheng   File: pgd_whitebox.py    License: BSD 3-Clause "New" or "Revised" License
def perturb(self, x_nat, y, sess):
    """Given a set of examples (x_nat, y), returns a set of adversarial
       examples within epsilon of x_nat in l_infinity norm."""
    if self.rand:
      x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
    else:
      x = np.copy(x_nat)

    for i in range(self.k):
      grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
                                            self.model.y_input: y})

      x += self.a * np.sign(grad)

      x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
      x = np.clip(x, 0, 1) # ensure valid pixel range

    return x 
Example 6
Project: neural-fingerprinting   Author: StephanZheng   File: utils.py    License: BSD 3-Clause "New" or "Revised" License
def cleverhans_attack_wrapper(cleverhans_attack_fn, reset=True):
    def attack(a):
        session = tf.Session()
        with session.as_default():
            model = RVBCleverhansModel(a)
            adversarial_image = cleverhans_attack_fn(model, session, a)
            adversarial_image = np.squeeze(adversarial_image, axis=0)
            if reset:
                # optionally, reset to ignore other adversarials
                # found during the search
                a._reset()
            # run predictions to make sure the returned adversarial
            # is taken into account
            min_, max_ = a.bounds()
            adversarial_image = np.clip(adversarial_image, min_, max_)
            a.predictions(adversarial_image)
    return attack 
Example 7
Project: Deep_VoiceChanger   Author: pstuvwx   File: image.py    License: MIT License
def Chainer2PIL(data, rescale=True):
    data = np.array(data)
    if rescale:
        data *= 256
        # data += 128
    if data.dtype != np.uint8:
        data = np.clip(data, 0, 255)
        data = data.astype(np.uint8)
    if data.shape[0] == 1:
        buf = data.astype(np.uint8).reshape((data.shape[1], data.shape[2]))
    else:
        buf = np.zeros((data.shape[1], data.shape[2], data.shape[0]), dtype=np.uint8)
        for i in range(3):
            a = data[i,:,:]
            buf[:,:,i] = a
    img = Image.fromarray(buf)
    return img 
Example 8
Project: Deep_VoiceChanger   Author: pstuvwx   File: dataset.py    License: MIT License
def wave2input_image(wave, window, pos=0, pad=0):
    wave_image = np.hstack([wave[pos+i*sride:pos+(i+pad*2)*sride+dif].reshape(height+pad*2, sride) for i in range(256//sride)])[:,:254]
    wave_image *= window
    spectrum_image = np.fft.fft(wave_image, axis=1)
    input_image = np.abs(spectrum_image[:,:128].reshape(1, height+pad*2, 128), dtype=np.float32)

    np.clip(input_image, 1000, None, out=input_image)
    np.log(input_image, out=input_image)
    input_image += bias
    input_image /= scale

    if np.max(input_image) > 0.95:
        print('input image max greater than 0.95', np.max(input_image))
    if np.min(input_image) < 0.05:
        print('input image min smaller than 0.05', np.min(input_image))

    return input_image 
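
This example combines two less common clip features: None as an open bound (only a floor is applied) and out= to clip in place without allocating a new array. In isolation:

import numpy as np

x = np.array([500.0, 2000.0, 9e6])
np.clip(x, 1000, None, out=x)   # floor at 1000, no ceiling, modifies x in place
print(x)                        # [1.e+03 2.e+03 9.e+06]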
Example 9
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: vaegan_mxnet.py    License: Apache License 2.0
def visual(title, X, activation):
    '''Create a grid of images and save it as a single image.
    title : grid image name
    X : array of images
    activation : 'sigmoid' or 'tanh', the output activation used to scale pixels
    '''
    assert len(X.shape) == 4

    X = X.transpose((0, 2, 3, 1))
    if activation == 'sigmoid':
        X = np.clip((X)*(255.0), 0, 255).astype(np.uint8)
    elif activation == 'tanh':
        X = np.clip((X+1.0)*(255.0/2.0), 0, 255).astype(np.uint8)
    n = np.ceil(np.sqrt(X.shape[0]))
    buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
    for i, img in enumerate(X):
        fill_buf(buff, i, img, X.shape[1:3])
    cv2.imwrite('%s.jpg' % (title), buff) 
Example 10
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: atari_game.py    License: Apache License 2.0
def play(self, a):
        assert not self.episode_terminate,\
            "Warning, the episode seems to have terminated. " \
            "We need to call either game.begin_episode(max_episode_step) to continue a new " \
            "episode or game.start() to force restart."
        self.episode_step += 1
        reward = 0.0
        action = self.action_set[a]
        for i in range(self.frame_skip):
            reward += self.ale.act(action)
            self.ale.getScreenGrayscale(self.screen_buffer[i % self.screen_buffer_length, :, :])
        self.total_reward += reward
        self.episode_reward += reward
        ob = self.get_observation()
        terminate_flag = self.episode_terminate
        self.replay_memory.append(ob, a, numpy.clip(reward, -1, 1), terminate_flag)
        return reward, terminate_flag 
Example 11
Project: GST-Tacotron   Author: KinglittleQ   File: utils.py    License: MIT License
def spectrogram2wav(mag):
    '''Generate a wave file from a spectrogram.'''
    # transpose
    mag = mag.T

    # de-normalize
    mag = (np.clip(mag, 0, 1) * hp.max_db) - hp.max_db + hp.ref_db

    # to amplitude
    mag = np.power(10.0, mag * 0.05)

    # wav reconstruction
    wav = griffin_lim(mag)

    # de-preemphasis
    wav = signal.lfilter([1], [1, -hp.preemphasis], wav)

    # trim
    wav, _ = librosa.effects.trim(wav)

    return wav.astype(np.float32) 
Example 12
Project: soccer-matlab   Author: utra-robosoccer   File: humanoid_running.py    License: BSD 2-Clause "Simplified" License
def collect_observations(human):
	#print("ordered_joint_indices")
	#print(ordered_joint_indices)
	
	jointStates = p.getJointStates(human,ordered_joint_indices)
	j = np.array([current_relative_position(jointStates, human, *jtuple) for jtuple in ordered_joints]).flatten()
	#print("j")
	#print(j)
	body_xyz, (qx, qy, qz, qw) = p.getBasePositionAndOrientation(human)
	#print("body_xyz")
	#print(body_xyz, qx,qy,qz,qw)
	z = body_xyz[2]
	dummy.distance = body_xyz[0]
	if dummy.initial_z==None:
		dummy.initial_z = z
	(vx, vy, vz), _ = p.getBaseVelocity(human)
	more = np.array([z-dummy.initial_z, 0.1*vx, 0.1*vy, 0.1*vz, qx, qy, qz, qw])
	rcont = p.getContactPoints(human, -1, right_foot, -1)
	#print("rcont")
	#print(rcont)
	lcont = p.getContactPoints(human, -1, left_foot, -1)
	#print("lcont")
	#print(lcont)
	feet_contact = np.array([len(rcont)>0, len(lcont)>0])
	return np.clip( np.concatenate([more] + [j] + [feet_contact]), -5, +5) 
Example 13
Project: soccer-matlab   Author: utra-robosoccer   File: humanoid_running_vr_follow.py    License: BSD 2-Clause "Simplified" License
def collect_observations(human):
	#print("ordered_joint_indices")
	#print(ordered_joint_indices)
	
	jointStates = p.getJointStates(human,ordered_joint_indices)
	j = np.array([current_relative_position(jointStates, human, *jtuple) for jtuple in ordered_joints]).flatten()
	#print("j")
	#print(j)
	body_xyz, (qx, qy, qz, qw) = p.getBasePositionAndOrientation(human)
	#print("body_xyz")
	#print(body_xyz, qx,qy,qz,qw)
	z = body_xyz[2]
	dummy.distance = body_xyz[0]
	if dummy.initial_z==None:
		dummy.initial_z = z
	(vx, vy, vz), _ = p.getBaseVelocity(human)
	more = np.array([z-dummy.initial_z, 0.1*vx, 0.1*vy, 0.1*vz, qx, qy, qz, qw])
	rcont = p.getContactPoints(human, -1, right_foot, -1)
	#print("rcont")
	#print(rcont)
	lcont = p.getContactPoints(human, -1, left_foot, -1)
	#print("lcont")
	#print(lcont)
	feet_contact = np.array([len(rcont)>0, len(lcont)>0])
	return np.clip( np.concatenate([more] + [j] + [feet_contact]), -5, +5) 
Example 14
Project: soccer-matlab   Author: utra-robosoccer   File: motor.py    License: BSD 2-Clause "Simplified" License
def convert_to_torque(self, motor_commands, current_motor_angle,
                        current_motor_velocity):
    """Convert the commands (position control or torque control) to torque.

    Args:
      motor_commands: The desired motor angle if the motor is in position
        control mode. The pwm signal if the motor is in torque control mode.
      current_motor_angle: The motor angle at the current time step.
      current_motor_velocity: The motor velocity at the current time step.
    Returns:
      actual_torque: The torque that needs to be applied to the motor.
      observed_torque: The torque observed by the sensor.
    """
    if self._torque_control_enabled:
      pwm = motor_commands
    else:
      pwm = (-self._kp * (current_motor_angle - motor_commands)
             - self._kd * current_motor_velocity)
    pwm = np.clip(pwm, -1.0, 1.0)
    return self._convert_to_torque_from_pwm(pwm, current_motor_velocity) 
Example 15
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    License: MIT License
def save_wav(audio, output_wav_file):
    wav.write(output_wav_file, 16000, np.array(np.clip(np.round(audio), -2**15, 2**15-1), dtype=np.int16))
    print('output dB', db(audio)) 
Example 16
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    License: MIT License
def setup_graph(self, input_audio_batch, target_phrase): 
        batch_size = input_audio_batch.shape[0]
        weird = (input_audio_batch.shape[1] - 1) // 320 
        logits_arg2 = np.tile(weird, batch_size)
        dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
        dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
        
        pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
        seq_len = np.tile(weird, batch_size).astype(np.int32)
        
        with tf.variable_scope('', reuse=tf.AUTO_REUSE):
            
            inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
            len_batch = tf.placeholder(tf.float32, name='b')
            arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape, name='c')
            arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape, name='d')
            arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape, name='e')
            len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')
            
            logits = get_logits(inputs, arg2_logits)
            target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
            ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32), inputs=logits, sequence_length=len_seq)
            decoded, _ = tf.nn.ctc_greedy_decoder(logits, arg2_logits, merge_repeated=True)
            
            sess = tf.Session()
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, "models/session_dump")
            
        func1 = lambda a, b, c, d, e, f: sess.run(ctcloss, 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        func2 = lambda a, b, c, d, e, f: sess.run([ctcloss, decoded], 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        return (func1, func2) 
Example 17
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    License: MIT License
def getctcloss(self, input_audio_batch, target_phrase, decode=False):
        batch_size = input_audio_batch.shape[0]
        weird = (input_audio_batch.shape[1] - 1) // 320 
        logits_arg2 = np.tile(weird, batch_size)
        dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
        dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
        
        pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
        seq_len = np.tile(weird, batch_size).astype(np.int32)

        if decode:
            return self.funcs[1](pass_in, batch_size, logits_arg2, dense_arg1, dense_arg2, seq_len)
        else:
            return self.funcs[0](pass_in, batch_size, logits_arg2, dense_arg1, dense_arg2, seq_len) 
Example 18
Project: cat-bbs   Author: aleju   File: plotting.py    License: MIT License
def _line_to_xy(self, line_x, line_y, limit_y_min=None, limit_y_max=None):
        point_every = max(1, int(len(line_x) / self.nb_points_max))
        points_x = []
        points_y = []
        curr_sum = 0
        counter = 0
        last_idx = len(line_x) - 1
        for i in range(len(line_x)):
            batch_idx = line_x[i]
            if batch_idx > self.start_batch_idx:
                curr_sum += line_y[i]
                counter += 1
                if counter >= point_every or i == last_idx:
                    points_x.append(batch_idx)
                    y = curr_sum / counter
                    if limit_y_min is not None and limit_y_max is not None:
                        y = np.clip(y, limit_y_min, limit_y_max)
                    elif limit_y_min is not None:
                        y = max(y, limit_y_min)
                    elif limit_y_max is not None:
                        y = min(y, limit_y_max)
                    points_y.append(y)
                    counter = 0
                    curr_sum = 0

        return points_x, points_y 
Example 19
Project: cat-bbs   Author: aleju   File: bbs.py    License: MIT License
def fix_by_image_dimensions(self, height, width=None):
        if isinstance(height, (tuple, list)):
            assert width is None
            height, width = height[0], height[1]
        elif isinstance(height, (np.ndarray, np.generic)):
            assert width is None
            height, width = height.shape[0], height.shape[1]
        else:
            assert width is not None
            assert isinstance(height, int)
            assert isinstance(width, int)

        self.x1 = int(np.clip(self.x1, 0, width-1))
        self.x2 = int(np.clip(self.x2, 0, width-1))
        self.y1 = int(np.clip(self.y1, 0, height-1))
        self.y2 = int(np.clip(self.y2, 0, height-1))

        if self.x1 > self.x2:
            self.x1, self.x2 = self.x2, self.x1
        if self.y1 > self.y2:
            self.y1, self.y2 = self.y2, self.y1

        if self.x1 == self.x2:
            if self.x1 > 0:
                self.x1 = self.x1 - 1
            else:
                self.x2 = self.x2 + 1

        if self.y1 == self.y2:
            if self.y1 > 0:
                self.y1 = self.y1 - 1
            else:
                self.y2 = self.y2 + 1

        #self.width = self.x2 - self.x1
        #self.height = self.y2 - self.y1 
Example 20
Project: cat-bbs   Author: aleju   File: bbs.py    License: MIT License
def draw_on_image(self, img, color=[0, 255, 0], alpha=1.0, thickness=1, copy=True):
        assert img.dtype in [np.uint8, np.float32, np.int32, np.int64]

        result = np.copy(img) if copy else img
        for i in range(thickness):
            y = [self.y1-i, self.y1-i, self.y2+i, self.y2+i]
            x = [self.x1-i, self.x2+i, self.x2+i, self.x1-i]
            rr, cc = draw.polygon_perimeter(y, x, shape=img.shape)
            if alpha >= 0.99:
                result[rr, cc, 0] = color[0]
                result[rr, cc, 1] = color[1]
                result[rr, cc, 2] = color[2]
            else:
                if result.dtype == np.float32:
                    result[rr, cc, 0] = (1 - alpha) * result[rr, cc, 0] + alpha * color[0]
                    result[rr, cc, 1] = (1 - alpha) * result[rr, cc, 1] + alpha * color[1]
                    result[rr, cc, 2] = (1 - alpha) * result[rr, cc, 2] + alpha * color[2]
                    result = np.clip(result, 0, 255)
                else:
                    result = result.astype(np.float32)
                    result[rr, cc, 0] = (1 - alpha) * result[rr, cc, 0] + alpha * color[0]
                    result[rr, cc, 1] = (1 - alpha) * result[rr, cc, 1] + alpha * color[1]
                    result[rr, cc, 2] = (1 - alpha) * result[rr, cc, 2] + alpha * color[2]
                    result = np.clip(result, 0, 255).astype(np.uint8)

        return result 
Example 21
Project: cat-bbs   Author: aleju   File: bbs.py    License: MIT License
def draw_on_image_filled_binary(self, img, copy=True):
        if copy:
            img = np.copy(img)
        h, w = img.shape[0], img.shape[1]
        x1 = np.clip(self.x1, 0, w-1)
        x2 = np.clip(self.x2, 0, w-1)
        y1 = np.clip(self.y1, 0, h-1)
        y2 = np.clip(self.y2, 0, h-1)
        if x1 < x2 and y1 < y2:
            img[y1:y2, x1:x2] = 1
        return img 
Example 22
Project: cat-bbs   Author: aleju   File: common.py    License: MIT License
def draw_heatmap(img, heatmap, alpha=0.5):
    """Draw a heatmap overlay over an image."""
    assert len(heatmap.shape) == 2 or \
        (len(heatmap.shape) == 3 and heatmap.shape[2] == 1)
    assert img.dtype in [np.uint8, np.int32, np.int64]
    assert heatmap.dtype in [np.float32, np.float64]

    if img.shape[0:2] != heatmap.shape[0:2]:
        heatmap_rs = np.clip(heatmap * 255, 0, 255).astype(np.uint8)
        heatmap_rs = ia.imresize_single_image(
            heatmap_rs[..., np.newaxis],
            img.shape[0:2],
            interpolation="nearest"
        )
        heatmap = np.squeeze(heatmap_rs) / 255.0

    cmap = plt.get_cmap('jet')
    heatmap_cmapped = cmap(heatmap)
    heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
    heatmap_cmapped = heatmap_cmapped * 255
    mix = (1-alpha) * img + alpha * heatmap_cmapped
    mix = np.clip(mix, 0, 255).astype(np.uint8)
    return mix 
Example 23
Project: disentangling_conditional_gans   Author: zalandoresearch   File: util_scripts.py    License: MIT License
def generate_interpolation_video(run_id, snapshot=None, grid_size=[1,1], image_shrink=1, image_zoom=1, duration_sec=60.0, smoothing_sec=1.0, mp4=None, mp4_fps=30, mp4_codec='libx265', mp4_bitrate='16M', random_seed=1000, minibatch_size=8):
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if mp4 is None:
        mp4 = misc.get_id_string_for_network_pkl(network_pkl) + '-lerp.mp4'
    num_frames = int(np.rint(duration_sec * mp4_fps))
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    print('Generating latent vectors...')
    shape = [num_frames, np.prod(grid_size)] + Gs.input_shape[1:] # [frame, image, channel, component]
    all_latents = random_state.randn(*shape).astype(np.float32)
    all_latents = scipy.ndimage.gaussian_filter(all_latents, [smoothing_sec * mp4_fps] + [0] * len(Gs.input_shape), mode='wrap')
    all_latents /= np.sqrt(np.mean(np.square(all_latents)))

    # Frame generation func for moviepy.
    def make_frame(t):
        frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))
        latents = all_latents[frame_idx]
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents, labels, minibatch_size=minibatch_size, num_gpus=config.num_gpus, out_mul=127.5, out_add=127.5, out_shrink=image_shrink, out_dtype=np.uint8)
        grid = misc.create_image_grid(images, grid_size).transpose(1, 2, 0) # HWC
        if image_zoom > 1:
            grid = scipy.ndimage.zoom(grid, [image_zoom, image_zoom, 1], order=0)
        if grid.shape[2] == 1:
            grid = grid.repeat(3, 2) # grayscale => RGB
        return grid

    # Generate video.
    import moviepy.editor # pip install moviepy
    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    moviepy.editor.VideoClip(make_frame, duration=duration_sec).write_videofile(os.path.join(result_subdir, mp4), fps=mp4_fps, codec=mp4_codec, bitrate=mp4_bitrate)
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()

#----------------------------------------------------------------------------
# Generate MP4 video of training progress for a previous training run.
# To run, uncomment the appropriate line in config.py and launch train.py. 
Example 24
Project: disentangling_conditional_gans   Author: zalandoresearch   File: train.py    License: MIT License
def setup_snapshot_image_grid(G, training_set,
    size    = '1080p',      # '1080p' = to be viewed on 1080p display, '4k' = to be viewed on 4k display.
    layout  = 'random'):    # 'random' = grid contents are selected randomly, 'row_per_class' = each row corresponds to one class label.

    # Select size.
    gw = 1; gh = 1
    if size == '1080p':
        gw = np.clip(1920 // G.output_shape[3], 3, 32)
        gh = np.clip(1080 // G.output_shape[2], 2, 32)
    if size == '4k':
        gw = np.clip(3840 // G.output_shape[3], 7, 32)
        gh = np.clip(2160 // G.output_shape[2], 4, 32)

    # Fill in reals and labels.
    reals = np.zeros([gw * gh] + training_set.shape, dtype=training_set.dtype)
    labels = np.zeros([gw * gh, training_set.label_size], dtype=training_set.label_dtype)
    masks = np.zeros([gw * gh] + [1, training_set.shape[-1], training_set.shape[-1]], dtype=training_set.dtype)
    for idx in range(gw * gh):
        x = idx % gw; y = idx // gw
        while True:
            real, label, mask = training_set.get_minibatch_np(1)
            if layout == 'row_per_class' and training_set.label_size > 0:
                if label[0, y % training_set.label_size] == 0.0:
                    continue
            reals[idx] = real[0]
            labels[idx] = label[0]
            masks[idx] = mask[0]
            break

    # Generate latents.
    latents = misc.random_latents(gw * gh, G)
    return (gw, gh), reals, labels, latents, masks

#----------------------------------------------------------------------------
# Just-in-time processing of training images before feeding them to the networks. 
Example 25
Project: fbpconv_tf   Author: panakino   File: image_gen.py    License: GNU General Public License v3.0
def to_rgb(img):
    img = img.reshape(img.shape[0], img.shape[1])
    img[np.isnan(img)] = 0
    img -= np.amin(img)
    img /= np.amax(img)
    blue = np.clip(4*(0.75-img), 0, 1)
    red  = np.clip(4*(img-0.25), 0, 1)
    green= np.clip(44*np.fabs(img-0.5)-1., 0, 1)
    rgb = np.stack((red, green, blue), axis=2)
    return rgb 
Example 26
Project: fbpconv_tf   Author: panakino   File: image_util.py    License: GNU General Public License v3.0
def _process_data(self, data):
        # normalization
        data = np.clip(np.fabs(data), self.a_min, self.a_max)
        data -= np.amin(data)
        data /= np.amax(data)
        return data 
Example 27
Project: dc_tts   Author: Kyubyong   File: utils.py    License: Apache License 2.0
def spectrogram2wav(mag):
    '''Generate a wave file from a linear magnitude spectrogram.

    Args:
      mag: A numpy array of (T, 1+n_fft//2)

    Returns:
      wav: A 1-D numpy array.
    '''
    # transpose
    mag = mag.T

    # de-normalize
    mag = (np.clip(mag, 0, 1) * hp.max_db) - hp.max_db + hp.ref_db

    # to amplitude
    mag = np.power(10.0, mag * 0.05)

    # wav reconstruction
    wav = griffin_lim(mag**hp.power)

    # de-preemphasis
    wav = signal.lfilter([1], [1, -hp.preemphasis], wav)

    # trim
    wav, _ = librosa.effects.trim(wav)

    return wav.astype(np.float32) 
Example 28
Project: mmdetection   Author: open-mmlab   File: transforms.py    License: Apache License 2.0
def _resize_bboxes(self, results):
        img_shape = results['img_shape']
        for key in results.get('bbox_fields', []):
            bboxes = results[key] * results['scale_factor']
            bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1] - 1)
            bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0] - 1)
            results[key] = bboxes 
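
The strided slices 0::2 and 1::2 select the x and y coordinates of (x1, y1, x2, y2) boxes, so each axis is clipped against its own image dimension. A sketch with a hypothetical 100x200 (height, width) image:

import numpy as np

img_h, img_w = 100, 200
bboxes = np.array([[-5.0, 10.0, 250.0, 120.0]])            # x1, y1, x2, y2
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_w - 1)   # x coordinates
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_h - 1)   # y coordinates
print(bboxes)                                              # [[  0.  10. 199.  99.]]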
Example 29
Project: mmdetection   Author: open-mmlab   File: mask_target.py    License: Apache License 2.0
def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
    mask_size = _pair(cfg.mask_size)
    num_pos = pos_proposals.size(0)
    mask_targets = []
    if num_pos > 0:
        proposals_np = pos_proposals.cpu().numpy()
        _, maxh, maxw = gt_masks.shape
        proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw - 1)
        proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh - 1)
        pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
        for i in range(num_pos):
            gt_mask = gt_masks[pos_assigned_gt_inds[i]]
            bbox = proposals_np[i, :].astype(np.int32)
            x1, y1, x2, y2 = bbox
            w = np.maximum(x2 - x1 + 1, 1)
            h = np.maximum(y2 - y1 + 1, 1)
            # mask is uint8 both before and after resizing
            # mask_size (h, w) to (w, h)
            target = mmcv.imresize(gt_mask[y1:y1 + h, x1:x1 + w],
                                   mask_size[::-1])
            mask_targets.append(target)
        mask_targets = torch.from_numpy(np.stack(mask_targets)).float().to(
            pos_proposals.device)
    else:
        mask_targets = pos_proposals.new_zeros((0, ) + mask_size)
    return mask_targets 
Example 30
Project: PIC   Author: ameroyer   File: utils.py    License: MIT License
def lab_to_rgb(x, eps=1e-8):
    """Converts a lab image [0; 100] [-127; 128] [-128; 127] to a valid RGB image."""
    x_rectified = np.array(x)
    upper_bound = 200 * (x[..., 0] + 16.) / 116. - eps
    x_rectified[..., 2] = np.clip(x_rectified[..., 2], - float('inf'), upper_bound)
    return np.array([lab2rgb(y) * 255. for y in x_rectified]).astype(np.uint8) 
Example 31
Project: neural-fingerprinting   Author: StephanZheng   File: run_attacks_and_defenses.py    License: BSD 3-Clause "New" or "Revised" License
def clip_and_copy_attack_outputs(self, attack_name, is_targeted):
    """Clips results of attack and copy it to directory with all images.

    Args:
      attack_name: name of the attack.
      is_targeted: if True then attack is targeted, otherwise non-targeted.
    """
    if is_targeted:
      self._targeted_attack_names.add(attack_name)
    else:
      self._attack_names.add(attack_name)
    attack_dir = os.path.join(self.targeted_attacks_output_dir
                              if is_targeted
                              else self.attacks_output_dir,
                              attack_name)
    for fname in os.listdir(attack_dir):
      if not (fname.endswith('.png') or fname.endswith('.jpg')):
        continue
      image_id = fname[:-4]
      if image_id not in self.dataset_max_clip:
        continue
      image_max_clip = self.dataset_max_clip[image_id]
      image_min_clip = self.dataset_min_clip[image_id]
      adversarial_image = np.array(
          Image.open(os.path.join(attack_dir, fname)).convert('RGB'))
      clipped_adv_image = np.clip(adversarial_image,
                                  image_min_clip,
                                  image_max_clip)
      output_basename = '{0:08d}'.format(self._output_image_idx)
      self._output_image_idx += 1
      self._output_to_attack_mapping[output_basename] = (attack_name,
                                                         is_targeted,
                                                         image_id)
      if is_targeted:
        self._targeted_attack_image_count += 1
      else:
        self._attack_image_count += 1
      Image.fromarray(clipped_adv_image).save(
          os.path.join(self.all_adv_examples_dir, output_basename + '.png')) 
Example 32
Project: Griffin_lim   Author: candlewill   File: audio.py    License: MIT License
def _normalize(S):
    return np.clip(
        (2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value,
        -hparams.max_abs_value, hparams.max_abs_value) 
Example 33
Project: Griffin_lim   Author: candlewill   File: audio.py    License: MIT License
def _denormalize(D):
    return (((np.clip(D, -hparams.max_abs_value,
                      hparams.max_abs_value) + hparams.max_abs_value) * -hparams.min_level_db / (
                     2 * hparams.max_abs_value))
            + hparams.min_level_db) 
Example 34
Project: ML_from_scratch   Author: jarfa   File: util.py    License: Apache License 2.0
def logloss(observed, predicted, trim=1e-9):
    # keep loss from being infinite
    predicted = np.clip(predicted, trim, 1.0 - trim)
    return -np.mean(
        observed * np.log(predicted) + 
        (1. - observed) * np.log(1. - predicted)
    ) 
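
Without the trim, a prediction of exactly 0 or 1 on the wrong side would send np.log to -inf and make the loss infinite; clipping into [trim, 1 - trim] keeps it finite. For example:

import numpy as np

observed = np.array([1.0, 0.0])
predicted = np.array([0.0, 1.0])   # maximally wrong predictions
trimmed = np.clip(predicted, 1e-9, 1 - 1e-9)
loss = -np.mean(observed * np.log(trimmed) +
                (1. - observed) * np.log(1. - trimmed))
print(loss)                        # ~20.7 instead of inf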
Example 35
Project: nek-type-a   Author: ecopoesis   File: body.py    License: Apache License 2.0
def angle(p1, p2, p3):
     v1 = unit_vector(p1 - p2)
     v2 = unit_vector(p2 - p3)

     return np.degrees(np.arccos(np.clip(np.dot(v1, v2), -1.0, 1.0))) 
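
The clip here guards np.arccos against floating-point error: the dot product of two unit vectors can overshoot 1.0 by one ulp, and arccos returns nan outside [-1, 1]. For instance:

import numpy as np

d = 1.0000000000000002                    # a unit-vector dot product that overshot 1.0
print(np.arccos(d))                       # nan (with a RuntimeWarning)
print(np.arccos(np.clip(d, -1.0, 1.0)))   # 0.0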
Example 36
Project: deep-learning-note   Author: wdxtub   File: 7_visualize_filters.py    License: MIT License
def deprocess_image(x):
    # convert a tensor into a valid image
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1

    x += 0.5
    x = np.clip(x, 0, 1)

    x *= 255
    x = np.clip(x, 0, 255).astype('uint8')
    return x 
Example 37
Project: deep-learning-note   Author: wdxtub   File: utils.py    License: MIT License
def deprocess_image(x):
    # utility function: convert a tensor into a valid image
    if K.image_data_format() == 'channels_first':
        x = x.reshape((3, x.shape[2], x.shape[3]))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((x.shape[1], x.shape[2], 3))
    
    x /= 2.
    x += 0.3
    x *= 255.
    x = np.clip(x, 0, 255).astype('uint8')
    return x 
Example 38
Project: deep-learning-note   Author: wdxtub   File: 3_nerual_style_transfer.py    License: MIT License
def deprocess_image(x):
    # add back the ImageNet mean pixel values
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    x = x[:, :, ::-1]
    x = np.clip(x, 0, 255).astype('uint8')
    return x 
Example 39
Project: NiBetaSeries   Author: HBClab   File: nilearn.py    License: MIT License
def _fisher_r_to_z(x):
    import numpy as np
    # correct any rounding errors
    # correlations cannot be greater than 1.
    x = np.clip(x, -1, 1)

    return np.arctanh(x) 
Example 40
Project: neural-pipeline   Author: toodef   File: tensorboard.py    License: MIT License
def update_losses(self, losses: {}) -> None:
        """
        Update monitor

        :param losses: losses values with keys 'train' and 'validation'
        """
        if self.__writer is None:
            return

        def on_loss(name: str, values: np.ndarray) -> None:
            self.__writer.add_scalars('loss', {name: np.mean(values)}, global_step=self.epoch_num)
            self.__writer.add_histogram('{}/loss_hist'.format(name), np.clip(values, -1, 1).astype(np.float32),
                                        global_step=self.epoch_num, bins=np.linspace(-1, 1, num=11).astype(np.float32))

        self._iterate_by_losses(losses, on_loss) 
Example 41
Project: rl_3d   Author: avdmitry   File: env_lab.py    License: MIT License
def MapActions(self, action_raw):
        self.action = np.zeros([self.num_actions])

        if (action_raw == 0):
            self.action[self.indices["LOOK_LEFT_RIGHT_PIXELS_PER_FRAME"]] = -25
        elif (action_raw == 1):
            self.action[self.indices["LOOK_LEFT_RIGHT_PIXELS_PER_FRAME"]] = 25

        """if (action_raw==2):
            self.action[self.indices["LOOK_DOWN_UP_PIXELS_PER_FRAME"]] = -25
        elif (action_raw==3):
            self.action[self.indices["LOOK_DOWN_UP_PIXELS_PER_FRAME"]] = 25

        if (action_raw==4):
            self.action[self.indices["STRAFE_LEFT_RIGHT"]] = -1
        elif (action_raw==5):
            self.action[self.indices["STRAFE_LEFT_RIGHT"]] = 1

        if (action_raw==6):
            self.action[self.indices["MOVE_BACK_FORWARD"]] = -1
        el"""
        if (action_raw == 2):  # 7
            self.action[self.indices["MOVE_BACK_FORWARD"]] = 1

        # all binary actions need reset
        """if (action_raw==8):
            self.action[self.indices["FIRE"]] = 0
        elif (action_raw==9):
            self.action[self.indices["FIRE"]] = 1

        if (action_raw==10):
            self.action[self.indices["JUMP"]] = 0
        elif (action_raw==11):
            self.action[self.indices["JUMP"]] = 1

        if (action_raw==12):
            self.action[self.indices["CROUCH"]] = 0
        elif (action_raw==13):
            self.action[self.indices["CROUCH"]] = 1"""

        return np.clip(self.action, self.mins, self.maxs).astype(np.intc) 
Example 42
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: gradcam.py    License: Apache License 2.0
def to_grayscale(cv2im):
    """Convert gradients to grayscale. This gives a saliency map."""
    # How strongly does each position activate the output
    grayscale_im = np.sum(np.abs(cv2im), axis=0)

    # Normalize between min and 99th percentile
    im_max = np.percentile(grayscale_im, 99)
    im_min = np.min(grayscale_im)
    grayscale_im = np.clip((grayscale_im - im_min) / (im_max - im_min), 0, 1)

    grayscale_im = np.expand_dims(grayscale_im, axis=0)
    return grayscale_im 
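
Because the upper reference is the 99th percentile rather than the maximum, roughly the top 1% of values scale past 1.0, and the clip folds those outliers back into [0, 1]. A sketch:

import numpy as np

g = np.array([0.0, 1.0, 2.0, 100.0])   # one extreme outlier
im_max = np.percentile(g, 99)          # ~97.1, below the true max
im_min = np.min(g)
print(np.clip((g - im_min) / (im_max - im_min), 0, 1))
# approximately [0. 0.0103 0.0206 1.]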
Example 43
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: nstyle.py    License: Apache License 2.0
def PostprocessImage(img):
    img = np.resize(img, (3, img.shape[2], img.shape[3]))
    img[0, :] += 123.68
    img[1, :] += 116.779
    img[2, :] += 103.939
    img = np.swapaxes(img, 1, 2)
    img = np.swapaxes(img, 0, 2)
    img = np.clip(img, 0, 255)
    return img.astype('uint8') 
Example 44
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: data_processing.py    License: Apache License 2.0
def PostprocessImage(img):
    img = np.resize(img, (3, img.shape[2], img.shape[3]))
    img[0, :] += 123.68
    img[1, :] += 116.779
    img[2, :] += 103.939
    img = np.swapaxes(img, 1, 2)
    img = np.swapaxes(img, 0, 2)
    img = np.clip(img, 0, 255)
    return img.astype('uint8') 
Example 45
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: dcgan.py    License: Apache License 2.0
def visual(title, X, name):
    assert len(X.shape) == 4
    X = X.transpose((0, 2, 3, 1))
    X = np.clip((X - np.min(X))*(255.0/(np.max(X) - np.min(X))), 0, 255).astype(np.uint8)
    n = np.ceil(np.sqrt(X.shape[0]))
    buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
    for i, img in enumerate(X):
        fill_buf(buff, i, img, X.shape[1:3])
    buff = buff[:,:,::-1]
    plt.imshow(buff)
    plt.title(title)
    plt.savefig(name) 
Example 46
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: operators.py    License: Apache License 2.0
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # TODO Backward using NDArray will cause some troubles see `https://github.com/dmlc/mxnet/issues/1720'
        x = out_data[0].asnumpy()
        action = in_data[1].asnumpy().astype(numpy.int)
        reward = in_data[2].asnumpy()
        dx = in_grad[0]
        ret = numpy.zeros(shape=dx.shape, dtype=numpy.float32)
        ret[numpy.arange(action.shape[0]), action] \
            = numpy.clip(x[numpy.arange(action.shape[0]), action] - reward, -1, 1)
        self.assign(dx, req[0], ret) 
Example 47
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: operators.py    License: Apache License 2.0
def backward(self, out_grad, in_data, out_data, in_grad):
        x = out_data[0]
        action = in_data[1].astype(numpy.int)
        reward = in_data[2]
        dx = in_grad[0]
        dx[:] = 0
        dx[numpy.arange(action.shape[0]), action] \
            = numpy.clip(x[numpy.arange(action.shape[0]), action] - reward, -1, 1) 
Example 48
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: strategies.py    License: Apache License 2.0
def get_action(self, obs, policy):

        # get_action accepts a 2D tensor with one row
        obs = obs.reshape((1, -1))
        action = policy.get_action(obs)
        increment = self.evolve_state()

        return np.clip(action + increment,
                       self.action_space.low,
                       self.action_space.high) 
Example 49
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    License: Apache License 2.0
def test_clip():
    data = mx.symbol.Variable('data')
    shape = (30, 30)
    data_tmp = np.random.uniform(-1, 1, shape)
    test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
    check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
    check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
                            [np.where(data_tmp < 0.6, [1], [0]) * np.where(data_tmp > -0.6, [1], [0])]) 
Example 50
Project: FCOS_GluonCV   Author: DetectionTeamUCAS   File: simple_pose.py    License: Apache License 2.0
def __call__(self, src, label, img_path):
        cv2 = try_import_cv2()
        joints_3d = label['joints_3d']
        center = label['center']
        scale = label['scale']
        # score = label.get('score', 1)

        # rescale
        sf = self._scale_factor
        scale = scale * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)

        # rotation
        rf = self._rotation_factor
        r = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) if random.random() <= 0.6 else 0

        joints = joints_3d
        if self._random_flip and random.random() > 0.5:
            # src, fliped = random_flip_image(src, px=0.5, py=0)
            # if fliped[0]:
            src = src[:, ::-1, :]
            joints = flip_joints_3d(joints_3d, src.shape[1], self._joint_pairs)
            center[0] = src.shape[1] - center[0] - 1

        h, w = self._image_size
        trans = get_affine_transform(center, scale, r, [w, h])
        img = cv2.warpAffine(src.asnumpy(), trans, (int(w), int(h)), flags=cv2.INTER_LINEAR)

        # deal with joints visibility
        for i in range(self._num_joints):
            if joints[i, 0, 1] > 0.0:
                joints[i, 0:2, 0] = affine_transform(joints[i, 0:2, 0], trans)

        # generate training targets
        target, target_weight = self._target_generator(joints)

        # to tensor
        img = mx.nd.image.to_tensor(mx.nd.array(img))
        img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)
        return img, target, target_weight, img_path 
Example 51
Project: DOTA_models   Author: ringringyi   File: generate_itb_data.py    License: Apache License 2.0
def get_data_batch(batch_size, T, rng, u_std):
  u_bxt = rng.randn(batch_size, T) * u_std
  running_sum_b = np.zeros([batch_size])
  labels_bxt = np.zeros([batch_size, T])
  for t in xrange(T):
    running_sum_b += u_bxt[:, t]
    labels_bxt[:, t] += running_sum_b
  labels_bxt = np.clip(labels_bxt, -1, 1)
  return u_bxt, labels_bxt 
Example 52
Project: soccer-matlab   Author: utra-robosoccer   File: policies.py    License: BSD 2-Clause "Simplified" License
def act(self, ob):
    ob = self.observation_filter(ob, update=self.update_filter)
    matrix_weights = np.reshape(self.weights, (self.ac_dim, self.ob_dim))
    return np.clip(np.dot(matrix_weights, ob), -1.0, 1.0) 
Example 53
Project: soccer-matlab   Author: utra-robosoccer   File: motor.py    License: BSD 2-Clause "Simplified" License
def convert_to_torque(self,
                        motor_commands,
                        motor_angle,
                        motor_velocity,
                        true_motor_velocity,
                        kp=None,
                        kd=None):
    """Convert the commands (position control or torque control) to torque.

    Args:
      motor_commands: The desired motor angle if the motor is in position
        control mode. The pwm signal if the motor is in torque control mode.
      motor_angle: The motor angle observed at the current time step. It is
        actually the true motor angle observed a few milliseconds ago (pd
        latency).
      motor_velocity: The motor velocity observed at the current time step, it
        is actually the true motor velocity a few milliseconds ago (pd latency).
      true_motor_velocity: The true motor velocity. The true velocity is used
        to compute back EMF voltage and viscous damping.
      kp: Proportional gains for the motors' PD controllers. If not provided, it
        uses the default kp of the minitaur for all the motors.
      kd: Derivative gains for the motors' PD controllers. If not provided, it
        uses the default kp of the minitaur for all the motors.

    Returns:
      actual_torque: The torque that needs to be applied to the motor.
      observed_torque: The torque observed by the sensor.
    """
    if self._torque_control_enabled:
      pwm = motor_commands
    else:
      if kp is None:
        kp = np.full(NUM_MOTORS, self._kp)
      if kd is None:
        kd = np.full(NUM_MOTORS, self._kd)
      pwm = -1 * kp * (motor_angle - motor_commands) - kd * motor_velocity

    pwm = np.clip(pwm, -1.0, 1.0)
    return self._convert_to_torque_from_pwm(pwm, true_motor_velocity) 
Example 54
Project: soccer-matlab   Author: utra-robosoccer   File: wrappers.py    License: BSD 2-Clause "Simplified" License
def step(self, action):
    action_space = self._env.action_space
    action = np.clip(action, action_space.low, action_space.high)
    return self._env.step(action) 
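
Clipping an action into an environment's box bounds before stepping is a common wrapper pattern in RL code; np.clip accepts the per-dimension low/high arrays directly. With made-up bounds:

import numpy as np

low = np.array([-1.0, -0.5])
high = np.array([1.0, 0.5])
action = np.array([1.7, -0.2])
print(np.clip(action, low, high))   # [ 1.  -0.2]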
Example 55
Project: soccer-matlab   Author: utra-robosoccer   File: motor.py    License: BSD 2-Clause "Simplified" License
def _convert_to_torque_from_pwm(self, pwm, current_motor_velocity):
    """Convert the pwm signal to torque.

    Args:
      pwm: The pulse width modulation.
      current_motor_velocity: The motor velocity at the current time step.
    Returns:
      actual_torque: The torque that needs to be applied to the motor.
      observed_torque: The torque observed by the sensor.
    """
    observed_torque = np.clip(
        self._torque_constant * (pwm * self._voltage / self._resistance),
        -OBSERVED_TORQUE_LIMIT, OBSERVED_TORQUE_LIMIT)

    # Net voltage is clipped at 50V by diodes on the motor controller.
    voltage_net = np.clip(pwm * self._voltage -
                          (self._torque_constant + self._viscous_damping)
                          * current_motor_velocity,
                          -VOLTAGE_CLIPPING, VOLTAGE_CLIPPING)
    current = voltage_net / self._resistance
    current_sign = np.sign(current)
    current_magnitude = np.absolute(current)

    # Saturate torque based on empirical current relation.
    actual_torque = np.interp(current_magnitude, self._current_table,
                              self._torque_table)
    actual_torque = np.multiply(current_sign, actual_torque)
    return actual_torque, observed_torque 
Example 56
Project: soccer-matlab   Author: utra-robosoccer   File: robot_locomotors.py    License: BSD 2-Clause "Simplified" License
def apply_action(self, a):
		assert (np.isfinite(a).all())
		for n, j in enumerate(self.ordered_joints):
			j.set_motor_torque(self.power * j.power_coef * float(np.clip(a[n], -1, +1))) 
Example 57
Project: soccer-matlab   Author: utra-robosoccer   File: robot_locomotors.py    License: BSD 2-Clause "Simplified" License
def calc_state(self):
		j = np.array([j.current_relative_position() for j in self.ordered_joints], dtype=np.float32).flatten()
		# even elements [0::2] position, scaled to -1..+1 between limits
		# odd elements  [1::2] angular speed, scaled to show -1..+1
		self.joint_speeds = j[1::2]
		self.joints_at_limit = np.count_nonzero(np.abs(j[0::2]) > 0.99)

		body_pose = self.robot_body.pose()
		parts_xyz = np.array([p.pose().xyz() for p in self.parts.values()]).flatten()
		self.body_xyz = (
		parts_xyz[0::3].mean(), parts_xyz[1::3].mean(), body_pose.xyz()[2])  # torso z is more informative than mean z
		self.body_rpy = body_pose.rpy()
		z = self.body_xyz[2]
		if self.initial_z == None:
			self.initial_z = z
		r, p, yaw = self.body_rpy
		self.walk_target_theta = np.arctan2(self.walk_target_y - self.body_xyz[1],
											self.walk_target_x - self.body_xyz[0])
		self.walk_target_dist = np.linalg.norm(
			[self.walk_target_y - self.body_xyz[1], self.walk_target_x - self.body_xyz[0]])
		angle_to_target = self.walk_target_theta - yaw

		rot_speed = np.array(
			[[np.cos(-yaw), -np.sin(-yaw), 0],
			 [np.sin(-yaw), np.cos(-yaw), 0],
			 [		0,			 0, 1]]
		)
		vx, vy, vz = np.dot(rot_speed, self.robot_body.speed())  # rotate speed back to body point of view

		more = np.array([ z-self.initial_z,
			np.sin(angle_to_target), np.cos(angle_to_target),
			0.3* vx , 0.3* vy , 0.3* vz ,  # 0.3 is just scaling typical speed into -1..+1, no physical sense here
			r, p], dtype=np.float32)
		return np.clip( np.concatenate([more] + [j] + [self.feet_contact]), -5, +5) 
Example 58
Project: soccer-matlab   Author: utra-robosoccer   File: robot_locomotors.py    License: BSD 2-Clause "Simplified" License
def potential_leak(self):
		z = self.body_xyz[2]		  # 0.00 .. 0.8 .. 1.05 normal walk, 1.2 when jumping
		z = np.clip(z, 0, 0.8)
		return z/0.8 + 1.0			# 1.00 .. 2.0 
Example 59
Project: soccer-matlab   Author: utra-robosoccer   File: robot_manipulators.py    License: BSD 2-Clause "Simplified" License
def apply_action(self, a):
		assert (np.isfinite(a).all())
		self.central_joint.set_motor_torque(0.05 * float(np.clip(a[0], -1, +1)))
		self.elbow_joint.set_motor_torque(0.05 * float(np.clip(a[1], -1, +1))) 
Example 60
Project: soccer-matlab   Author: utra-robosoccer   File: robot_manipulators.py    License: BSD 2-Clause "Simplified" License
def apply_action(self, a):
		assert (np.isfinite(a).all())
		self.shoulder_pan_joint.set_motor_torque(0.05 * float(np.clip(a[0], -1, +1)))
		self.shoulder_lift_joint.set_motor_torque(0.05 * float(np.clip(a[1], -1, +1)))
		self.upper_arm_roll_joint.set_motor_torque(0.05 * float(np.clip(a[2], -1, +1)))
		self.elbow_flex_joint.set_motor_torque(0.05 * float(np.clip(a[3], -1, +1)))
		self.upper_arm_roll_joint.set_motor_torque(0.05 * float(np.clip(a[4], -1, +1)))
		self.wrist_flex_joint.set_motor_torque(0.05 * float(np.clip(a[5], -1, +1)))
		self.wrist_roll_joint.set_motor_torque(0.05 * float(np.clip(a[6], -1, +1))) 
Example 61
Project: soccer-matlab   Author: utra-robosoccer   File: robot_pendula.py    License: BSD 2-Clause "Simplified" License
def apply_action(self, a):
		assert( np.isfinite(a).all() )
		self.slider.set_motor_torque( 200*float(np.clip(a[0], -1, +1)) ) 
Example 62
Project: cat-bbs   Author: aleju   File: train.py    License: MIT License
def bb_coords_to_grid(bb_coords_one, img_shape, grid_size):
    """Convert bounding box coordinates (corners) to ground truth heatmaps."""
    if isinstance(bb_coords_one, ia.KeypointsOnImage):
        bb_coords_one = bb_coords_one.keypoints

    # bb edges after augmentation
    x1b = min([kp.x for kp in bb_coords_one])
    x2b = max([kp.x for kp in bb_coords_one])
    y1b = min([kp.y for kp in bb_coords_one])
    y2b = max([kp.y for kp in bb_coords_one])

    # clip
    x1c = np.clip(x1b, 0, img_shape[1]-1)
    y1c = np.clip(y1b, 0, img_shape[0]-1)
    x2c = np.clip(x2b, 0, img_shape[1]-1)
    y2c = np.clip(y2b, 0, img_shape[0]-1)

    # project
    x1d = int((x1c / img_shape[1]) * grid_size)
    y1d = int((y1c / img_shape[0]) * grid_size)
    x2d = int((x2c / img_shape[1]) * grid_size)
    y2d = int((y2c / img_shape[0]) * grid_size)

    assert 0 <= x1d < grid_size
    assert 0 <= y1d < grid_size
    assert 0 <= x2d < grid_size
    assert 0 <= y2d < grid_size

    # output ground truth:
    # - 1 heatmap that is 1 everywhere where there is a bounding box
    # - 9 position sensitive heatmaps,
    #   e.g. the first one is 1 everywhere where there is the _top left corner_
    #        of a bounding box,
    #        the second one is 1 for the top center cell,
    #        the third one is 1 for the top right corner,
    #        ...
    grids = np.zeros((grid_size, grid_size, 1+9), dtype=np.float32)
    # first heatmap
    grids[y1d:y2d+1, x1d:x2d+1, 0] = 1
    # position sensitive heatmaps
    nb_cells_x = 3
    nb_cells_y = 3
    cell_width = (x2d - x1d) / nb_cells_x
    cell_height = (y2d - y1d) / nb_cells_y
    cell_counter = 0
    for j in range(nb_cells_y):
        cell_y1 = y1d + cell_height * j
        cell_y2 = cell_y1 + cell_height
        cell_y1_int = np.clip(int(math.floor(cell_y1)), 0, img_shape[0]-1)
        cell_y2_int = np.clip(int(math.floor(cell_y2)), 0, img_shape[0]-1)
        for i in range(nb_cells_x):
            cell_x1 = x1d + cell_width * i
            cell_x2 = cell_x1 + cell_width
            cell_x1_int = np.clip(int(math.floor(cell_x1)), 0, img_shape[1]-1)
            cell_x2_int = np.clip(int(math.floor(cell_x2)), 0, img_shape[1]-1)
            grids[cell_y1_int:cell_y2_int+1, cell_x1_int:cell_x2_int+1, 1+cell_counter] = 1
            cell_counter += 1
    return grids 
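
A quick sanity check of the clip-then-project logic, assuming the function above and its imports (`numpy as np`, `math`, `imgaug as ia`) are in scope; the namedtuple keypoints are hypothetical stand-ins for imgaug keypoints:

from collections import namedtuple

Keypoint = namedtuple("Keypoint", ["x", "y"])  # stand-in exposing .x and .y

# A box sticking out of a 100x100 image on the left edge
corners = [Keypoint(-10, 5), Keypoint(120, 5), Keypoint(120, 90), Keypoint(-10, 90)]
grids = bb_coords_to_grid(corners, img_shape=(100, 100), grid_size=10)
print(grids.shape)          # (10, 10, 10): one full-box heatmap plus 9 position-sensitive ones
print(grids[..., 0].sum())  # 100.0: the clipped box covers the whole 10x10 grid here
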
Example 71
Project: ieml   Author: IEMLdev   File: relations.py    GNU General Public License v3.0 4 votes
def _compute_relations(dictionary):
        # print("Computing relations", file=sys.stderr)
        # logger.log(logging.DEBUG, "Computing tables relations")
        # logger.log(logging.DEBUG, "Computing contains/contained relations")
        # logger.log(logging.DEBUG, "Computing father/child relations")
        # print("Computing siblings relations", sys.stderr)

        relations = {}
        contains = RelationsGraph._compute_contains(dictionary)
        relations['contains'] = csr_matrix(contains)
        relations['contained'] = csr_matrix(relations['contains'].transpose())

        father = RelationsGraph._compute_father(dictionary)

        for i, r in enumerate(['_substance', '_attribute', '_mode']):
            relations['father' + r] = dok_matrix(father[i])

        siblings = RelationsGraph._compute_siblings(dictionary)
        relations['opposed'] = dok_matrix(siblings[0])
        relations['associated'] = dok_matrix(siblings[1])
        relations['crossed'] = dok_matrix(siblings[2])
        relations['twin'] = dok_matrix(siblings[3])

        # self._do_inhibitions()

        for i, r in enumerate(['_substance', '_attribute', '_mode']):
            relations['child' + r] = relations['father' + r].transpose()

        # self.relations['siblings'] = sum(siblings)
        # self.relations['inclusion'] = np.clip(self.relations['contains'] + self.relations['contained'], 0, 1)
        # self.relations['father'] = self.relations['father_substance'] + \
        #                            self.relations['father_attribute'] + \
        #                            self.relations['father_mode']
        # self.relations['child'] = self.relations['child_substance'] + \
        #                           self.relations['child_attribute'] + \
        #                           self.relations['child_mode']
        # self.relations['etymology'] = self.relations['father'] + self.relations['child']

        table = RelationsGraph._compute_table_rank(dictionary, relations['contained'])
        for i in range(6):
            relations['table_%d'%i] = table[i]

        relations['identity'] = csr_matrix(np.eye(len(dictionary)))

        missing = {s for s in RELATIONS if s not in relations}
        if missing:
            raise ValueError("Missing relations : {%s}"%", ".join(missing))

        return {reltype: csr_matrix(relations[reltype]) for reltype in RELATIONS} 
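
The commented-out 'inclusion' line shows the intended np.clip pattern here: summing two 0/1 adjacency matrices and clipping the result back to binary. A minimal dense sketch with hypothetical matrices:

import numpy as np

contains = np.array([[0, 1], [0, 0]])
contained = contains.T
inclusion = np.clip(contains + contained, 0, 1)  # union of the two relations, kept 0/1
print(inclusion)  # [[0 1]
                  #  [1 0]]
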
Example 72
Project: DataHack2018   Author: InnovizTech   File: vis_utils.py    BSD 3-Clause "New" or "Revised" License 4 votes
def _update_camera(self, task):
        scale = 0.3
        if 'w' in self._buttons_state and self._buttons_state['w']:
            if 'control' in self._buttons_state and self._buttons_state['control']:
                self._pan_camera((0., 0., scale))
            elif 'alt' in self._buttons_state and self._buttons_state['alt']:
                self._zoom(1.)
            else:
                self._pan_camera((0., scale, 0.))
        if 's' in self._buttons_state and self._buttons_state['s']:
            if 'control' in self._buttons_state and self._buttons_state['control']:
                self._pan_camera((0., 0., -scale))
            elif 'alt' in self._buttons_state and self._buttons_state['alt']:
                self._zoom(-1.)
            else:
                self._pan_camera((0., -scale, 0.))
        if 'a' in self._buttons_state and self._buttons_state['a']:
            self._pan_camera((-scale, 0., 0.))
        if 'd' in self._buttons_state and self._buttons_state['d']:
            self._pan_camera((scale, 0., 0.))

        if 'wheel_up' in self._buttons_state and self._buttons_state['wheel_up']:
            self._zoom(2 ** base.camera.getPos()[2])  # exponential zoom step; '**' (power), not the XOR operator '^'
        if 'wheel_down' in self._buttons_state and self._buttons_state['wheel_down']:
            self._zoom(-(2 ** base.camera.getPos()[2]))

        if 'mouse1' in self._buttons_state and self._buttons_state['mouse1'] and base.mouseWatcherNode.hasMouse():
            mouse_pos = np.array(base.mouseWatcherNode.getMouse())
            if np.linalg.norm(mouse_pos - self._prev_mouse_location) > 1e-2:
                prev_mouse_location = self._prev_mouse_location.copy()
                self._prev_mouse_location = mouse_pos
                diff = mouse_pos-prev_mouse_location
                self._rotate_camera(diff)

        if 'mouse3' in self._buttons_state and self._buttons_state['mouse3'] and base.mouseWatcherNode.hasMouse():
            mouse_pos = np.array(base.mouseWatcherNode.getMouse())
            if self._prev_mouse_location_pen is None or np.linalg.norm(mouse_pos - self._prev_mouse_location_pen) > 1e-2:
                if self._prev_mouse_location_pen is not None:
                    prev_mouse_location = self._prev_mouse_location_pen.copy()
                else:
                    prev_mouse_location = mouse_pos
                self._prev_mouse_location_pen = mouse_pos
                diff = (mouse_pos-prev_mouse_location) * np.clip(base.camera.getPos()[2], 5, 40)
                self._pan_camera_mouse((diff[0], diff[1]))
        else:
            self._prev_mouse_location_pen = None
        return task.cont 
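
The mouse-pan speed is scaled by the camera height, clipped to [5, 40], so panning stays usable whether the camera is zoomed far in or far out. A standalone sketch of just that scaling, with hypothetical heights (in the source, `base.camera` is Panda3D's global camera):

import numpy as np

def pan_scale(camera_height):
    # Pan faster when the camera is high above the scene, within fixed limits
    return np.clip(camera_height, 5, 40)

for h in (1.0, 12.0, 100.0):
    print(h, pan_scale(h))  # 1.0 -> 5.0, 12.0 -> 12.0, 100.0 -> 40.0
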
Example 73
Project: Tacotron   Author: ElwynWang   File: signal_process.py    GNU General Public License v3.0 4 votes
def get_spectrograms(fpath):
    '''Returns the normalized log mel-spectrogram and log magnitude spectrogram of `fpath`.
    Args:
      fpath: A string. The full path of a sound file.

    Returns:
      mel: A 2d array of shape (T, n_mels) <- Transposed
      mag: A 2d array of shape (T, 1+n_fft/2) <- Transposed
    '''
    # Loading sound file
    y, sr = librosa.load(fpath, sr=Hp.sample_rate)

    # Trimming
    y, _ = librosa.effects.trim(y)

    # Preemphasis
    y = np.append(y[0], y[1:] - Hp.preemphasis * y[:-1])

    # stft
    linear = librosa.stft(y=y,
                          n_fft=Hp.num_fft,
                          hop_length=Hp.hop_length,
                          win_length=Hp.win_length)

    # magnitude spectrogram
    mag = np.abs(linear)  # (1+n_fft//2, T)

    # mel spectrogram
    mel_basis = librosa.filters.mel(Hp.sample_rate, Hp.num_fft, Hp.num_mels)  # (n_mels, 1+n_fft//2)
    mel = np.dot(mel_basis, mag)  # (n_mels, t)

    # to decibel
    mel = 20 * np.log10(np.maximum(1e-5, mel))
    mag = 20 * np.log10(np.maximum(1e-5, mag))

    # normalize
    mel = np.clip((mel - Hp.ref_db + Hp.max_db) / Hp.max_db, 1e-8, 1)
    mag = np.clip((mag - Hp.ref_db + Hp.max_db) / Hp.max_db, 1e-8, 1)

    # Transpose
    mel = mel.T.astype(np.float32)  # (T, n_mels)
    mag = mag.T.astype(np.float32)  # (T, 1+n_fft//2)

    return mel, mag 
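
The dB spectrograms are shifted by `ref_db`, offset by `max_db`, and clipped into (0, 1]; synthesis later inverts this mapping. A minimal sketch of the round trip, with hypothetical `max_db`/`ref_db` values in the usual Tacotron range:

import numpy as np

max_db, ref_db = 100.0, 20.0         # hypothetical hyperparameters
db = np.array([-120.0, -40.0, 0.0])  # raw decibel values

norm = np.clip((db - ref_db + max_db) / max_db, 1e-8, 1)  # forward: into (0, 1]
denorm = np.clip(norm, 0, 1) * max_db - max_db + ref_db   # inverse used at synthesis time
print(norm)    # [1.e-08 4.e-01 8.e-01]
print(denorm)  # approx [-80. -40.   0.]: in-range values round-trip; -120 dB was floored
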
Example 74
Project: dc_tts   Author: Kyubyong   File: utils.py    Apache License 2.0 4 votes
def get_spectrograms(fpath):
    '''Parses the wave file in `fpath` and
    returns the normalized mel-spectrogram and linear spectrogram.

    Args:
      fpath: A string. The full path of a sound file.

    Returns:
      mel: A 2d array of shape (T, n_mels) and dtype of float32.
      mag: A 2d array of shape (T, 1+n_fft/2) and dtype of float32.
    '''
    # Loading sound file
    y, sr = librosa.load(fpath, sr=hp.sr)

    # Trimming
    y, _ = librosa.effects.trim(y)

    # Preemphasis
    y = np.append(y[0], y[1:] - hp.preemphasis * y[:-1])

    # stft
    linear = librosa.stft(y=y,
                          n_fft=hp.n_fft,
                          hop_length=hp.hop_length,
                          win_length=hp.win_length)

    # magnitude spectrogram
    mag = np.abs(linear)  # (1+n_fft//2, T)

    # mel spectrogram
    mel_basis = librosa.filters.mel(hp.sr, hp.n_fft, hp.n_mels)  # (n_mels, 1+n_fft//2)
    mel = np.dot(mel_basis, mag)  # (n_mels, t)

    # to decibel
    mel = 20 * np.log10(np.maximum(1e-5, mel))
    mag = 20 * np.log10(np.maximum(1e-5, mag))

    # normalize
    mel = np.clip((mel - hp.ref_db + hp.max_db) / hp.max_db, 1e-8, 1)
    mag = np.clip((mag - hp.ref_db + hp.max_db) / hp.max_db, 1e-8, 1)

    # Transpose
    mel = mel.T.astype(np.float32)  # (T, n_mels)
    mag = mag.T.astype(np.float32)  # (T, 1+n_fft//2)

    return mel, mag 
Example 75
Project: mmdetection   Author: open-mmlab   File: transforms.py    Apache License 2.0 4 votes
def __call__(self, results):
        img = results['img']
        margin_h = max(img.shape[0] - self.crop_size[0], 0)
        margin_w = max(img.shape[1] - self.crop_size[1], 0)
        offset_h = np.random.randint(0, margin_h + 1)
        offset_w = np.random.randint(0, margin_w + 1)
        crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]
        crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]

        # crop the image
        img = img[crop_y1:crop_y2, crop_x1:crop_x2, :]
        img_shape = img.shape
        results['img'] = img
        results['img_shape'] = img_shape

        # crop bboxes accordingly and clip to the image boundary
        for key in results.get('bbox_fields', []):
            bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],
                                   dtype=np.float32)
            bboxes = results[key] - bbox_offset
            bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1] - 1)
            bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0] - 1)
            results[key] = bboxes

        # filter out the gt bboxes that are completely cropped
        if 'gt_bboxes' in results:
            gt_bboxes = results['gt_bboxes']
            valid_inds = (gt_bboxes[:, 2] > gt_bboxes[:, 0]) & (
                gt_bboxes[:, 3] > gt_bboxes[:, 1])
            # if no gt bbox remains after cropping, just skip this image
            if not np.any(valid_inds):
                return None
            results['gt_bboxes'] = gt_bboxes[valid_inds, :]
            if 'gt_labels' in results:
                results['gt_labels'] = results['gt_labels'][valid_inds]

            # filter and crop the masks
            if 'gt_masks' in results:
                valid_gt_masks = []
                for i in np.where(valid_inds)[0]:
                    gt_mask = results['gt_masks'][i][crop_y1:crop_y2,
                                                     crop_x1:crop_x2]
                    valid_gt_masks.append(gt_mask)
                results['gt_masks'] = valid_gt_masks

        return results 
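
After subtracting the crop offset, coordinates that fall outside the new window are clipped to valid pixel indices; the even columns are x-coordinates and the odd columns y-coordinates. A tiny sketch with one hypothetical box:

import numpy as np

img_shape = (300, 400)                            # cropped image height, width
bboxes = np.array([[-15.0, 10.0, 450.0, 290.0]])  # [x1, y1, x2, y2] after the offset

bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1] - 1)  # clip x to [0, W-1]
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0] - 1)  # clip y to [0, H-1]
print(bboxes)  # [[  0.  10. 399. 290.]]
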
Example 76
Project: neural-fingerprinting   Author: StephanZheng   File: dataset_helper.py    BSD 3-Clause "New" or "Revised" License 4 votes
def enforce_epsilon_and_compute_hash(dataset_batch_dir, adv_dir, output_dir,
                                     epsilon):
  """Enforces size of perturbation on images, and compute hashes for all images.

  Args:
    dataset_batch_dir: directory with the images of specific dataset batch
    adv_dir: directory with generated adversarial images
    output_dir: directory where to copy result
    epsilon: size of perturbation

  Returns:
    dictionary mapping image ID to hash.
  """
  dataset_images = [f for f in os.listdir(dataset_batch_dir)
                    if f.endswith('.png')]
  image_hashes = {}
  resize_warning = False
  for img_name in dataset_images:
    if not os.path.exists(os.path.join(adv_dir, img_name)):
      logging.warning('Image %s not found in the output', img_name)
      continue
    image = np.array(
        Image.open(os.path.join(dataset_batch_dir, img_name)).convert('RGB'))
    image = image.astype('int32')
    image_max_clip = np.clip(image + epsilon, 0, 255).astype('uint8')
    image_min_clip = np.clip(image - epsilon, 0, 255).astype('uint8')
    # load and resize adversarial image if needed
    adv_image = Image.open(os.path.join(adv_dir, img_name)).convert('RGB')
    # Image.size is reversed compared to np.array.shape
    if adv_image.size[::-1] != image.shape[:2]:
      resize_warning = True
      adv_image = adv_image.resize((image.shape[1], image.shape[0]),
                                   Image.BICUBIC)
    adv_image = np.array(adv_image)
    clipped_adv_image = np.clip(adv_image,
                                image_min_clip,
                                image_max_clip)
    Image.fromarray(clipped_adv_image).save(os.path.join(output_dir, img_name))
    # compute hash
    image_hashes[img_name[:-4]] = hashlib.sha1(
        clipped_adv_image.view(np.uint8)).hexdigest()
  if resize_warning:
    logging.warning('One or more adversarial images had incorrect size')
  return image_hashes 
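
The elementwise bounds make this an L-infinity projection: every pixel of the adversarial image is forced back to within `epsilon` of the original, and np.clip accepts arrays as its bounds. A condensed sketch on random data:

import numpy as np

rng = np.random.default_rng(0)
image = rng.integers(0, 256, size=(4, 4, 3)).astype('int32')
adv = rng.integers(0, 256, size=(4, 4, 3)).astype('int32')
epsilon = 8

lo = np.clip(image - epsilon, 0, 255)   # per-pixel lower bound
hi = np.clip(image + epsilon, 0, 255)   # per-pixel upper bound
clipped = np.clip(adv, lo, hi)          # array-valued bounds, not scalars
assert np.abs(clipped - image).max() <= epsilon
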
Example 77
Project: neural-pipeline   Author: toodef   File: tensorboard.py    MIT License 4 votes
def _update_metrics(self, metrics: [AbstractMetric], metrics_groups: [MetricsGroup]) -> None:
        """
        Update console

        :param metrics: metrics
        """

        def process_metric(cur_metric, parent_tag: str = None):
            def add_histogram(name: str, vals, step_num, bins):
                try:
                    self.__writer.add_histogram(name, vals, step_num, bins)
                except Exception:
                    # histogram writing can fail on degenerate value ranges; skip it
                    pass

            tag = lambda name: name if parent_tag is None else '{}/{}'.format(parent_tag, name)

            if isinstance(cur_metric, MetricsGroup):
                for m in cur_metric.metrics():
                    if m.get_values().size > 0:
                        self.__writer.add_scalars(tag(m.name()), {m.name(): np.mean(m.get_values())}, global_step=self.epoch_num)
                        add_histogram(tag(m.name()) + '_hist',
                                      np.clip(m.get_values(), m.min_val(), m.max_val()).astype(np.float32),
                                      self.epoch_num, np.linspace(m.min_val(), m.max_val(), num=11).astype(np.float32))
            else:
                values = cur_metric.get_values().astype(np.float32)
                if values.size > 0:
                    self.__writer.add_scalar(tag(cur_metric.name()), float(np.mean(values)), global_step=self.epoch_num)
                    add_histogram(tag(cur_metric.name()) + '_hist',
                                  np.clip(values, cur_metric.min_val(), cur_metric.max_val()).astype(np.float32),
                                  self.epoch_num, np.linspace(cur_metric.min_val(), cur_metric.max_val(), num=11).astype(np.float32))

        if self.__writer is None:
            return

        for metric in metrics:
            process_metric(metric)

        for metrics_group in metrics_groups:
            for metric in metrics_group.metrics():
                process_metric(metric, metrics_group.name())
            for group in metrics_group.groups():
                process_metric(group, metrics_group.name()) 
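
Clipping the values to the metric's declared [min, max] guarantees that every sample falls inside the 11 bin edges produced by linspace. A standalone sketch with hypothetical bounds:

import numpy as np

values = np.array([-3.0, 0.2, 0.9, 7.5])
min_val, max_val = 0.0, 1.0

clipped = np.clip(values, min_val, max_val).astype(np.float32)
bins = np.linspace(min_val, max_val, num=11).astype(np.float32)
hist, _ = np.histogram(clipped, bins=bins)
print(hist.sum())  # 4: no sample falls outside the bin range after clipping
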
Example 78
Project: core   Author: lifemapper   File: mcpa.py    GNU General Public License v3.0 4 votes
def get_p_values(observed_value, test_values, num_permutations=None):
    """Gets an array of P-Values

    Gets a (1- or 2-dimensional) array of P-values, where the P-value for each
        array location is determined by counting the test values at the
        corresponding location that are greater than or equal to the observed
        value, then dividing that count by the number of permutations

    Args:
        observed_value (Matrix): An array of observed values to use as a
            reference.
        test_values (Matrix): A list of arrays generated from randomizations
            that will be compared to the observed
        num_permutations: (optional) The total number of randomizations
            performed.  If provided, the P-values are divided by this number.

    Todo:
        Deprecate this in favor of new method that is more flexible
    """
    if num_permutations is None:
        num_permutations = 1.0

    # Create the P-Values matrix
    p_vals = np.zeros(observed_value.data.shape, dtype=float)
    # For each matrix in test values
    for test_mtx in test_values:
        # Add 1 where every value in the test matrix is greater than or equal
        #    to the value in the observed value.  Numpy comparisons will create
        #    a matrix of boolean values for each cell, which when added to the
        #    p_vals matrix will be treated as 1 for True and 0 for False

        # If this is a stack
        if test_mtx.data.ndim == 3:
            for i in range(test_mtx.data.shape[2]):
                p_vals += np.abs(np.round(test_mtx.data[:,:,[i]], 5)
                                 ) >= np.abs(np.round(observed_value.data, 5))
        else:
            p_vals += np.abs(np.round(test_mtx.data, 5)
                             ) >= np.abs(np.round(observed_value.data, 5))
    # Reshape and add the depth header
    if len(p_vals.shape) == 2:
        p_vals = np.expand_dims(p_vals, axis=2)
    headers = observed_value.headers
    headers['2'] = ['P-values']
    # Scale and return the p-values matrix
    ret_data_tmp = np.nan_to_num(p_vals / num_permutations)
    return Matrix(np.clip(ret_data_tmp, -1.0, 1.0), headers=headers)

# ............................................................................. 
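
Each permutation contributes a boolean comparison that numpy accumulates as 0/1; dividing by the permutation count gives the P-value, and a final clip guards the scaled result. A compact sketch with hypothetical observed and test arrays (clipped to [0, 1] here, since P-values are non-negative):

import numpy as np

observed = np.array([[2.0, -0.5]])
tests = [np.array([[2.5, 0.1]]), np.array([[1.0, -0.9]]), np.array([[3.0, 0.2]])]

p_vals = np.zeros_like(observed)
for t in tests:
    p_vals += np.abs(np.round(t, 5)) >= np.abs(np.round(observed, 5))
p_vals = np.clip(np.nan_to_num(p_vals / len(tests)), 0.0, 1.0)
print(p_vals)  # [[0.66666667 0.33333333]]
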
Example 79
Project: core   Author: lifemapper   File: permutation_testing.py    GNU General Public License v3.0 4 votes
def get_p_values(observed_matrix, test_matrices,
                 compare_func=compare_absolute_values):
    """Gets p-values by comparing the observed and random data

    Args:
        observed_matrix (:obj: `Matrix`): A Matrix object with observed values
        test_matrices (:obj: `list`): A list of Matrix objects with values
            obtained through permutations
        compare_func (:obj: `function`): A function that, when given two
            values, returns True if the second meets the condition

    Todo:
        * Take optional clip values
        * Take optional number of permutations
    """
    p_val_headers = deepcopy(observed_matrix.headers)
    ndim = observed_matrix.data.ndim
    p_val_headers[str(ndim - 1)] = ['P-Values']
    
    # Create the P-values matrix.  The shape should be the same as the observed
    #    data with one extra dimension if the last dimension has size > 1
    if observed_matrix.data.shape[-1] == 1:
        p_vals_shape = observed_matrix.data.shape
    else:
        p_vals_shape = list(observed_matrix.data.shape) + [1]
    p_values = Matrix(np.zeros(p_vals_shape), headers=observed_matrix.headers)

    num_permutations = 0
    for rand in test_matrices:
        # If the random matrices are a stack with more dimensions or more
        #    layers, compare each layer to observed
        if rand.data.ndim > ndim or \
            rand.data.shape[-1] > observed_matrix.data.shape[-1]:
            # Determine shape of test matrix
            if rand.data.ndim > ndim:
                test_shape = list(rand.data.shape)[:-1]
            else:
                test_shape = observed_matrix.data.shape
            # Loop through each
            for i in range(rand.data.shape[-1]):
                p_values.data += compare_func(
                    observed_matrix.data,
                    # Slice off one test layer
                    rand.data[..., i].reshape(test_shape))
                num_permutations += 1
        else:
            p_values.data += compare_func(observed_matrix.data, rand.data)
            num_permutations += 1

    # Divide by number of permutations and clip just in case
    p_values.data = np.clip(
        np.nan_to_num(p_values.data / num_permutations), 0.0, 1.0)
    return p_values 
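
The comparator is pluggable; anything returning an elementwise boolean works. A self-contained sketch with a stand-in `compare_absolute_values` on plain arrays (the source wraps these in its `Matrix` class):

import numpy as np

def compare_absolute_values(observed, test):
    # 1 wherever the permuted value is at least as extreme as the observed one
    return np.abs(test) >= np.abs(observed)

observed = np.array([[2.0], [-0.5]])
tests = [np.array([[2.5], [0.1]]), np.array([[1.0], [-0.9]])]

p = np.zeros_like(observed)
for t in tests:
    p += compare_absolute_values(observed, t)
p = np.clip(np.nan_to_num(p / len(tests)), 0.0, 1.0)  # divide and clip, as above
print(p)  # [[0.5]
          #  [0.5]]
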
Example 80
Project: GST-Tacotron   Author: KinglittleQ   File: utils.py    MIT License 4 votes
def get_spectrograms(fpath):
    '''Returns the normalized log mel-spectrogram and log magnitude spectrogram of `fpath`.
    Args:
      fpath: A string. The full path of a sound file.
    Returns:
      mel: A 2d array of shape (T, n_mels) <- Transposed
      mag: A 2d array of shape (T, 1+n_fft/2) <- Transposed
    '''

    # Loading sound file
    y, sr = librosa.load(fpath, sr=hp.sr)

    # Trimming
    y, _ = librosa.effects.trim(y)

    # Preemphasis
    y = np.append(y[0], y[1:] - hp.preemphasis * y[:-1])

    # stft
    linear = librosa.stft(y=y,
                          n_fft=hp.n_fft,
                          hop_length=hp.hop_length,
                          win_length=hp.win_length)

    # magnitude spectrogram
    mag = np.abs(linear)  # (1+n_fft//2, T)

    # mel spectrogram
    mel_basis = librosa.filters.mel(hp.sr, hp.n_fft, hp.n_mels)  # (n_mels, 1+n_fft//2)
    mel = np.dot(mel_basis, mag)  # (n_mels, t)

    # to decibel
    mel = 20 * np.log10(np.maximum(1e-5, mel))
    mag = 20 * np.log10(np.maximum(1e-5, mag))

    # normalize
    mel = np.clip((mel - hp.ref_db + hp.max_db) / hp.max_db, 1e-8, 1)
    mag = np.clip((mag - hp.ref_db + hp.max_db) / hp.max_db, 1e-8, 1)

    # Transpose
    mel = mel.T.astype(np.float32)  # (T, n_mels)
    mag = mag.T.astype(np.float32)  # (T, 1+n_fft//2)

    return mel, mag