Python numpy.clip() Examples
The following are 27 code examples of numpy.clip(), extracted from open source projects. You can view the original project or source file by following the links above each example.
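
Before the examples, a quick refresher: np.clip(a, a_min, a_max) limits the values of an array to the interval [a_min, a_max], and either bound may be None to leave that side open. A minimal, self-contained sketch of the call patterns that recur in the examples below:

import numpy as np

a = np.array([-3.0, -1.0, 0.5, 2.0, 7.0])

print(np.clip(a, 0, 1))     # [ 0.   0.   0.5  1.   1. ]  both bounds
print(np.clip(a, 0, None))  # [ 0.   0.   0.5  2.   7. ]  lower bound only
print(a.clip(None, 2))      # [-3.  -1.   0.5  2.   2. ]  ndarray method, upper bound only

out = np.empty_like(a)
np.clip(a, -1, 1, out=out)  # in-place variant: result written into out
print(out)                  # [-1.  -1.   0.5  1.   1. ]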

Example #1
Source Project: pruning_yolov3 Author: zbyuan File: datasets.py License: GNU General Public License v3.0 | 7 votes |
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    x = (np.random.uniform(-1, 1, 3) * np.array([hgain, sgain, vgain]) + 1).astype(np.float32)  # random gains
    img_hsv = (cv2.cvtColor(img, cv2.COLOR_BGR2HSV) * x.reshape((1, 1, 3))).clip(None, 255).astype(np.uint8)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed

# def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):  # original version
#     # SV augmentation by 50%
#     img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # hue, sat, val
#
#     S = img_hsv[:, :, 1].astype(np.float32)  # saturation
#     V = img_hsv[:, :, 2].astype(np.float32)  # value
#
#     a = random.uniform(-1, 1) * sgain + 1
#     b = random.uniform(-1, 1) * vgain + 1
#     S *= a
#     V *= b
#
#     img_hsv[:, :, 1] = S if a < 1 else S.clip(None, 255)
#     img_hsv[:, :, 2] = V if b < 1 else V.clip(None, 255)
#     cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed

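The .clip(None, 255) call above is the ndarray method with no lower bound: only values pushed above 255 by a gain larger than 1 are clamped before the cast back to uint8. A minimal sketch of just that step, with a made-up image and made-up per-channel gains:

import numpy as np

hsv = np.random.uniform(0, 255, (4, 4, 3)).astype(np.float32)  # stand-in HSV image
gains = np.array([1.2, 0.8, 1.5], dtype=np.float32)            # hypothetical channel gains
scaled = (hsv * gains.reshape(1, 1, 3)).clip(None, 255).astype(np.uint8)
assert scaled.max() <= 255  # nothing overflowed the uint8 range
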
Example #2
Source Project: cat-bbs Author: aleju File: common.py License: MIT License | 6 votes |
def draw_heatmap(img, heatmap, alpha=0.5):
    """Draw a heatmap overlay over an image."""
    assert len(heatmap.shape) == 2 \
        or (len(heatmap.shape) == 3 and heatmap.shape[2] == 1)
    assert img.dtype in [np.uint8, np.int32, np.int64]
    assert heatmap.dtype in [np.float32, np.float64]

    if img.shape[0:2] != heatmap.shape[0:2]:
        heatmap_rs = np.clip(heatmap * 255, 0, 255).astype(np.uint8)
        heatmap_rs = ia.imresize_single_image(
            heatmap_rs[..., np.newaxis],
            img.shape[0:2],
            interpolation="nearest"
        )
        heatmap = np.squeeze(heatmap_rs) / 255.0

    cmap = plt.get_cmap('jet')
    heatmap_cmapped = cmap(heatmap)
    heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)

    heatmap_cmapped = heatmap_cmapped * 255
    mix = (1 - alpha) * img + alpha * heatmap_cmapped
    mix = np.clip(mix, 0, 255).astype(np.uint8)
    return mix

Example #3
Source Project: disentangling_conditional_gans Author: zalandoresearch File: dataset_tool.py License: MIT License | 6 votes |
def add_image(self, img):
    if self.print_progress and self.cur_images % self.progress_interval == 0:
        print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True)
        sys.stdout.flush()
    if self.shape is None:
        self.shape = img.shape
        self.resolution_log2 = int(np.log2(self.shape[1]))
        assert self.shape[0] in [1, 3]
        assert self.shape[1] == self.shape[2]
        assert self.shape[1] == 2**self.resolution_log2
        tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
        for lod in range(self.resolution_log2 - 1):
            tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod)
            self.tfr_writers.append(tf.python_io.TFRecordWriter(tfr_file, tfr_opt))
    assert img.shape == self.shape
    for lod, tfr_writer in enumerate(self.tfr_writers):
        if lod:
            img = img.astype(np.float32)
            img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25
        quant = np.rint(img).clip(0, 255).astype(np.uint8)
        ex = tf.train.Example(features=tf.train.Features(feature={
            'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)),
            'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))}))
        tfr_writer.write(ex.SerializeToString())
    self.cur_images += 1

Example #4
Source Project: mmdetection Author: open-mmlab File: structures.py License: Apache License 2.0 | 6 votes |
def crop(self, bbox):
    """See :func:`BaseInstanceMasks.crop`."""
    assert isinstance(bbox, np.ndarray)
    assert bbox.ndim == 1

    # clip the boundary
    bbox = bbox.copy()
    bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
    bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
    x1, y1, x2, y2 = bbox
    w = np.maximum(x2 - x1, 1)
    h = np.maximum(y2 - y1, 1)

    if len(self.masks) == 0:
        cropped_masks = np.empty((0, h, w), dtype=np.uint8)
    else:
        cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]
    return BitmapMasks(cropped_masks, h, w)

Example #5
Source Project: neural-fingerprinting Author: StephanZheng File: pgd_cw_whitebox.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def perturb(self, x_nat, y, sess):
    """Given a set of examples (x_nat, y), returns a set of adversarial
    examples within epsilon of x_nat in l_infinity norm."""
    if self.rand:
        x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
    else:
        x = np.copy(x_nat)

    for i in range(self.k):
        grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
                                              self.model.y_input: y})
        x += self.a * np.sign(grad)
        x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
        x = np.clip(x, 0, 1)  # ensure valid pixel range
    return x

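The two consecutive clips at the end of the loop implement the PGD projection step: the first snaps x back onto the L-infinity ball of radius epsilon around x_nat (note that np.clip accepts array-valued bounds), and the second keeps it in the valid pixel range. A standalone sketch of just the projection, with made-up shapes and epsilon:

import numpy as np

x_nat = np.random.uniform(0, 1, (2, 8, 8))             # hypothetical clean images in [0, 1]
x = x_nat + np.random.uniform(-0.5, 0.5, x_nat.shape)  # an update that may overshoot
epsilon = 0.1

x = np.clip(x, x_nat - epsilon, x_nat + epsilon)  # project onto the L-inf ball (array bounds)
x = np.clip(x, 0, 1)                              # then onto the valid pixel range
assert np.all(np.abs(x - x_nat) <= epsilon + 1e-12) and x.min() >= 0.0 and x.max() <= 1.0
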
Example #6
Source Project: neural-fingerprinting Author: StephanZheng File: pgd_whitebox.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def perturb(self, x_nat, y, sess):
    """Given a set of examples (x_nat, y), returns a set of adversarial
    examples within epsilon of x_nat in l_infinity norm."""
    if self.rand:
        x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
    else:
        x = np.copy(x_nat)

    for i in range(self.k):
        grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
                                              self.model.y_input: y})
        x += self.a * np.sign(grad)
        x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
        x = np.clip(x, 0, 1)  # ensure valid pixel range
    return x

Example #7
Source Project: neural-fingerprinting Author: StephanZheng File: utils.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |

def cleverhans_attack_wrapper(cleverhans_attack_fn, reset=True):
    def attack(a):
        session = tf.Session()
        with session.as_default():
            model = RVBCleverhansModel(a)
            adversarial_image = cleverhans_attack_fn(model, session, a)
            adversarial_image = np.squeeze(adversarial_image, axis=0)
            if reset:
                # optionally, reset to ignore other adversarials
                # found during the search
                a._reset()
            # run predictions to make sure the returned adversarial
            # is taken into account
            min_, max_ = a.bounds()
            adversarial_image = np.clip(adversarial_image, min_, max_)
            a.predictions(adversarial_image)
    return attack

Example #8
Source Project: Deep_VoiceChanger Author: pstuvwx File: image.py License: MIT License | 6 votes |

def Chainer2PIL(data, rescale=True):
    data = np.array(data)
    if rescale:
        data *= 256
        # data += 128
    if data.dtype != np.uint8:
        data = np.clip(data, 0, 255)
        data = data.astype(np.uint8)
    if data.shape[0] == 1:
        buf = data.astype(np.uint8).reshape((data.shape[1], data.shape[2]))
    else:
        buf = np.zeros((data.shape[1], data.shape[2], data.shape[0]), dtype=np.uint8)
        for i in range(3):
            a = data[i, :, :]
            buf[:, :, i] = a
    img = Image.fromarray(buf)
    return img

Example #9
Source Project: Deep_VoiceChanger Author: pstuvwx File: dataset.py License: MIT License | 6 votes |

def wave2input_image(wave, window, pos=0, pad=0):
    wave_image = np.hstack([
        wave[pos + i*sride:pos + (i + pad*2)*sride + dif].reshape(height + pad*2, sride)
        for i in range(256 // sride)
    ])[:, :254]
    wave_image *= window
    spectrum_image = np.fft.fft(wave_image, axis=1)
    input_image = np.abs(spectrum_image[:, :128].reshape(1, height + pad*2, 128), dtype=np.float32)

    np.clip(input_image, 1000, None, out=input_image)
    np.log(input_image, out=input_image)
    input_image += bias
    input_image /= scale

    if np.max(input_image) > 0.95:
        print('input image max bigger than 0.95', np.max(input_image))
    if np.min(input_image) < 0.05:
        print('input image min smaller than 0.05', np.min(input_image))

    return input_image

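The np.clip(input_image, 1000, None, out=input_image) line above uses the out= argument to clip in place, flooring the magnitude spectrum so that the following np.log never sees a zero and no temporary array is allocated. A small sketch of the same floor-then-log pattern, with an arbitrary floor value:

import numpy as np

spectrum = np.abs(np.fft.rfft(np.random.randn(1024)))
np.clip(spectrum, 1e-3, None, out=spectrum)  # floor in place; prevents log(0) -> -inf
np.log(spectrum, out=spectrum)               # log in place as well
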
Example #10
Source Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: vaegan_mxnet.py License: Apache License 2.0 | 6 votes |

def visual(title, X, activation):
    '''create a grid of images and save it as a final image
    title : grid image name
    X : array of images
    '''
    assert len(X.shape) == 4
    X = X.transpose((0, 2, 3, 1))
    if activation == 'sigmoid':
        X = np.clip((X)*(255.0), 0, 255).astype(np.uint8)
    elif activation == 'tanh':
        X = np.clip((X+1.0)*(255.0/2.0), 0, 255).astype(np.uint8)
    n = np.ceil(np.sqrt(X.shape[0]))
    buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
    for i, img in enumerate(X):
        fill_buf(buff, i, img, X.shape[1:3])
    cv2.imwrite('%s.jpg' % (title), buff)

Example #11
Source Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: atari_game.py License: Apache License 2.0 | 6 votes |

def play(self, a):
    assert not self.episode_terminate,\
        "Warning, the episode seems to have terminated. " \
        "We need to call either game.begin_episode(max_episode_step) to continue a new " \
        "episode or game.start() to force restart."
    self.episode_step += 1
    reward = 0.0
    action = self.action_set[a]
    for i in range(self.frame_skip):
        reward += self.ale.act(action)
        self.ale.getScreenGrayscale(self.screen_buffer[i % self.screen_buffer_length, :, :])
    self.total_reward += reward
    self.episode_reward += reward
    ob = self.get_observation()
    terminate_flag = self.episode_terminate
    self.replay_memory.append(ob, a, numpy.clip(reward, -1, 1), terminate_flag)
    return reward, terminate_flag

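The numpy.clip(reward, -1, 1) call is the usual DQN-style reward clipping, putting scores from different games on a common [-1, 1] scale before they enter the replay memory. np.clip also accepts plain Python scalars, so the same expression works outside of arrays:

import numpy as np

for raw_reward in (-400.0, -0.5, 0.0, 120.0):
    print(np.clip(raw_reward, -1, 1))  # -1.0, -0.5, 0.0, 1.0
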
Example #12
Source Project: GST-Tacotron Author: KinglittleQ File: utils.py License: MIT License | 6 votes |

def spectrogram2wav(mag):
    '''# Generate wave file from spectrogram'''
    # transpose
    mag = mag.T

    # de-normalize
    mag = (np.clip(mag, 0, 1) * hp.max_db) - hp.max_db + hp.ref_db

    # to amplitude
    mag = np.power(10.0, mag * 0.05)

    # wav reconstruction
    wav = griffin_lim(mag)

    # de-preemphasis
    wav = signal.lfilter([1], [1, -hp.preemphasis], wav)

    # trim
    wav, _ = librosa.effects.trim(wav)

    return wav.astype(np.float32)

Example #13
Source Project: soccer-matlab Author: utra-robosoccer File: motor.py License: BSD 2-Clause "Simplified" License | 6 votes |

def convert_to_torque(self, motor_commands, current_motor_angle,
                      current_motor_velocity):
    """Convert the commands (position control or torque control) to torque.

    Args:
      motor_commands: The desired motor angle if the motor is in position
        control mode. The pwm signal if the motor is in torque control mode.
      current_motor_angle: The motor angle at the current time step.
      current_motor_velocity: The motor velocity at the current time step.
    Returns:
      actual_torque: The torque that needs to be applied to the motor.
      observed_torque: The torque observed by the sensor.
    """
    if self._torque_control_enabled:
        pwm = motor_commands
    else:
        pwm = (-self._kp * (current_motor_angle - motor_commands)
               - self._kd * current_motor_velocity)
    pwm = np.clip(pwm, -1.0, 1.0)
    return self._convert_to_torque_from_pwm(pwm, current_motor_velocity)

Example #14
Source Project: Black-Box-Audio Author: rtaori File: run_audio_attack.py License: MIT License | 5 votes |

def save_wav(audio, output_wav_file):
    wav.write(output_wav_file, 16000,
              np.array(np.clip(np.round(audio), -2**15, 2**15-1), dtype=np.int16))
    print('output dB', db(audio))

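Rounding and clipping to [-2**15, 2**15 - 1] before the int16 cast guards against integer overflow when writing 16-bit PCM; out-of-range floats would otherwise wrap around. A sketch of the same guard using scipy (the signal and sample rate are made up):

import numpy as np
from scipy.io import wavfile

audio = np.random.randn(16000) * 40000  # hypothetical float signal exceeding the int16 range
pcm = np.clip(np.round(audio), -2**15, 2**15 - 1).astype(np.int16)
wavfile.write('out.wav', 16000, pcm)
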
Example #15
Source Project: Black-Box-Audio Author: rtaori File: run_audio_attack.py License: MIT License | 5 votes |

def setup_graph(self, input_audio_batch, target_phrase):
    batch_size = input_audio_batch.shape[0]
    weird = (input_audio_batch.shape[1] - 1) // 320
    logits_arg2 = np.tile(weird, batch_size)
    dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
    dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)

    pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
    seq_len = np.tile(weird, batch_size).astype(np.int32)

    with tf.variable_scope('', reuse=tf.AUTO_REUSE):
        inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
        len_batch = tf.placeholder(tf.float32, name='b')
        arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape, name='c')
        arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape, name='d')
        arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape, name='e')
        len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')

        logits = get_logits(inputs, arg2_logits)
        target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
        ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32), inputs=logits, sequence_length=len_seq)
        decoded, _ = tf.nn.ctc_greedy_decoder(logits, arg2_logits, merge_repeated=True)

        sess = tf.Session()
        saver = tf.train.Saver(tf.global_variables())
        saver.restore(sess, "models/session_dump")

    func1 = lambda a, b, c, d, e, f: sess.run(ctcloss,
        feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
    func2 = lambda a, b, c, d, e, f: sess.run([ctcloss, decoded],
        feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
    return (func1, func2)

Example #16
Source Project: Black-Box-Audio Author: rtaori File: run_audio_attack.py License: MIT License | 5 votes |

def getctcloss(self, input_audio_batch, target_phrase, decode=False):
    batch_size = input_audio_batch.shape[0]
    weird = (input_audio_batch.shape[1] - 1) // 320
    logits_arg2 = np.tile(weird, batch_size)
    dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
    dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)

    pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
    seq_len = np.tile(weird, batch_size).astype(np.int32)

    if decode:
        return self.funcs[1](pass_in, batch_size, logits_arg2, dense_arg1, dense_arg2, seq_len)
    else:
        return self.funcs[0](pass_in, batch_size, logits_arg2, dense_arg1, dense_arg2, seq_len)

Example #17
Source Project: libTLDA Author: wmkouw File: rba.py License: MIT License | 5 votes |

def iwe_kernel_densities(self, X, Z, clip=1000):
    """
    Estimate importance weights based on kernel density estimation.

    Parameters
    ----------
    X : array
        source data (N samples by D features)
    Z : array
        target data (M samples by D features)
    clip : float
        maximum allowed value for individual weights (def: 1000)

    Returns
    -------
    array
        importance weights (N samples by 1)

    """
    # Data shapes
    N, DX = X.shape
    M, DZ = Z.shape

    # Assert equivalent dimensionalities
    assert DX == DZ

    # Compute probabilities based on source kernel densities
    pT = st.gaussian_kde(Z.T).pdf(X.T)
    pS = st.gaussian_kde(X.T).pdf(X.T)

    # Check for numerics
    assert not np.any(np.isnan(pT)) or np.any(pT == 0)
    assert not np.any(np.isnan(pS)) or np.any(pS == 0)

    # Compute importance weights
    iw = pT / pS

    # Clip importance weights
    return np.minimum(clip, np.maximum(0, iw))

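The final line, np.minimum(clip, np.maximum(0, iw)), is equivalent to np.clip(iw, 0, clip): a fixed lower bound of 0 and an upper bound given by the clip parameter. A quick check of the equivalence:

import numpy as np

iw = np.array([-0.2, 0.5, 3.0, 2500.0])
assert np.array_equal(np.minimum(1000, np.maximum(0, iw)), np.clip(iw, 0, 1000))
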
Example #18
Source Project: libTLDA Author: wmkouw File: scl.py License: MIT License | 5 votes |

def Huber_loss(self, theta, X, y, l2=0.0):
    """
    Huber loss function.

    Reference: Ando & Zhang (2005a). A framework for learning predictive
    structures from multiple tasks and unlabeled data. JMLR.

    Parameters
    ----------
    theta : array
        classifier parameters (D features by 1)
    X : array
        data (N samples by D features)
    y : array
        label vector (N samples by 1)
    l2 : float
        l2-regularization parameter (def= 0.0)

    Returns
    -------
    array
        Objective function value.

    """
    # Precompute terms
    Xy = (X.T * y.T).T
    Xyt = np.dot(Xy, theta)

    # Indices of discontinuity
    ix = (Xyt >= -1)

    # Loss function
    return np.sum(np.clip(1 - Xyt[ix], 0, None)**2, axis=0) \
        + np.sum(-4*Xyt[~ix], axis=0) + l2*np.sum(theta**2, axis=0)

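np.clip(1 - Xyt[ix], 0, None) with an open upper bound behaves like np.maximum(1 - Xyt[ix], 0), i.e. a hinge; squaring it gives the squared-hinge branch of this Huber-style loss. In one line:

import numpy as np

z = np.array([-2.0, 0.3, 1.5])
assert np.array_equal(np.clip(1 - z, 0, None), np.maximum(1 - z, 0))  # hinge via one-sided clip
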
Example #19
Source Project: libTLDA Author: wmkouw File: scl.py License: MIT License | 5 votes |

def Huber_grad(self, theta, X, y, l2=0.0):
    """
    Huber gradient computation.

    Reference: Ando & Zhang (2005a). A framework for learning predictive
    structures from multiple tasks and unlabeled data. JMLR.

    Parameters
    ----------
    theta : array
        classifier parameters (D features by 1)
    X : array
        data (N samples by D features)
    y : array
        label vector (N samples by 1)
    l2 : float
        l2-regularization parameter (def= 0.0)

    Returns
    -------
    array
        Gradient with respect to classifier parameters

    """
    # Precompute terms
    Xy = (X.T * y.T).T
    Xyt = np.dot(Xy, theta)

    # Indices of discontinuity
    ix = (Xyt >= -1)

    # Gradient
    return np.sum(2*np.clip(1 - Xyt[ix], 0, None).T * -Xy[ix, :].T, axis=1).T \
        + np.sum(-4*Xy[~ix, :], axis=0) + 2*l2*theta

Example #20
Source Project: cat-bbs Author: aleju File: plotting.py License: MIT License | 5 votes |

def _line_to_xy(self, line_x, line_y, limit_y_min=None, limit_y_max=None):
    point_every = max(1, int(len(line_x) / self.nb_points_max))
    points_x = []
    points_y = []
    curr_sum = 0
    counter = 0
    last_idx = len(line_x) - 1
    for i in range(len(line_x)):
        batch_idx = line_x[i]
        if batch_idx > self.start_batch_idx:
            curr_sum += line_y[i]
            counter += 1
            if counter >= point_every or i == last_idx:
                points_x.append(batch_idx)
                y = curr_sum / counter
                if limit_y_min is not None and limit_y_max is not None:
                    y = np.clip(y, limit_y_min, limit_y_max)
                elif limit_y_min is not None:
                    y = max(y, limit_y_min)
                elif limit_y_max is not None:
                    y = min(y, limit_y_max)
                points_y.append(y)
                counter = 0
                curr_sum = 0
    return points_x, points_y

Example #21
Source Project: cat-bbs Author: aleju File: bbs.py License: MIT License | 5 votes |

def fix_by_image_dimensions(self, height, width=None):
    if isinstance(height, (tuple, list)):
        assert width is None
        height, width = height[0], height[1]
    elif isinstance(height, (np.ndarray, np.generic)):
        assert width is None
        height, width = height.shape[0], height.shape[1]
    else:
        assert width is not None
        assert isinstance(height, int)
        assert isinstance(width, int)

    self.x1 = int(np.clip(self.x1, 0, width-1))
    self.x2 = int(np.clip(self.x2, 0, width-1))
    self.y1 = int(np.clip(self.y1, 0, height-1))
    self.y2 = int(np.clip(self.y2, 0, height-1))

    if self.x1 > self.x2:
        self.x1, self.x2 = self.x2, self.x1
    if self.y1 > self.y2:
        self.y1, self.y2 = self.y2, self.y1

    if self.x1 == self.x2:
        if self.x1 > 0:
            self.x1 = self.x1 - 1
        else:
            self.x2 = self.x2 + 1
    if self.y1 == self.y2:
        if self.y1 > 0:
            self.y1 = self.y1 - 1
        else:
            self.y2 = self.y2 + 1

    #self.width = self.x2 - self.x1
    #self.height = self.y2 - self.y1

Example #22
Source Project: cat-bbs Author: aleju File: bbs.py License: MIT License | 5 votes |

def draw_on_image(self, img, color=[0, 255, 0], alpha=1.0, thickness=1, copy=copy):
    assert img.dtype in [np.uint8, np.float32, np.int32, np.int64]

    result = np.copy(img) if copy else img
    for i in range(thickness):
        y = [self.y1-i, self.y1-i, self.y2+i, self.y2+i]
        x = [self.x1-i, self.x2+i, self.x2+i, self.x1-i]
        rr, cc = draw.polygon_perimeter(y, x, shape=img.shape)
        if alpha >= 0.99:
            result[rr, cc, 0] = color[0]
            result[rr, cc, 1] = color[1]
            result[rr, cc, 2] = color[2]
        else:
            if result.dtype == np.float32:
                result[rr, cc, 0] = (1 - alpha) * result[rr, cc, 0] + alpha * color[0]
                result[rr, cc, 1] = (1 - alpha) * result[rr, cc, 1] + alpha * color[1]
                result[rr, cc, 2] = (1 - alpha) * result[rr, cc, 2] + alpha * color[2]
                result = np.clip(result, 0, 255)
            else:
                result = result.astype(np.float32)
                result[rr, cc, 0] = (1 - alpha) * result[rr, cc, 0] + alpha * color[0]
                result[rr, cc, 1] = (1 - alpha) * result[rr, cc, 1] + alpha * color[1]
                result[rr, cc, 2] = (1 - alpha) * result[rr, cc, 2] + alpha * color[2]
                result = np.clip(result, 0, 255).astype(np.uint8)
    return result

Example #23
Source Project: cat-bbs Author: aleju File: bbs.py License: MIT License | 5 votes |

def draw_on_image_filled_binary(self, img, copy=True):
    if copy:
        img = np.copy(img)
    h, w = img.shape[0], img.shape[1]
    x1 = np.clip(self.x1, 0, w-1)
    x2 = np.clip(self.x2, 0, w-1)
    y1 = np.clip(self.y1, 0, h-1)
    y2 = np.clip(self.y2, 0, h-1)
    if x1 < x2 and y1 < y2:
        img[y1:y2, x1:x2] = 1  # fill using the clipped coordinates
    return img

Example #24
Source Project: disentangling_conditional_gans Author: zalandoresearch File: util_scripts.py License: MIT License | 5 votes |

def generate_interpolation_video(run_id, snapshot=None, grid_size=[1,1], image_shrink=1, image_zoom=1,
                                 duration_sec=60.0, smoothing_sec=1.0, mp4=None, mp4_fps=30,
                                 mp4_codec='libx265', mp4_bitrate='16M', random_seed=1000, minibatch_size=8):
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if mp4 is None:
        mp4 = misc.get_id_string_for_network_pkl(network_pkl) + '-lerp.mp4'
    num_frames = int(np.rint(duration_sec * mp4_fps))
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    print('Generating latent vectors...')
    shape = [num_frames, np.prod(grid_size)] + Gs.input_shape[1:]  # [frame, image, channel, component]
    all_latents = random_state.randn(*shape).astype(np.float32)
    all_latents = scipy.ndimage.gaussian_filter(all_latents, [smoothing_sec * mp4_fps] + [0] * len(Gs.input_shape), mode='wrap')
    all_latents /= np.sqrt(np.mean(np.square(all_latents)))

    # Frame generation func for moviepy.
    def make_frame(t):
        frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))
        latents = all_latents[frame_idx]
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents, labels, minibatch_size=minibatch_size, num_gpus=config.num_gpus,
                        out_mul=127.5, out_add=127.5, out_shrink=image_shrink, out_dtype=np.uint8)
        grid = misc.create_image_grid(images, grid_size).transpose(1, 2, 0)  # HWC
        if image_zoom > 1:
            grid = scipy.ndimage.zoom(grid, [image_zoom, image_zoom, 1], order=0)
        if grid.shape[2] == 1:
            grid = grid.repeat(3, 2)  # grayscale => RGB
        return grid

    # Generate video.
    import moviepy.editor  # pip install moviepy
    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    moviepy.editor.VideoClip(make_frame, duration=duration_sec).write_videofile(
        os.path.join(result_subdir, mp4), fps=mp4_fps, codec='libx264', bitrate=mp4_bitrate)
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()

#----------------------------------------------------------------------------
# Generate MP4 video of training progress for a previous training run.
# To run, uncomment the appropriate line in config.py and launch train.py.

Example #25
Source Project: disentangling_conditional_gans Author: zalandoresearch File: train.py License: MIT License | 5 votes |

def setup_snapshot_image_grid(G, training_set,
    size='1080p',      # '1080p' = to be viewed on 1080p display, '4k' = to be viewed on 4k display.
    layout='random'):  # 'random' = grid contents are selected randomly, 'row_per_class' = each row corresponds to one class label.

    # Select size.
    gw = 1; gh = 1
    if size == '1080p':
        gw = np.clip(1920 // G.output_shape[3], 3, 32)
        gh = np.clip(1080 // G.output_shape[2], 2, 32)
    if size == '4k':
        gw = np.clip(3840 // G.output_shape[3], 7, 32)
        gh = np.clip(2160 // G.output_shape[2], 4, 32)

    # Fill in reals and labels.
    reals = np.zeros([gw * gh] + training_set.shape, dtype=training_set.dtype)
    labels = np.zeros([gw * gh, training_set.label_size], dtype=training_set.label_dtype)
    masks = np.zeros([gw * gh] + [1, training_set.shape[-1], training_set.shape[-1]], dtype=training_set.dtype)
    for idx in range(gw * gh):
        x = idx % gw; y = idx // gw
        while True:
            real, label, mask = training_set.get_minibatch_np(1)
            if layout == 'row_per_class' and training_set.label_size > 0:
                if label[0, y % training_set.label_size] == 0.0:
                    continue
            reals[idx] = real[0]
            labels[idx] = label[0]
            masks[idx] = mask[0]
            break

    # Generate latents.
    latents = misc.random_latents(gw * gh, G)
    return (gw, gh), reals, labels, latents, masks

#----------------------------------------------------------------------------
# Just-in-time processing of training images before feeding them to the networks.

Example #26
Source Project: dustmaps Author: gregreen File: plot_bh.py License: GNU General Public License v2.0 | 5 votes |

def numpy2pil(a, vmin, vmax):
    a = np.clip((a - vmin) / (vmax - vmin), 0., 1.)
    a = (254.99 * a).astype('u1')
    return PIL.Image.fromarray(a)

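The pattern above first clips the affinely rescaled array into [0, 1] and then scales by 254.99, so truncation in the uint8 ('u1') cast can never produce a value above 254. A compact sketch with made-up display limits:

import numpy as np
import PIL.Image

a = np.random.uniform(-5, 20, (32, 32))
vmin, vmax = 0.0, 10.0
scaled = np.clip((a - vmin) / (vmax - vmin), 0., 1.)
img = PIL.Image.fromarray((254.99 * scaled).astype('u1'))  # mode 'L' grayscale image
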
Example #27
Source Project: dustmaps Author: gregreen File: plot_marshall.py License: GNU General Public License v2.0 | 5 votes |

def numpy2pil(a, vmin, vmax, fill=0):
    mask = np.isnan(a)
    a = np.clip((a - vmin) / (vmax - vmin), 0., 1.)
    a = (254.99 * a).astype('u1')
    a[mask] = fill
    return PIL.Image.fromarray(a)