Python numpy.max() Examples

The following are code examples showing how to use numpy.max(). They are drawn from open source Python projects.
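
All of the snippets on this page assume that NumPy has been imported as np. As a quick, project-independent refresher (a minimal sketch, not taken from any of the projects below), numpy.max() reduces an array to its largest element, optionally along a given axis:

import numpy as np

a = np.array([[1, 5, 2],
              [7, 0, 3]])

print(np.max(a))                          # 7, maximum over the flattened array
print(np.max(a, axis=0))                  # [7 5 3], column-wise maxima
print(np.max(a, axis=1))                  # [5 7], row-wise maxima
print(np.max(a, axis=1, keepdims=True))   # shape (2, 1), handy for broadcasting
print(a.max())                            # 7, the equivalent ndarray method

# NaNs propagate: np.max returns nan if any element is nan; use np.nanmax to ignore them.
print(np.nanmax(np.array([1.0, np.nan, 3.0])))   # 3.0

Note that numpy.max() (an alias of numpy.amax()) reduces a single array; the element-wise maximum of two arrays is computed with numpy.maximum(), which several examples below also use.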

Example 1
Project: rhodonite   Author: nestauk   File: phylomemetic.py    MIT License
def label_ages(g):
    """label_ages
    Get the ages of each vertex in a graph and put them into
    a property map. The age is defined as the number of steps between
    a vertex and its most distant antecedent.
    
    Parameters
    ----------
        g : :obj:`graph_tool.Graph` 
            A graph.
        
    Returns
    -------
        age_vp : :obj:`graph_tool.VertexPropertyMap` 
            A property map containing the age of each vertex.
    """
    age_vp = g.new_vertex_property('int')
    for v in g.vertices():
        if v.out_degree() == 0:
            age_vp[v] = 0
        else:
            parent_ages = [age_vp[p] for p in v.out_neighbours()]
            max_age = np.max(parent_ages)
            age_vp[v] = max_age + 1
    return age_vp 
Example 2
Project: aospy   Author: spencerahill   File: vertcoord.py    Apache License 2.0
def to_radians(arr, is_delta=False):
    """Force data with units either degrees or radians to be radians."""
    # Infer the units from embedded metadata, if it's there.
    try:
        units = arr.units
    except AttributeError:
        pass
    else:
        if units.lower().startswith('degrees'):
            warn_msg = ("Conversion applied: degrees -> radians to array: "
                        "{}".format(arr))
            logging.debug(warn_msg)
            return np.deg2rad(arr)
    # Otherwise, assume degrees if the values are sufficiently large.
    threshold = 0.1*np.pi if is_delta else 4*np.pi
    if np.max(np.abs(arr)) > threshold:
        warn_msg = ("Conversion applied: degrees -> radians to array: "
                    "{}".format(arr))
        logging.debug(warn_msg)
        return np.deg2rad(arr)
    return arr 
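
A quick illustration of that threshold heuristic (hypothetical values, not from aospy's test suite; it assumes the to_radians() function above is in scope): plain latitude values in degrees exceed 4*pi and get converted, while values already in radians fall below the threshold and pass through unchanged.

import numpy as np

lats_deg = np.array([-90.0, 0.0, 90.0])            # no units metadata attached
lats_rad = np.array([-np.pi / 2, 0.0, np.pi / 2])

print(to_radians(lats_deg))   # converted: max(|arr|) = 90 > 4*pi ~ 12.57
print(to_radians(lats_rad))   # unchanged: max(|arr|) ~ 1.57 < 4*pi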
Example 3
Project: ml_news_popularity   Author: khaledJabr   File: lr.py    MIT License
def update_beta(self, data, targets, betas, j):
        n = len(targets)
        x_no_j = np.delete(data, j, 1)
        b_no_j = np.delete(betas, j, 0)
        norm_x_j = np.linalg.norm(data[:, j])
        # predict_no_j = np.dot(b_no_j, x_no_j.T)
        predicted = x_no_j.dot(b_no_j)

        # use normalized targets
        residuals = targets - predicted
        # p_j = data[:,j].dot(residuals)
        z_j = norm_x_j.dot(residuals)/n
        # print("Max Residual = {}".format(np.max(residuals)))
        # print(p_j)
        # update beta_j based on residuals

        # return self.soft_threshold(p_j, self.alpha*n/2)/(norm_x_j**2)
        return self.soft_threshold(z_j, self.alpha) 
Example 4
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    MIT License
def create_mnist(tfrecord_dir, mnist_dir):
    print('Loading MNIST from "%s"' % mnist_dir)
    import gzip
    with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:
        images = np.frombuffer(file.read(), np.uint8, offset=16)
    with gzip.open(os.path.join(mnist_dir, 'train-labels-idx1-ubyte.gz'), 'rb') as file:
        labels = np.frombuffer(file.read(), np.uint8, offset=8)
    images = images.reshape(-1, 1, 28, 28)
    images = np.pad(images, [(0,0), (0,0), (2,2), (2,2)], 'constant', constant_values=0)
    assert images.shape == (60000, 1, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (60000,) and labels.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0
    
    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#---------------------------------------------------------------------------- 
Example 5
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    MIT License
def create_mnistrgb(tfrecord_dir, mnist_dir, num_images=1000000, random_seed=123):
    print('Loading MNIST from "%s"' % mnist_dir)
    import gzip
    with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:
        images = np.frombuffer(file.read(), np.uint8, offset=16)
    images = images.reshape(-1, 28, 28)
    images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0)
    assert images.shape == (60000, 32, 32) and images.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    
    with TFRecordExporter(tfrecord_dir, num_images) as tfr:
        rnd = np.random.RandomState(random_seed)
        for idx in range(num_images):
            tfr.add_image(images[rnd.randint(images.shape[0], size=3)])

#---------------------------------------------------------------------------- 
Example 6
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    MIT License
def create_cifar100(tfrecord_dir, cifar100_dir):
    print('Loading CIFAR-100 from "%s"' % cifar100_dir)
    import pickle
    with open(os.path.join(cifar100_dir, 'train'), 'rb') as file:
        data = pickle.load(file, encoding='latin1')
    images = data['data'].reshape(-1, 3, 32, 32)
    labels = np.array(data['fine_labels'])
    assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype == np.int32
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 99
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#---------------------------------------------------------------------------- 
Example 7
Project: Deep_VoiceChanger   Author: pstuvwx   File: dataset.py    MIT License
def wave2input_image(wave, window, pos=0, pad=0):
    wave_image = np.hstack([wave[pos+i*sride:pos+(i+pad*2)*sride+dif].reshape(height+pad*2, sride) for i in range(256//sride)])[:,:254]
    wave_image *= window
    spectrum_image = np.fft.fft(wave_image, axis=1)
    input_image = np.abs(spectrum_image[:,:128].reshape(1, height+pad*2, 128), dtype=np.float32)

    np.clip(input_image, 1000, None, out=input_image)
    np.log(input_image, out=input_image)
    input_image += bias
    input_image /= scale

    if np.max(input_image) > 0.95:
        print('input image max bigger than 0.95', np.max(input_image))
    if np.min(input_image) < 0.05:
        print('input image min smaller than 0.05', np.min(input_image))

    return input_image 
Example 8
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License
def db(audio):
    if len(audio.shape) > 1:
        maxx = np.max(np.abs(audio), axis=1)
        return 20 * np.log10(maxx) if np.any(maxx != 0) else np.array([0])
    maxx = np.max(np.abs(audio))
    return 20 * np.log10(maxx) if maxx != 0 else np.array([0]) 
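
For a mono signal, db() reports the peak level in decibels relative to a full-scale amplitude of 1.0. A small illustration (the signal values are made up, and it assumes the db() function above is in scope):

import numpy as np

t = np.linspace(0.0, 1.0, 16000)
full_scale = np.sin(2 * np.pi * 440.0 * t)   # peak amplitude ~1.0
half_scale = 0.5 * full_scale                # peak amplitude ~0.5

print(db(full_scale))   # ~0.0 dB   (20 * log10(1.0))
print(db(half_scale))   # ~-6.0 dB  (20 * log10(0.5))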
Example 9
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License
def get_new_pop(elite_pop, elite_pop_scores, pop_size):
    scores_logits = np.exp(elite_pop_scores - elite_pop_scores.max()) 
    elite_pop_probs = scores_logits / scores_logits.sum()
    cand1 = elite_pop[np.random.choice(len(elite_pop), p=elite_pop_probs, size=pop_size)]
    cand2 = elite_pop[np.random.choice(len(elite_pop), p=elite_pop_probs, size=pop_size)]
    mask = np.random.rand(pop_size, elite_pop.shape[1]) < 0.5 
    next_pop = mask * cand1 + (1 - mask) * cand2
    return next_pop 
Example 10
Project: SyNEThesia   Author: RunOrVeith   File: feature_creators.py    MIT License
def _split_into_chunks(signal, chunks_per_second=24):
    # TODO currently broken
    raise NotImplementedError("Splitting to chunks is currently broken.")
    window_length_ms = 1/chunks_per_second * 1000
    intervals = np.arange(window_length_ms, signal.shape[0], window_length_ms, dtype=np.int32)
    chunks = np.array_split(signal, intervals, axis=0)
    pad_to = _next_power_of_two(np.max([chunk.shape[0] for chunk in chunks]))
    padded_chunks = np.stack([np.concatenate([chunk, np.zeros((pad_to - chunk.shape[0],))]) for chunk in chunks])
    return padded_chunks 
Example 11
Project: chainer-openai-transformer-lm   Author: soskek   File: utils.py    MIT License
def np_softmax(x, t=1):
    x = x / t
    x = x - np.max(x, axis=-1, keepdims=True)
    ex = np.exp(x)
    return ex / np.sum(ex, axis=-1, keepdims=True) 
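
Subtracting np.max(x, axis=-1, keepdims=True) before exponentiating is the usual numerical-stability trick: it leaves the result unchanged mathematically but keeps np.exp from overflowing on large logits. A quick check (assuming the np_softmax() function above is in scope):

import numpy as np

logits = np.array([1000.0, 1001.0, 1002.0])   # naive np.exp(logits) would overflow
probs = np_softmax(logits)
print(probs)         # ~[0.090, 0.245, 0.665]
print(probs.sum())   # 1.0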
Example 12
Project: cat-bbs   Author: aleju   File: plotting.py    MIT License
def get_max_x(self):
        return max([group.get_max_x() for group in self.line_groups.itervalues()]) 
Example 13
Project: cat-bbs   Author: aleju   File: plotting.py    MIT License
def get_max_x(self):
        return max([max(line.xs) if len(line.xs) > 0 else 0 for line in self.lines.itervalues()]) 
Example 14
Project: cat-bbs   Author: aleju   File: plotting.py    MIT License
def __init__(self, titles, increasing, save_to_fp):
        assert len(titles) == len(increasing)
        n_plots = len(titles)
        self.titles = titles
        self.increasing = dict([(title, incr) for title, incr in zip(titles, increasing)])
        self.colors = ["red", "blue", "cyan", "magenta", "orange", "black"]

        self.nb_points_max = 500
        self.save_to_fp = save_to_fp
        self.start_batch_idx = 0
        self.autolimit_y = False
        self.autolimit_y_multiplier = 5

        #self.fig, self.axes = plt.subplots(nrows=2, ncols=2, figsize=(20, 20))
        nrows = max(1, int(math.sqrt(n_plots)))
        ncols = int(math.ceil(n_plots / nrows))
        width = ncols * 10
        height = nrows * 10

        self.fig, self.axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(width, height))

        if nrows == 1 and ncols == 1:
            self.axes = [self.axes]
        else:
            self.axes = self.axes.flat

        title_to_ax = dict()
        for idx, (title, ax) in enumerate(zip(self.titles, self.axes)):
            title_to_ax[title] = ax
        self.title_to_ax = title_to_ax

        self.fig.tight_layout()
        self.fig.subplots_adjust(left=0.05) 
Example 15
Project: cat-bbs   Author: aleju   File: plotting.py    MIT License
def _line_to_xy(self, line_x, line_y, limit_y_min=None, limit_y_max=None):
        point_every = max(1, int(len(line_x) / self.nb_points_max))
        points_x = []
        points_y = []
        curr_sum = 0
        counter = 0
        last_idx = len(line_x) - 1
        for i in range(len(line_x)):
            batch_idx = line_x[i]
            if batch_idx > self.start_batch_idx:
                curr_sum += line_y[i]
                counter += 1
                if counter >= point_every or i == last_idx:
                    points_x.append(batch_idx)
                    y = curr_sum / counter
                    if limit_y_min is not None and limit_y_max is not None:
                        y = np.clip(y, limit_y_min, limit_y_max)
                    elif limit_y_min is not None:
                        y = max(y, limit_y_min)
                    elif limit_y_max is not None:
                        y = min(y, limit_y_max)
                    points_y.append(y)
                    counter = 0
                    curr_sum = 0

        return points_x, points_y 
Example 16
Project: UR5_Controller   Author: tsinghua-rll   File: HAPI.py    MIT License
def isLastMovementEnd(self, select=(1, 1, 1, 1, 1, 1)):
        """
        :param select: 1 if dimension is selected
        :return: boolean
        """
        select = np.asarray(select, dtype=np.float32)
        data = self.rtif.receive()
        tar_rad = np.asarray(data["Target Joint Positions"], dtype=np.float32)
        cur_rad = np.asarray(data["Actual Joint Positions"], dtype=np.float32)
        speed = np.asarray(data["Actual Joint Velocities"])
        return np.max(np.abs((tar_rad - cur_rad) * select)) < 1e-4 and np.max(np.abs(speed * select)) < 1e-1 
Example 17
Project: aospy   Author: spencerahill   File: vertcoord.py    Apache License 2.0
def to_pascal(arr, is_dp=False):
    """Force data with units either hPa or Pa to be in Pa."""
    threshold = 400 if is_dp else 1200
    if np.max(np.abs(arr)) < threshold:
        warn_msg = "Conversion applied: hPa -> Pa to array: {}".format(arr)
        logging.debug(warn_msg)
        return arr*100.
    return arr 
Example 18
Project: aospy   Author: spencerahill   File: vertcoord.py    Apache License 2.0
def to_hpa(arr):
    """Convert pressure array from Pa to hPa (if needed)."""
    if np.max(np.abs(arr)) > 1200.:
        warn_msg = "Conversion applied: Pa -> hPa to array: {}".format(arr)
        logging.debug(warn_msg)
        return arr / 100.
    return arr 
Example 19
Project: aospy   Author: spencerahill   File: var.py    Apache License 2.0
def mask_unphysical(self, data):
        """Mask data array where values are outside physically valid range."""
        if not self.valid_range:
            return data
        else:
            return np.ma.masked_outside(data, np.min(self.valid_range),
                                        np.max(self.valid_range)) 
Example 20
Project: RF-Monitor   Author: EarToEarOak   File: gui.py    GNU General Public License v2.0
def __on_scan_data(self, event):
        levels = numpy.log10(event['l'])
        levels *= 10
        self._levels = levels

        noise = numpy.percentile(levels,
                                 self._toolbar.get_dynamic_percentile())

        updated = False
        for monitor in self._monitors:
            freq = monitor.get_frequency()
            if monitor.get_enabled():
                monitor.set_noise(noise)
                index = numpy.where(freq == event['f'])[0]
                signal = monitor.set_level(levels[index][0],
                                           event['timestamp'],
                                           self._location)
                if signal is not None:
                    updated = True
                    if signal.end is not None:
                        recording = format_recording(freq, signal)
                        if self._settings.get_push_enable():
                            self._push.send(self._settings.get_push_uri(),
                                            recording)
                        if self._server is not None:
                            self._server.send(recording)

        if updated:
            if self._isSaved:
                self._isSaved = False
                self.__set_title()
                self.__set_timeline()

        self.__set_spectrum(noise)
        self._rssi.set_noise(numpy.mean(levels))
        self._rssi.set_level(numpy.max(levels)) 
Example 21
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: blob.py    MIT License
def im_list_to_blob(ims):
  """Convert a list of images into a network input.

  Assumes images are already prepared (means subtracted, BGR order, ...).
  """
  max_shape = np.array([im.shape for im in ims]).max(axis=0)
  num_images = len(ims)
  blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),
                  dtype=np.float32)
  for i in range(num_images):
    im = ims[i]
    blob[i, 0:im.shape[0], 0:im.shape[1], :] = im

  return blob 
Example 22
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: blob.py    MIT License
def prep_im_for_blob(im, pixel_means, target_size, max_size):
  """Mean subtract and scale an image for use in a blob."""
  im = im.astype(np.float32, copy=False)
  im -= pixel_means
  im_shape = im.shape
  im_size_min = np.min(im_shape[0:2])
  im_size_max = np.max(im_shape[0:2])
  im_scale = float(target_size) / float(im_size_min)
  # Prevent the biggest axis from being more than MAX_SIZE
  if np.round(im_scale * im_size_max) > max_size:
    im_scale = float(max_size) / float(im_size_max)
  im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                  interpolation=cv2.INTER_LINEAR)

  return im, im_scale 
Example 23
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: test.py    MIT License
def _get_image_blob(im):
  """Converts an image into a network input.
  Arguments:
    im (ndarray): a color image in BGR order
  Returns:
    blob (ndarray): a data blob holding an image pyramid
    im_scale_factors (list): list of image scales (relative to im) used
      in the image pyramid
  """
  im_orig = im.astype(np.float32, copy=True)
  im_orig -= cfg.PIXEL_MEANS

  im_shape = im_orig.shape
  im_size_min = np.min(im_shape[0:2])
  im_size_max = np.max(im_shape[0:2])

  processed_ims = []
  im_scale_factors = []

  for target_size in cfg.TEST.SCALES:
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
      im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
    im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
            interpolation=cv2.INTER_LINEAR)
    im_scale_factors.append(im_scale)
    processed_ims.append(im)

  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)

  return blob, np.array(im_scale_factors) 
Example 24
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: test_train.py    MIT License
def _get_image_blob(im):
  """Converts an image into a network input.
  Arguments:
    im (ndarray): a color image in BGR order
  Returns:
    blob (ndarray): a data blob holding an image pyramid
    im_scale_factors (list): list of image scales (relative to im) used
      in the image pyramid
  """
  im_orig = im.astype(np.float32, copy=True)
  im_orig -= cfg.PIXEL_MEANS

  im_shape = im_orig.shape
  im_size_min = np.min(im_shape[0:2])
  im_size_max = np.max(im_shape[0:2])

  processed_ims = []
  im_scale_factors = []

  for target_size in cfg.TEST.SCALES:
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
      im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
    im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
            interpolation=cv2.INTER_LINEAR)
    im_scale_factors.append(im_scale)
    processed_ims.append(im)

  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)

  return blob, np.array(im_scale_factors) 
Example 25
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: voc_eval.py    MIT License
def voc_ap(rec, prec, use_07_metric=False):
  """ ap = voc_ap(rec, prec, [use_07_metric])
  Compute VOC AP given precision and recall.
  If use_07_metric is true, uses the
  VOC 07 11 point method (default:False).
  """
  if use_07_metric:
    # 11 point metric
    ap = 0.
    for t in np.arange(0., 1.1, 0.1):
      if np.sum(rec >= t) == 0:
        p = 0
      else:
        p = np.max(prec[rec >= t])
      ap = ap + p / 11.
  else:
    # correct AP calculation
    # first append sentinel values at the end
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))

    # compute the precision envelope
    for i in range(mpre.size - 1, 0, -1):
      mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # to calculate area under PR curve, look for points
    # where X axis (recall) changes value
    i = np.where(mrec[1:] != mrec[:-1])[0]

    # and sum (\Delta recall) * prec
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
  return ap 
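
As a toy illustration (values invented, not from the project; it assumes the voc_ap() function above is in scope): for a detector whose precision stays at 1.0 up to recall 0.5 and then drops to 0.5, the exact-area calculation gives AP = 0.75, while the VOC07 11-point method gives a slightly different value.

import numpy as np

rec  = np.array([0.25, 0.50, 0.75, 1.00])
prec = np.array([1.00, 1.00, 0.50, 0.50])

print(voc_ap(rec, prec))                       # 0.75  (area under the precision envelope)
print(voc_ap(rec, prec, use_07_metric=True))   # ~0.77 (mean of 11 interpolated points)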
Example 26
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: coco.py    MIT License
def _print_detection_eval_metrics(self, coco_eval):
    IoU_lo_thresh = 0.5
    IoU_hi_thresh = 0.95

    def _get_thr_ind(coco_eval, thr):
      ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
                     (coco_eval.params.iouThrs < thr + 1e-5))[0][0]
      iou_thr = coco_eval.params.iouThrs[ind]
      assert np.isclose(iou_thr, thr)
      return ind

    ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
    ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
    # precision has dims (iou, recall, cls, area range, max dets)
    # area range index 0: all area ranges
    # max dets index 2: 100 per image
    precision = \
      coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
    ap_default = np.mean(precision[precision > -1])
    print(('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
           '~~~~').format(IoU_lo_thresh, IoU_hi_thresh))
    print('{:.1f}'.format(100 * ap_default))
    for cls_ind, cls in enumerate(self.classes):
      if cls == '__background__':
        continue
      # minus 1 because of __background__
      precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
      ap = np.mean(precision[precision > -1])
      print('{:.1f}'.format(100 * ap))

    print('~~~~ Summary metrics ~~~~')
    coco_eval.summarize() 
Example 27
Project: cgp-cnn   Author: sg-nm   File: cgp_config.py    MIT License
def __init__(self, rows=30, cols=40, level_back=40, min_active_num=8, max_active_num=50):
        # network configurations depending on the problem
        self.input_num = 1

        self.func_type = ['ConvBlock32_3', 'ConvBlock32_5',
                          'ConvBlock64_3', 'ConvBlock64_5',
                          'ConvBlock128_3', 'ConvBlock128_5',
                          'pool_max', 'pool_ave',
                          'concat', 'sum']
        self.func_in_num = [1, 1,
                            1, 1,
                            1, 1,
                            1, 1,
                            2, 2]

        self.out_num = 1
        self.out_type = ['full']
        self.out_in_num = [1]

        # CGP network configuration
        self.rows = rows
        self.cols = cols
        self.node_num = rows * cols
        self.level_back = level_back
        self.min_active_num = min_active_num
        self.max_active_num = max_active_num

        self.func_type_num = len(self.func_type)
        self.out_type_num = len(self.out_type)
        self.max_in_num = np.max([np.max(self.func_in_num), np.max(self.out_in_num)]) 
Example 28
Project: cgp-cnn   Author: sg-nm   File: cgp_config.py    MIT License
def __init__(self, rows=30, cols=40, level_back=40, min_active_num=8, max_active_num=50):
        # network configurations depending on the problem
        self.input_num = 1

        self.func_type = ['ResBlock32_3', 'ResBlock32_5',
                          'ResBlock64_3', 'ResBlock64_5',
                          'ResBlock128_3', 'ResBlock128_5',
                          'pool_max', 'pool_ave',
                          'concat', 'sum']
        self.func_in_num = [1, 1,
                            1, 1,
                            1, 1,
                            1, 1,
                            2, 2]

        self.out_num = 1
        self.out_type = ['full']
        self.out_in_num = [1]

        # CGP network configuration
        self.rows = rows
        self.cols = cols
        self.node_num = rows * cols
        self.level_back = level_back
        self.min_active_num = min_active_num
        self.max_active_num = max_active_num

        self.func_type_num = len(self.func_type)
        self.out_type_num = len(self.out_type)
        self.max_in_num = np.max([np.max(self.func_in_num), np.max(self.out_in_num)]) 
Example 29
Project: prediction-constrained-topic-models   Author: dtak   File: calc_roc_auc_via_bootstrap.py    MIT License
def verify_min_examples_per_label(y_NC, min_examples_per_label):
    '''
    
    Examples
    --------
    >>> y_all_0 = np.zeros(10)
    >>> y_all_1 = np.ones(30)
    >>> verify_min_examples_per_label(y_all_0, 3)
    False
    >>> verify_min_examples_per_label(y_all_1, 2)
    False
    >>> verify_min_examples_per_label(np.hstack([y_all_0, y_all_1]), 10)
    True
    >>> verify_min_examples_per_label(np.eye(3), 2)
    False
    '''
    if y_NC.ndim < 2:
        y_NC = np.atleast_2d(y_NC).T
    n_C = np.sum(np.isfinite(y_NC), axis=0)
    n_pos_C = n_C * np.nanmean(y_NC, axis=0)
    min_neg = np.max(n_C - n_pos_C)
    min_pos = np.min(n_pos_C)
    if min_pos < min_examples_per_label:
        return False
    elif min_neg < min_examples_per_label:
        return False
    return True 
Example 30
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: predict.py    MIT License
def _softmax(x):
    e_x = np.exp(x - np.max(x))
    out = e_x / e_x.sum()
    return out 
Example 31
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: blob.py    MIT License
def im_list_to_blob(ims):
    """Convert a list of images into a network input.

    Assumes images are already prepared (means subtracted, BGR order, ...).
    """
    max_shape = np.array([im.shape for im in ims]).max(axis=0)
    num_images = len(ims)
    blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),
                    dtype=np.float32)
    for i in range(num_images):
        im = ims[i]
        blob[i, 0:im.shape[0], 0:im.shape[1], :] = im

    return blob 
Example 32
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: blob.py    MIT License
def prep_im_for_blob(im, pixel_means, target_size, max_size):
    """Mean subtract and scale an image for use in a blob."""
    im = im.astype(np.float32, copy=False)
    im -= pixel_means
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)

    return im, im_scale 
Example 33
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: test.py    MIT License
def _get_image_blob(im):
    """Converts an image into a network input.
    Arguments:
      im (ndarray): a color image in BGR order
    Returns:
      blob (ndarray): a data blob holding an image pyramid
      im_scale_factors (list): list of image scales (relative to im) used
        in the image pyramid
    """
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= cfg.FLAGS2["pixel_means"]

    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])

    processed_ims = []
    im_scale_factors = []

    for target_size in cfg.FLAGS2["test_scales"]:
        im_scale = float(target_size) / float(im_size_min)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(im_scale * im_size_max) > cfg.FLAGS.test_max_size:
            im_scale = float(cfg.FLAGS.test_max_size) / float(im_size_max)
        im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(im)

    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)

    return blob, np.array(im_scale_factors) 
Example 34
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: voc_eval.py    MIT License
def voc_ap(rec, prec, use_07_metric=False):
    """ ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the
    VOC 07 11 point method (default:False).
    """
    if use_07_metric:
        # 11 point metric
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.
    else:
        # correct AP calculation
        # first append sentinel values at the end
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))

        # compute the precision envelope
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # to calculate area under PR curve, look for points
        # where X axis (recall) changes value
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # and sum (\Delta recall) * prec
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap 
Example 35
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    MIT License
def create_cifar10(tfrecord_dir, cifar10_dir):
    print('Loading CIFAR-10 from "%s"' % cifar10_dir)
    import pickle
    images = []
    labels = []
    for batch in range(1, 6):
        with open(os.path.join(cifar10_dir, 'data_batch_%d' % batch), 'rb') as file:
            data = pickle.load(file, encoding='latin1')
        images.append(data['data'].reshape(-1, 3, 32, 32))
        labels.append(data['labels'])
    images = np.concatenate(images)
    labels = np.concatenate(labels)
    assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype == np.int32
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#---------------------------------------------------------------------------- 
Example 36
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    MIT License
def create_svhn(tfrecord_dir, svhn_dir):
    print('Loading SVHN from "%s"' % svhn_dir)
    import pickle
    images = []
    labels = []
    for batch in range(1, 4):
        with open(os.path.join(svhn_dir, 'train_%d.pkl' % batch), 'rb') as file:
            data = pickle.load(file, encoding='latin1')
        images.append(data[0])
        labels.append(data[1])
    images = np.concatenate(images)
    labels = np.concatenate(labels)
    assert images.shape == (73257, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (73257,) and labels.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#---------------------------------------------------------------------------- 
Example 37
Project: Lane-And-Vehicle-Detection   Author: JustinHeaton   File: main.py    MIT License
def radius_of_curvature(self, xvals, yvals, lane_width):
        ym_per_pix = 21./720 # meters per pixel in y dimension

        xm_per_pix = 3.7/lane_width # meters per pixel in x dimension
        fit_cr = np.polyfit(yvals*ym_per_pix, xvals*xm_per_pix, 2)
        curverad = ((1 + (2*fit_cr[0]*np.max(yvals)*ym_per_pix + fit_cr[1])**2)**1.5) \
                                     /np.absolute(2*fit_cr[0])
        return curverad 
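
The expression above is the standard radius-of-curvature formula for a quadratic fit x(y) = A*y^2 + B*y + C (with fit_cr[0] = A and fit_cr[1] = B), evaluated at the pixel row closest to the vehicle, y = np.max(yvals), after rescaling both axes from pixels to meters:

R = \frac{\left(1 + \left(\frac{dx}{dy}\right)^{2}\right)^{3/2}}{\left|\frac{d^{2}x}{dy^{2}}\right|}
  = \frac{\left(1 + (2Ay + B)^{2}\right)^{3/2}}{\left|2A\right|}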
Example 38
Project: HushUtility   Author: Deathhush   File: StockEvaluation.py    MIT License
def generate_daily_df(symbol, year='', file_path = 'D:\\Testland\\stock_data\\'):
    year_part = ''
    if (year != ''):
        year_part = '\\%s\\' % year
    result_df = pandas.read_csv('%s%s%s.csv' % (file_path, year_part, symbol), header=None, names=[u'date', u'time', u'open', u'high', u'low', u'close', u'volume',u'amount'])    
    result_df = result_df.groupby('date').agg({'high':np.max, 'low':np.min, 'volume':np.sum, 'amount':np.sum, 'open':'first', 'close':'last'})        
    result_df['ma5']=pd.rolling_mean(result_df['close'] , 5)
    result_df['ma10']=pd.rolling_mean(result_df['close'] , 10)
    analyzed_path = '%s\\analyzed\\%s.%s.daily.analyzed.csv' % (file_path, symbol, year)
    result_df.to_csv(analyzed_path)
    result_df = pandas.read_csv(analyzed_path)      
    return result_df 
Example 39
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def generate_adversarial_examples_np(self, ord, eps, **kwargs):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, eps=eps, ord=ord,
                                        clip_min=-5, clip_max=5, **kwargs)
        if ord == np.inf:
            delta = np.max(np.abs(x_adv - x_val), axis=1)
        elif ord == 1:
            delta = np.sum(np.abs(x_adv - x_val), axis=1)
        elif ord == 2:
            delta = np.sum(np.square(x_adv - x_val), axis=1)**.5

        return x_val, x_adv, delta 
Example 40
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_generate_np_can_be_called_with_different_eps(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        for eps in [0.1, 0.2, 0.3, 0.4]:
            x_adv = self.attack.generate_np(x_val, eps=eps, ord=np.inf,
                                            clip_min=-5.0, clip_max=5.0)

            delta = np.max(np.abs(x_adv - x_val), axis=1)
            self.assertClose(delta, eps) 
Example 41
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_generate_np_clip_works_as_expected(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, eps=0.5, ord=np.inf,
                                        clip_min=-0.2, clip_max=0.1)

        self.assertClose(np.min(x_adv), -0.2)
        self.assertClose(np.max(x_adv), 0.1) 
Example 42
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_do_not_reach_lp_boundary(self):
        """
        Make sure that iterative attacks don't reach the boundary of the Lp
        neighbourhood if nb_iter * eps_iter is relatively small compared to
        epsilon.
        """
        for ord in [1, 2, np.infty]:
            _, _, delta = self.generate_adversarial_examples_np(
                ord=ord, eps=.5, nb_iter=10, eps_iter=.01)
            self.assertTrue(np.max(0.5 - delta) > 0.25) 
Example 43
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_generate_np_can_be_called_with_different_decay_factor(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        for dacay_factor in [0.0, 0.5, 1.0]:
            x_adv = self.attack.generate_np(x_val, eps=0.5, ord=np.inf,
                                            dacay_factor=dacay_factor,
                                            clip_min=-5.0, clip_max=5.0)

            delta = np.max(np.abs(x_adv - x_val), axis=1)
            self.assertClose(delta, 0.5) 
Example 44
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_generate_np_gives_clipped_adversarial_examples(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, max_iterations=10,
                                        binary_search_steps=1,
                                        learning_rate=1e-3,
                                        initial_const=1,
                                        clip_min=-0.2, clip_max=0.3,
                                        batch_size=100)

        self.assertTrue(-0.201 < np.min(x_adv))
        self.assertTrue(np.max(x_adv) < .301) 
Example 45
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_generate_np_gives_clipped_adversarial_examples(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, over_shoot=0.02, max_iter=50,
                                        nb_candidate=2, clip_min=-0.2,
                                        clip_max=0.3)

        self.assertTrue(-0.201 < np.min(x_adv))
        self.assertTrue(np.max(x_adv) < .301) 
Example 46
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_clip_eta(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, eps=1.0, eps_iter=0.1,
                                        nb_iter=5)

        delta = np.max(np.abs(x_adv - x_val), axis=1)
        self.assertTrue(np.all(delta <= 1.)) 
Example 47
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_generate_np_gives_clipped_adversarial_examples(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, eps=1.0, eps_iter=0.1,
                                        nb_iter=5,
                                        clip_min=-0.2, clip_max=0.3)

        self.assertTrue(-0.201 < np.min(x_adv))
        self.assertTrue(np.max(x_adv) < .301) 
Example 48
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_generate_np_gives_clipped_adversarial_examples(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        feed_labs = np.zeros((100, 2))
        feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
        x_adv = self.attack.generate_np(x_val, max_iterations=10,
                                        binary_search_steps=1,
                                        initial_const=1,
                                        clip_min=-0.2, clip_max=0.3,
                                        batch_size=100, y_target=feed_labs)

        self.assertTrue(-0.201 < np.min(x_adv))
        self.assertTrue(np.max(x_adv) < .301) 
Example 49
Project: neural-fingerprinting   Author: StephanZheng   File: work_data.py    BSD 3-Clause "New" or "Revised" License
def compute_work_statistics(self):
    """Computes statistics from all work pieces stored in this class."""
    result = {}
    for v in itervalues(self.work):
      submission_id = v['submission_id']
      if submission_id not in result:
        result[submission_id] = {
            'completed': 0,
            'num_errors': 0,
            'error_messages': set(),
            'eval_times': [],
            'min_eval_time': None,
            'max_eval_time': None,
            'mean_eval_time': None,
            'median_eval_time': None,
        }
      if not v['is_completed']:
        continue
      result[submission_id]['completed'] += 1
      if 'error' in v and v['error']:
        result[submission_id]['num_errors'] += 1
        result[submission_id]['error_messages'].add(v['error'])
      else:
        result[submission_id]['eval_times'].append(float(v['elapsed_time']))
    for v in itervalues(result):
      if v['eval_times']:
        v['min_eval_time'] = np.min(v['eval_times'])
        v['max_eval_time'] = np.max(v['eval_times'])
        v['mean_eval_time'] = np.mean(v['eval_times'])
        v['median_eval_time'] = np.median(v['eval_times'])
    return result 
Example 50
Project: neural-fingerprinting   Author: StephanZheng   File: set_loader.py    BSD 3-Clause "New" or "Revised" License
def load_testset(size):
    # Load image paths and labels
    pairs = lfw.read_pairs(pairs_path)
    paths, labels = lfw.get_paths(testset_path, pairs, file_extension)

    # Random choice
    permutation = np.random.choice(len(labels), size, replace=False)
    paths_batch_1 = []
    paths_batch_2 = []

    for index in permutation:
        paths_batch_1.append(paths[index * 2])
        paths_batch_2.append(paths[index * 2 + 1])

    labels = np.asarray(labels)[permutation]
    paths_batch_1 = np.asarray(paths_batch_1)
    paths_batch_2 = np.asarray(paths_batch_2)

    # Load images
    faces1 = facenet.load_data(paths_batch_1, False, False, image_size)
    faces2 = facenet.load_data(paths_batch_2, False, False, image_size)

    # Change pixel values to 0 to 1 values
    min_pixel = min(np.min(faces1), np.min(faces2))
    max_pixel = max(np.max(faces1), np.max(faces2))
    faces1 = (faces1 - min_pixel) / (max_pixel - min_pixel)
    faces2 = (faces2 - min_pixel) / (max_pixel - min_pixel)

    # Convert labels to one-hot vectors
    onehot_labels = []
    for index in range(len(labels)):
        if labels[index]:
            onehot_labels.append([1, 0])
        else:
            onehot_labels.append([0, 1])

    return faces1, faces2, np.array(onehot_labels) 
Example 51
Project: Griffin_lim   Author: candlewill   File: audio.py    MIT License
def save_wav(wav, path):
    wav *= 32767 / max(0.01, np.max(np.abs(wav)))
    wavfile.write(path, hparams.sample_rate, wav.astype(np.int16)) 
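
The 32767 / max(0.01, peak) factor rescales the waveform so its peak lands at int16 full scale, and the 0.01 floor stops near-silent input from being amplified by an unbounded gain. A small sanity check of the arithmetic (hypothetical signal, hparams not needed):

import numpy as np

wav = 0.25 * np.sin(2 * np.pi * 440.0 * np.linspace(0.0, 1.0, 22050))
peak = np.max(np.abs(wav))            # ~0.25
gain = 32767 / max(0.01, peak)        # ~131068, so peak * gain ~ 32767
print(np.max(np.abs(wav * gain)))     # ~32767.0, i.e. int16 full scale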
Example 52
Project: epyc   Author: simoninireland   File: test_summaryexperiments.py    GNU General Public License v2.0
def testExtrema( self ):
        '''Test we capture the extrema correctly.'''
        N = 10
        self._lab['x'] = [ 5 ]    # dummy data point, we don't use it
        
        e = SampleExperiment5()
        es = SummaryExperiment(RepeatedExperiment(e, N))

        self._lab.runExperiment(es)
        self.assertTrue(es.success())
        res = (self._lab.results())[0]

        self.assertEqual(res[Experiment.METADATA][SummaryExperiment.UNDERLYING_SUCCESSFUL_RESULTS], 10)
        self.assertEqual(res[Experiment.RESULTS]['result_min'], numpy.min(e._values))
        self.assertEqual(res[Experiment.RESULTS]['result_max'], numpy.max(e._values)) 
Example 53
Project: epyc   Author: simoninireland   File: summaryexperiment.py    GNU General Public License v2.0
def summarise( self, results ):
        """Generate a summary of results from a list of result dicts
        returned by running the underlying experiment. By default we generate
        mean, median, variance, and extrema for each value recorded.

        Override this method to create different or extra summary statistics.

        :param results: an array of result dicts
        :returns: a dict of summary statistics"""
        if len(results) == 0:
            return dict()
        else:
            summary = dict()

            # work out the fields to summarise
            allKeys = results[0][Experiment.RESULTS].keys()
            ks = self._summarised_results
            if ks is None:
                # if we don't restrict, summarise all keys
                ks = allKeys
            else:
                # protect against a key that's not present
                ks = [ k for k in ks if k in allKeys ]
                
            # add the summary statistics
            for k in ks:
                # compute summaries for all fields we're interested in
                vs = [ res[Experiment.RESULTS][k] for res in results ]
                summary[self._mean(k)]     = numpy.mean(vs)
                summary[self._median(k)]   = numpy.median(vs)
                summary[self._variance(k)] = numpy.var(vs)
                summary[self._min(k)]      = numpy.min(vs)
                summary[self._max(k)]      = numpy.max(vs)
                    
            return summary 
Example 54
Project: nmp_qc   Author: priba   File: utils.py    MIT License
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    pred = pred.type_as(target)
    target = target.type_as(pred)
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res 
Example 55
Project: nmp_qc   Author: priba   File: utils.py    MIT License
def collate_g(batch):

    batch_sizes = np.max(np.array([[len(input_b[1]), len(input_b[1][0]), len(input_b[2]),
                                len(list(input_b[2].values())[0])]
                                if input_b[2] else
                                [len(input_b[1]), len(input_b[1][0]), 0,0]
                                for (input_b, target_b) in batch]), axis=0)

    g = np.zeros((len(batch), batch_sizes[0], batch_sizes[0]))
    h = np.zeros((len(batch), batch_sizes[0], batch_sizes[1]))
    e = np.zeros((len(batch), batch_sizes[0], batch_sizes[0], batch_sizes[3]))

    target = np.zeros((len(batch), len(batch[0][1])))

    for i in range(len(batch)):

        num_nodes = len(batch[i][0][1])

        # Adjacency matrix
        g[i, 0:num_nodes, 0:num_nodes] = batch[i][0][0]

        # Node features
        h[i, 0:num_nodes, :] = batch[i][0][1]

        # Edges
        for edge in batch[i][0][2].keys():
            e[i, edge[0], edge[1], :] = batch[i][0][2][edge]
            e[i, edge[1], edge[0], :] = batch[i][0][2][edge]

        # Target
        target[i, :] = batch[i][1]

    g = torch.FloatTensor(g)
    h = torch.FloatTensor(h)
    e = torch.FloatTensor(e)
    target = torch.FloatTensor(target)

    return g, h, e, target 
Example 56
Project: models   Author: kipoi   File: gtf_utils.py    MIT License
def get_exon_max_num(self):
        exonMax = 0
        for _tran in self.trans:
            exonMax = max(exonMax, _tran.exonNum)
        return exonMax 
Example 57
Project: models   Author: kipoi   File: gtf_utils.py    MIT License
def gene_ends_update(self):
        for t in self.trans:
            self.start = min(self.start, np.min(t.exons))
            self.stop = max(self.stop, np.max(t.exons)) 
Example 58
Project: models   Author: kipoi   File: gtf_utils.py    MIT License
def get_exon_max_num(self):
        exonMax = 0
        for _tran in self.trans:
            exonMax = max(exonMax, _tran.exonNum)
        return exonMax 
Example 59
Project: models   Author: kipoi   File: gtf_utils.py    MIT License
def gene_ends_update(self):
        for t in self.trans:
            self.start = min(self.start, np.min(t.exons))
            self.stop = max(self.stop, np.max(t.exons)) 
Example 60
Project: models   Author: kipoi   File: dataloader.py    MIT License
def get_exon_max_num(self):
        exonMax = 0
        for _tran in self.trans:
            exonMax = max(exonMax, _tran.exonNum)
        return exonMax 
Example 61
Project: models   Author: kipoi   File: gtf_utils.py    MIT License
def get_exon_max_num(self):
        exonMax = 0
        for _tran in self.trans:
            exonMax = max(exonMax, _tran.exonNum)
        return exonMax 
Example 62
Project: models   Author: kipoi   File: gtf_utils.py    MIT License
def gene_ends_update(self):
        for t in self.trans:
            self.start = min(self.start, np.min(t.exons))
            self.stop = max(self.stop, np.max(t.exons)) 
Example 63
Project: Deep_VoiceChanger   Author: pstuvwx   File: dataset.py    MIT License
def __init__(self, wave, dataset_len, test):
        self.wave = np.array(load(wave)[1], dtype=float)
        self.max = len(self.wave)-dif-sride*(3+padding*2)
        self.length = dataset_len
        if dataset_len <= 0:
            self.length = self.max // dif
        self.window = np.hanning(254)
        self.test = test 
Example 64
Project: Deep_VoiceChanger   Author: pstuvwx   File: dataset.py    MIT License
def get_example(self, i):
        if self.test:
            p = i * dif
        else:
            while True:
                p = random.randint(0, self.max)
                if np.max(self.wave[p:p+dif]) > 1000:
                    break
        return wave2input_image(self.wave, self.window, p, padding) 
Example 65
Project: Deep_VoiceChanger   Author: pstuvwx   File: dataset.py    MIT License
def __init__(self, encoded_path, dataset_len, test):
        self.images = np.load(encoded_path)
        self.max = self.images.shape[1]-height - padding*2
        self.length = dataset_len
        if dataset_len <= 0:
            self.length = self.max // height
        self.test = test 
Example 66
Project: Deep_VoiceChanger   Author: pstuvwx   File: dataset.py    MIT License
def get_example(self, i):
        if self.test:
            p = i * height
        else:
            while True:
                p = random.randint(0, self.max)
                if np.max(self.images[:,p:p+height,:]) > 0.4:
                    break
        return np.copy(self.images[:,p:p+height+padding*2,:]) 
Example 67
Project: tensorflow_generate_headlines   Author: FanWan   File: textrank_word2vec.py    GNU General Public License v3.0
def query_vector(word, model, embedding_size=300):
    try:
        init_vector = model[word]
        max_val, min_val = np.max(init_vector), np.min(init_vector)
        vector = (init_vector - min_val) / (max_val - min_val)
        return vector
    except KeyError:
        unknown_word_during_textrank.append(word)
        vec = np.zeros(embedding_size, dtype=float)
    return vec 
Example 68
Project: pytorch_NER_BiLSTM_CNN_CRF   Author: bamtercelboo   File: utils.py    Apache License 2.0
def getMaxindex(model_out, label_size, args):
    """
    :param model_out: model output
    :param label_size: label size
    :param args: argument
    :return: index of the highest-scoring label (the predicted label)
    """
    max = model_out.data[0]
    maxIndex = 0
    for idx in range(1, label_size):
        if model_out.data[idx] > max:
            max = model_out.data[idx]
            maxIndex = idx
    return maxIndex 
Example 69
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License
def run(self, log=None):
        max_fitness_score = float('-inf') 
        dist = float('inf')
        best_text = ''
        itr = 1
        prev_loss = None
        if log is not None:
            log.write('target phrase: ' + self.target_phrase + '\n')
            log.write('itr, corr, lev dist \n')
        
        while itr <= self.max_iters and best_text != self.target_phrase:
            pop_scores, ctc = self.get_fitness_score(self.pop, self.target_phrase, self.input_audio)
            elite_ind = np.argsort(pop_scores)[-self.elite_size:]
            elite_pop, elite_pop_scores, elite_ctc = self.pop[elite_ind], pop_scores[elite_ind], ctc[elite_ind]
            
            if prev_loss is not None and prev_loss != elite_ctc[-1]: 
                self.mutation_p = self.mu * self.mutation_p + self.alpha / np.abs(prev_loss - elite_ctc[-1]) 

            if itr % 10 == 0:
                print('**************************** ITERATION {} ****************************'.format(itr))
                print('Current loss: {}'.format(-elite_ctc[-1]))
                save_wav(elite_pop[-1], self.output_wave_file)
                best_pop = np.tile(np.expand_dims(elite_pop[-1], axis=0), (100, 1))
                _, best_text = self.get_fitness_score(best_pop, self.target_phrase, self.input_audio, classify=True)
                
                dist = levenshteinDistance(best_text, self.target_phrase)
                corr = "{0:.4f}".format(np.corrcoef([self.input_audio, elite_pop[-1]])[0][1])
                print('Audio similarity to input: {}'.format(corr))
                print('Edit distance to target: {}'.format(dist))
                print('Currently decoded as: {}'.format(best_text))
                if log is not None:
                    log.write(str(itr) + ", " + corr + ", " + str(dist) + "\n")
                    
            if dist > 2:
                next_pop = get_new_pop(elite_pop, elite_pop_scores, self.pop_size)
                self.pop = mutate_pop(next_pop, self.mutation_p, self.noise_stdev, elite_pop)
                prev_loss = elite_ctc[-1]
                
            else:
                perturbed = np.tile(np.expand_dims(elite_pop[-1], axis=0), (self.num_points_estimate, 1))
                indices = np.random.choice(self.pop.shape[1], size=self.num_points_estimate, replace=False)

                perturbed[np.arange(self.num_points_estimate), indices] += self.delta_for_gradient
                perturbed_scores = self.get_fitness_score(perturbed, self.target_phrase, self.input_audio)[0]

                grad = (perturbed_scores - elite_ctc[-1]) / self.delta_for_gradient
                grad /= np.abs(grad).max()
                modified = elite_pop[-1].copy()
                modified[indices] += grad * self.delta_for_perturbation

                self.pop = np.tile(np.expand_dims(modified, axis=0), (self.pop_size, 1))
                self.delta_for_perturbation *= 0.995
                
            itr += 1

        return itr > self.max_iters
Example 70
Project: s2g   Author: caesar0301   File: shapegraph.py    MIT License
def gen_major_components(self):
        """
        Merge individual lines into groups of touching lines.
        :return: a tuple of (connected, connectivity, major_components)
        """
        lines = self.geoms
        L = len(lines)
        graph = nx.Graph()

        logging.info("Validating pair-wise line connections of raw shapefiles (total {0} lines)".format(L))

        # the number of valid couples can be precomputed as
        # L + (L-1) + (L-2) + ... + 1 = L * (L + 1) / 2
        # where L are for i=0, L-1 for i=1 and so on
        # see here for the formula https://en.wikipedia.org/wiki/1_%2B_2_%2B_3_%2B_4_%2B_%E2%8B%AF
        number_of_couples = L * (L+1) // 2

        with progressbar.ProgressBar(max_value=number_of_couples) as bar:
            # here a simple counter gets the job done
            k = 0
            for i in range(0, L):
                for j in range(i + 1, L):
                    bar.update(k + 1)
                    k += 1

                    if self.validate_pairwise_connectivity(i, j):
                        graph.add_edge(i, j)

        # Validate lines connectivity:
        # Pairs of lines which are connected (touched). These line pairs
        # are not guaranteed to be in the same major component.
        self.connectivity = [i for i in graph.edges() if i[0] != i[1]]

        # major connected components
        cc = nx.algorithms.components.connected_components(graph)
        self.major_components = sorted([i for i in cc], key=len, reverse=True)

        # check whether the touching lines form a single connected component
        self.connected = len(self.major_components) == 1

        # statistics
        print("Major components statistics:")
        print("\tTotal components: {0}".format(len(self.major_components)))
        if len(self.major_components) == 0:
            size = [0]
        else:
            size = [len(c) for c in self.major_components]
        print("\tComponent size: max {0}, median {1}, min {2}, average {3}" \
              .format(np.max(size), np.median(size), np.min(size), np.average(size)))
        print("\tTop comp. sizes: {0}".format(' '.join([str(i) for i in size[0:10]])))

        return self.connected, self.connectivity, self.major_components 
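
For context, the pairwise-connectivity test plus connected-components pattern used in gen_major_components can be reproduced in a few lines with shapely and networkx. This is only an illustrative sketch, with intersects() as a stand-in for the project's validate_pairwise_connectivity:

import networkx as nx
from shapely.geometry import LineString

# Three toy lines: the first two touch at (1, 1); the third is isolated.
lines = [LineString([(0, 0), (1, 1)]),
         LineString([(1, 1), (2, 0)]),
         LineString([(5, 5), (6, 6)])]

graph = nx.Graph()
graph.add_nodes_from(range(len(lines)))
for i in range(len(lines)):
    for j in range(i + 1, len(lines)):
        if lines[i].intersects(lines[j]):  # stand-in connectivity test
            graph.add_edge(i, j)

components = sorted(nx.connected_components(graph), key=len, reverse=True)
print(components)  # [{0, 1}, {2}]
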
Example 71
Project: cat-bbs   Author: aleju   File: plotting.py    MIT License 4 votes vote down vote up
def _plot_group(self, ax, group_name, group_increasing, line_names, line_xs, line_ys):
        ax.cla()
        ax.grid()

        if self.autolimit_y and any([len(line_x) > 0 for line_x in line_xs]):
            min_x = min([np.min(line_x) for line_x in line_xs])
            max_x = max([np.max(line_x) for line_x in line_xs])
            min_y = min([np.min(line_y) for line_y in line_ys])
            max_y = max([np.max(line_y) for line_y in line_ys])

            # default to no limit lines so both variables are always defined below
            limit_y_min = None
            limit_y_max = None

            if group_increasing:
                if max_y > 0:
                    limit_y_max = None
                    limit_y_min = max_y / self.autolimit_y_multiplier
                    if min_y > limit_y_min:
                        limit_y_min = None
            else:
                if min_y > 0:
                    limit_y_max = min_y * self.autolimit_y_multiplier
                    limit_y_min = None
                    if max_y < limit_y_max:
                        limit_y_max = None

            if limit_y_min is not None:
                ax.plot((min_x, max_x), (limit_y_min, limit_y_min), c="purple")

            if limit_y_max is not None:
                ax.plot((min_x, max_x), (limit_y_max, limit_y_max), c="purple")

            # limit the range of the y-axis
            yaxmin = min_y if limit_y_min is None else limit_y_min
            yaxmax = max_y if limit_y_max is None else limit_y_max
            yrange = yaxmax - yaxmin
            yaxmin = yaxmin - (0.05 * yrange)
            yaxmax = yaxmax + (0.05 * yrange)
            ax.set_ylim([yaxmin, yaxmax])
        else:
            limit_y_min = None
            limit_y_max = None

        for line_name, line_x, line_y, line_col in zip(line_names, line_xs, line_ys, self.colors):
            x, y = self._line_to_xy(line_x, line_y, limit_y_min=limit_y_min, limit_y_max=limit_y_max)
            ax.plot(x, y, color=line_col, linewidth=1.0)

        ax.set_title(group_name) 
Example 72
Project: prediction-constrained-topic-models   Author: dtak   File: slda_utils__dataset_manager.py    MIT License 4 votes vote down vote up
def describe_bow_dataset(
        dataset=None,
        dataset_name='Unknown dataset',
        percentiles=[0, 1, 10, 50, 90, 99, 100],
        label_list=None,
        **kwargs):
    n_docs, n_vocabs = dataset['x_csr_DV'].shape
    n_utokens_D = np.diff(dataset['doc_indptr_Dp1'])
    token_ct_D = np.squeeze(
        np.asarray(np.sum(dataset['x_csr_DV'], axis=1)))
    if dataset_name is not None:
        msg = "%s" % dataset_name
    else:
        msg = ""
    msg += "\n%d docs" % (n_docs)
    msg += "\n%d vocab words" % (n_vocabs)
    msg += "\nunique tokens per doc %s" % (
        make_percentile_str(n_utokens_D, percentiles, fmt_str='%7d'))
    msg += "\n total tokens per doc %s" % (
        make_percentile_str(token_ct_D, percentiles, fmt_str='%7d'))
    if 'y_DC' in dataset:
        msg += "\n%d labels" % dataset['n_labels']
        for c in xrange(dataset['n_labels']):
            y_c_D = dataset['y_DC'][:,c]
            try:
                rowmask = dataset['y_rowmask']
                y_c_D = y_c_D[rowmask == 1]
                assert np.all(np.isfinite(y_c_D))
            except KeyError:
                pass
            n_y_docs = y_c_D.size
            if c == 0:
                msg += "\n%.3f (%d/%d) docs are labeled" % (
                    float(n_y_docs)/n_docs, n_y_docs, n_docs)
            n_y_pos = np.sum(y_c_D == 1)
            if label_list:
                fmt_str = '%%-%ds' % (np.max(map(len, label_list)))
                label_c = (fmt_str + ' (%2d/%d)') % (
                    label_list[c], c+1, dataset['n_labels']) 
            else:
                label_c = 'label %2d/%d' % (c+1, dataset['n_labels']) 
            msg += '\n %s frac positive %.3f (%6d/%d)' % (
                label_c,
                float(n_y_pos) / float(n_y_docs), n_y_pos, n_y_docs)
            #msg += '\n frac negative %.3f (%6d/%d)' % (
            #    n_y_neg / float(n_y_docs), n_y_neg, n_y_docs)            
    return msg + "\n" 
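
The make_percentile_str helper used above is not shown in this snippet; a plausible standalone version built on np.percentile (an assumption about its behaviour, not the project's actual code) could look like:

import numpy as np

def make_percentile_str(arr, percentiles, fmt_str='%7d'):
    """Format the requested percentiles of arr as one 'p<q> <value>' line."""
    vals = np.percentile(np.asarray(arr), percentiles)
    return ' '.join(('p%3d ' + fmt_str) % (q, v)
                    for q, v in zip(percentiles, vals))

# e.g. make_percentile_str(token_ct_D, [0, 50, 100]) -> 'p  0 ... p 50 ... p100 ...'
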
Example 73
Project: curriculum-dropout   Author: pmorerio   File: load.py    GNU General Public License v3.0 4 votes vote down vote up
def doubleMnist(datasets_dir='/data/datasets/'):
	#rndSeed=231
	#rnd.seed(rndSeed)
	print 'Generating '
	trX,teX,trY,teY = mnist(datasets_dir,onehot=False)

	train_size = 60000
	test_size = 10000
	
	trainX = np.zeros((train_size,64,64))
	trainY = np.zeros((train_size,55))
	testX = np.zeros((test_size,64,64))
	testY = np.zeros((test_size,55))

	for ii  in range(len(trainY)):
		
		xc1,yc1 = rnd.randint(0,35), rnd.randint(0,35)
		xc2,yc2 = rnd.randint(0,35), rnd.randint(0,35)
		
		i1 = rnd.randint(0,60000-1)
		i2 = rnd.randint(0,60000-1)
		trainX[ii,yc1:yc1+28,xc1:xc1+28] += trX[i1,:,:]
		trainX[ii,yc2:yc2+28,xc2:xc2+28] += trX[i2,:,:]
		a = np.max((trY[i1],trY[i2]))
		b = np.min((trY[i1],trY[i2]))
		lab = (a+1)*(a+2)/2 -a + b - 1 
		trainY[ii,:] = one_hot(lab,55)
	
	print '... '
		
	for ii  in range(len(testY)):
		
		xc1,yc1 = rnd.randint(0,35), rnd.randint(0,35)
		xc2,yc2 = rnd.randint(0,35), rnd.randint(0,35)
		
		i1 = rnd.randint(0,10000-1)
		i2 = rnd.randint(0,10000-1)
		testX[ii,yc1:yc1+28,xc1:xc1+28] += teX[i1,:,:]
		testX[ii,yc2:yc2+28,xc2:xc2+28] += teX[i2,:,:]
		a = np.max((teY[i1],teY[i2]))
		b = np.min((teY[i1],teY[i2]))
		lab = (a+1)*(a+2)/2 -a + b -1 
		testY[ii,:] = one_hot(lab,55)
	
	print 'Done!'
	return trainX, trainY, testX, testY 
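
The label expression (a+1)*(a+2)/2 - a + b - 1 used above simplifies to a*(a+1)/2 + b for a = max(d1, d2) and b = min(d1, d2), which maps every unordered digit pair to a unique index in 0..54. A quick check (using Python 3 integer division):

# Verify that the unordered-pair label used above covers exactly 55 classes.
labels = set()
for d1 in range(10):
    for d2 in range(10):
        a, b = max(d1, d2), min(d1, d2)
        lab = (a + 1) * (a + 2) // 2 - a + b - 1   # == a * (a + 1) // 2 + b
        labels.add(lab)
assert labels == set(range(55))
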
Example 74
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: coco.py    MIT License 4 votes vote down vote up
def _load_coco_annotation(self, index):
        """
        Loads COCO bounding-box instance annotations. Crowd instances are
        handled by marking their overlaps (with all categories) to -1. This
        overlap value means that crowd "instances" are excluded from training.
        """
        im_ann = self._COCO.loadImgs(index)[0]
        width = im_ann['width']
        height = im_ann['height']

        annIds = self._COCO.getAnnIds(imgIds=index, iscrowd=None)
        objs = self._COCO.loadAnns(annIds)
        # Sanitize bboxes -- some are invalid
        valid_objs = []
        for obj in objs:
            x1 = np.max((0, obj['bbox'][0]))
            y1 = np.max((0, obj['bbox'][1]))
            x2 = np.min((width - 1, x1 + np.max((0, obj['bbox'][2] - 1))))
            y2 = np.min((height - 1, y1 + np.max((0, obj['bbox'][3] - 1))))
            if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
                obj['clean_bbox'] = [x1, y1, x2, y2]
                valid_objs.append(obj)
        objs = valid_objs
        num_objs = len(objs)

        boxes = np.zeros((num_objs, 4), dtype=np.uint16)
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
        seg_areas = np.zeros((num_objs), dtype=np.float32)

        # Lookup table to map from COCO category ids to our internal class
        # indices
        coco_cat_id_to_class_ind = dict([(self._class_to_coco_cat_id[cls],
                                          self._class_to_ind[cls])
                                         for cls in self._classes[1:]])

        for ix, obj in enumerate(objs):
            cls = coco_cat_id_to_class_ind[obj['category_id']]
            boxes[ix, :] = obj['clean_bbox']
            gt_classes[ix] = cls
            seg_areas[ix] = obj['area']
            if obj['iscrowd']:
                # Set overlap to -1 for all classes for crowd objects
                # so they will be excluded during training
                overlaps[ix, :] = -1.0
            else:
                overlaps[ix, cls] = 1.0

        ds_utils.validate_boxes(boxes, width=width, height=height)
        overlaps = scipy.sparse.csr_matrix(overlaps)
        return {'width': width,
                'height': height,
                'boxes': boxes,
                'gt_classes': gt_classes,
                'gt_overlaps': overlaps,
                'flipped': False,
                'seg_areas': seg_areas} 
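
The np.max/np.min calls above clamp COCO's [x, y, w, h] boxes to inclusive [x1, y1, x2, y2] corners that stay inside the image. The same transformation in isolation, as a standalone sketch rather than the project's helper:

import numpy as np

def clean_bbox(bbox, width, height):
    """Convert a COCO [x, y, w, h] box to clipped [x1, y1, x2, y2] corners."""
    x, y, w, h = bbox
    x1 = np.max((0, x))
    y1 = np.max((0, y))
    x2 = np.min((width - 1, x1 + np.max((0, w - 1))))
    y2 = np.min((height - 1, y1 + np.max((0, h - 1))))
    return [float(x1), float(y1), float(x2), float(y2)]

print(clean_bbox([-3.0, 5.0, 20.0, 10000.0], width=640, height=480))
# [0.0, 5.0, 19.0, 479.0]
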
Example 75
Project: cwavegan   Author: acheketa   File: preview.py    MIT License 4 votes vote down vote up
def main(argv):
    del argv
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu,
        zone=FLAGS.tpu_zone,
        project=FLAGS.gcp_project)

    config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=FLAGS.model_dir,
        tpu_config=tf.contrib.tpu.TPUConfig(
            num_shards=FLAGS.num_shards,
            iterations_per_loop=FLAGS.iterations_per_loop))

    # Set module-level global variable so that model_fn and input_fn can be
    # identical for each different kind of dataset and model
    global dataset, model
    dataset = bias_input
    model = bias_model

    # TPU-based estimator used for TRAIN and EVAL
    est = tf.contrib.tpu.TPUEstimator(
        model_fn=model_fn,
        use_tpu=FLAGS.use_tpu,
        config=config,
        train_batch_size=FLAGS.batch_size,
        eval_batch_size=FLAGS.batch_size)

    # CPU-based estimator used for PREDICT (generating images)
    cpu_est = tf.contrib.tpu.TPUEstimator(
        model_fn=model_fn,
        use_tpu=False,
        config=config,
        predict_batch_size=_NUM_VIZ_AUDIO)

    current_step = estimator._load_global_step_from_checkpoint_dir(
        FLAGS.model_dir)  # pylint: disable=protected-access,line-too-long
    tf.logging.info('Starting training for %d steps, current step: %d' %
                    (FLAGS.train_steps, current_step))

    # Render some generated audio previews
    G_z = cpu_est.predict(input_fn=noise_input_fn)
    G_z = [p['generated_audio'][:, :] for p in G_z]
    G_z = np.array(G_z)
    preview_dir = './preview'
    if not os.path.isdir(preview_dir):
        os.makedirs(preview_dir)

    for i in range(len(G_z)):
      audio = np.int16(G_z[i]/np.max(np.abs(G_z[i])) * 32767)
      preview_fp = os.path.join(preview_dir, '{}_{}_{}.wav'.format(str(i % 10), str(current_step), str(i)))
      wavwrite(preview_fp, _FS, audio)

    tf.logging.info('Finished generating audio previews') 
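
The line np.int16(G_z[i]/np.max(np.abs(G_z[i])) * 32767) peak-normalizes each generated waveform before it is written as 16-bit PCM. A small standalone version of the same conversion, with a guard against all-zero audio (hypothetical helper, not part of the project):

import numpy as np

def float_to_int16(audio, eps=1e-8):
    """Peak-normalize a float waveform and convert it to 16-bit PCM samples."""
    peak = np.max(np.abs(audio))
    if peak < eps:                      # avoid dividing by zero on silence
        return np.zeros_like(audio, dtype=np.int16)
    return np.int16(audio / peak * 32767)

samples = float_to_int16(np.sin(np.linspace(0, np.pi, 8000)))
print(samples.dtype, samples.max())  # int16 32767
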
Example 76
Project: ml_news_popularity   Author: khaledJabr   File: lr.py    MIT License 4 votes vote down vote up
def fit(self, data, targets):
        self.training = data
        self.targets = targets

        self.std_scaler = preprocessing.StandardScaler().fit(data)
        data = self.std_scaler.transform(data)
        targets = preprocessing.scale(targets, with_mean=True, with_std=False)

        # self.max_target = np.max(targets)
        # self.beta_mins = [np.min(data[:,j]) for j in range(len(data[0]))]
        # # print(self.beta_mins)
        # self.beta_maxes = [np.max(data[:,j]) for j in range(len(data[0]))]
        # # print(self.beta_maxes)
        # self.norm_targets = self.normalize_zero_max(self.targets, self.max_target)

        # norm = np.zeros((len(data), len(data[0])))
        # # normalize features
        # for j in range(len(data[0])):
        #     norm[:,j] = self.normalize(data[:,j], self.beta_mins[j], self.beta_maxes[j])
        
        # initialize weights
        self.betas = np.zeros(len(data[0]))
        # norm = normalize(self.training)
        # norm = self.training
        # iterate till not converged
        for iter in range(self.max_iter):
            # print("=== Iter {}".format(iter))
            # iterate over all features (j=0,1...M)            
            for j in range(len(self.betas)):
                # remove feature j to determine effect on residuals
                # b_no_j = self.betas.copy()
                # b_no_j[j] = 0
                # x_no_j = self.training
                self.betas[j] = self.update_beta(data, targets, self.betas, j)
                # x_no_j = np.delete(self.training, j, 1)
                # b_no_j = np.delete(self.betas, j, 0)
                # # predict_no_j = np.dot(b_no_j, x_no_j.T)
                # predict_no_j = x_no_j.dot(b_no_j)

                # # use normalized targets
                # residuals = self.norm_targets - predict_no_j
                # p_j = np.dot(norm[:,j], residuals)
                # print("Max Residual = {}".format(np.max(residuals)))
                # print(p_j)
                # # update beta_j based on residuals
                # self.betas[j] = self.soft_threshold(p_j, self.alpha / 2);
            # if iter % 100 == 0:
                # print(self.betas)
                
            # print(self.betas) 
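
The coordinate update above delegates to a soft_threshold helper (visible in the commented-out line self.soft_threshold(p_j, self.alpha / 2)). The standard soft-thresholding operator it refers to, S(z, gamma) = sign(z) * max(|z| - gamma, 0), can be sketched on its own as:

import numpy as np

def soft_threshold(z, gamma):
    """Soft-thresholding operator used in coordinate-descent Lasso updates."""
    return np.sign(z) * np.maximum(np.abs(z) - gamma, 0.0)

print(soft_threshold(np.array([-3.0, -0.5, 0.2, 2.0]), gamma=1.0))
# [-2. -0.  0.  1.]
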
Example 77
Project: neural-fingerprinting   Author: StephanZheng   File: test_imagenet_attacks.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def test_attack_bounds(self):
        """Check SPSA respects perturbation limits."""
        epsilon = 4. / 255
        input_dir = FLAGS.input_image_dir
        metadata_file_path = FLAGS.metadata_file_path
        num_images = 8
        batch_shape = (num_images, 299, 299, 3)
        images, labels = load_images(
            input_dir, metadata_file_path, batch_shape)
        num_classes = 1001

        tf.logging.set_verbosity(tf.logging.INFO)
        with tf.Graph().as_default():
            # Prepare graph
            x_input = tf.placeholder(tf.float32, shape=(1,) + batch_shape[1:])
            y_label = tf.placeholder(tf.int32, shape=(1,))
            model = InceptionModel(num_classes)

            attack = SPSA(model)
            x_adv = attack.generate(
                x_input, y=y_label, epsilon=epsilon, num_steps=10,
                early_stop_loss_threshold=-1., spsa_samples=32, spsa_iters=1,
                is_debug=True)

            # Run computation
            saver = tf.train.Saver(slim.get_model_variables())
            session_creator = tf.train.ChiefSessionCreator(
                scaffold=tf.train.Scaffold(saver=saver),
                checkpoint_filename_with_path=FLAGS.checkpoint_path,
                master=FLAGS.master)

            with tf.train.MonitoredSession(
                    session_creator=session_creator) as sess:
                for i in xrange(num_images):
                    adv_image = sess.run(x_adv, feed_dict={
                        x_input: np.expand_dims(images[i], axis=0),
                        y_label: np.expand_dims(labels[i], axis=0),
                    })
                    diff = adv_image - images[i]
                    assert np.max(np.abs(diff)) < epsilon + 1e-4
                    assert np.max(adv_image) < 1. + 1e-4
                    assert np.min(adv_image) > -1e-4 
Example 78
Project: neural-fingerprinting   Author: StephanZheng   File: attacks_tf.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def saliency_map(grads_target, grads_other, search_domain, increase):
    """
    TensorFlow implementation for computing saliency maps
    :param grads_target: a matrix containing forward derivatives for the
                         target class
    :param grads_other: a matrix where every element is the sum of forward
                        derivatives over all non-target classes at that index
    :param search_domain: the set of input indices that we are considering
    :param increase: boolean; true if we are increasing pixels, false otherwise
    :return: (i, j, search_domain) the two input indices selected and the
             updated search domain
    """
    # Compute the size of the input (the number of features)
    nf = len(grads_target)

    # Remove the already-used input features from the search space
    invalid = list(set(range(nf)) - search_domain)
    increase_coef = (2 * int(increase) - 1)
    grads_target[invalid] = -increase_coef * np.max(np.abs(grads_target))
    grads_other[invalid] = increase_coef * np.max(np.abs(grads_other))

    # Create a 2D numpy array of the sum of grads_target and grads_other
    target_sum = grads_target.reshape((1, nf)) + grads_target.reshape((nf, 1))
    other_sum = grads_other.reshape((1, nf)) + grads_other.reshape((nf, 1))

    # Create a mask to only keep features that match saliency map conditions
    if increase:
        scores_mask = ((target_sum > 0) & (other_sum < 0))
    else:
        scores_mask = ((target_sum < 0) & (other_sum > 0))

    # Create a 2D numpy array of the scores for each pair of candidate features
    scores = scores_mask * (-target_sum * other_sum)

    # A pixel can only be selected (and changed) once
    np.fill_diagonal(scores, 0)

    # Extract the best two pixels
    best = np.argmax(scores)
    p1, p2 = best % nf, best // nf

    # Remove used pixels from our search domain
    search_domain.discard(p1)
    search_domain.discard(p2)

    return p1, p2, search_domain 
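
A small toy call, with hypothetical gradient values, illustrates the return contract of saliency_map above: the two selected feature indices come back together with the shrunken search domain.

import numpy as np

# Hypothetical forward-derivative values for 4 input features.
grads_target = np.array([0.9, 0.1, 0.8, -0.2])
grads_other = np.array([-0.5, 0.2, -0.4, 0.3])
search_domain = {0, 1, 2, 3}

p1, p2, search_domain = saliency_map(grads_target, grads_other,
                                     search_domain, increase=True)
print(p1, p2, sorted(search_domain))  # the chosen pixel pair is removed from the domain
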
Example 79
Project: neural-fingerprinting   Author: StephanZheng   File: attacks_tf.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def jacobian_augmentation(sess,
                          x,
                          X_sub_prev,
                          Y_sub,
                          grads,
                          lmbda,
                          aug_batch_size=512,
                          feed=None):
    """
    Augment an adversary's substitute training set using the Jacobian
    of a substitute model to generate new synthetic inputs.
    See https://arxiv.org/abs/1602.02697 for more details.
    See cleverhans_tutorials/mnist_blackbox.py for example use case
    :param sess: TF session in which the substitute model is defined
    :param x: input TF placeholder for the substitute model
    :param X_sub_prev: substitute training data available to the adversary
                       at the previous iteration
    :param Y_sub: substitute training labels available to the adversary
                  at the previous iteration
    :param grads: Jacobian symbolic graph for the substitute
                  (should be generated using attacks_tf.jacobian_graph)
    :return: augmented substitute data (will need to be labeled by oracle)
    """
    assert len(x.get_shape()) == len(np.shape(X_sub_prev))
    assert len(grads) >= np.max(Y_sub) + 1
    assert len(X_sub_prev) == len(Y_sub)

    aug_batch_size = min(aug_batch_size, X_sub_prev.shape[0])

    # Prepare input_shape (outside loop) for feeding dictionary below
    input_shape = list(x.get_shape())
    input_shape[0] = 1

    # Create new numpy array for adversary training data
    # with twice as many components on the first dimension.
    X_sub = np.vstack([X_sub_prev, X_sub_prev])
    num_samples = X_sub_prev.shape[0]

    # Creating and processing as batch
    nb_batches_aug = int((num_samples + aug_batch_size - 1) / aug_batch_size)
    for p_idxs in range(0, num_samples, aug_batch_size):
        X_batch = X_sub_prev[p_idxs:p_idxs + aug_batch_size, ...]
        feed_dict = {x: X_batch}
        if feed is not None:
            feed_dict.update(feed)

        # Compute sign matrix
        grad_val = sess.run([tf.sign(grads)], feed_dict=feed_dict)[0]

        # Create new synthetic point in adversary substitute training set
        for (indx, ind) in zip(
                range(p_idxs, p_idxs + X_batch.shape[0]),
                range(X_batch.shape[0])):
            X_sub[num_samples + indx] = (
                X_batch[ind] + lmbda * grad_val[Y_sub[indx], ind, ...])

    # Return augmented training data (needs to be labeled afterwards)
    return X_sub 
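
Stripped of the TensorFlow plumbing, the augmentation step above offsets each existing point along the sign of the Jacobian row for its current label, x_new = x + lambda * sign(dF_y(x)/dx). A numpy-only sketch of that core update, with hypothetical shapes rather than the project's API:

import numpy as np

def augment_step(X_sub_prev, Y_sub, jac_sign, lmbda=0.1):
    """Create one new synthetic point per existing one:
    x_new = x + lmbda * sign(dF_y(x)/dx), where y is the point's current label."""
    n = X_sub_prev.shape[0]
    X_new = X_sub_prev + lmbda * jac_sign[Y_sub, np.arange(n)]
    return np.vstack([X_sub_prev, X_new])

# jac_sign has shape (num_classes, num_samples, num_features).
X_prev = np.random.rand(4, 8)
Y_prev = np.array([0, 1, 1, 0])
jac_sign = np.sign(np.random.randn(2, 4, 8))
print(augment_step(X_prev, Y_prev, jac_sign).shape)  # (8, 8)
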
Example 80
Project: uci-download-process   Author: cperales   File: fold_data.py    MIT License 4 votes vote down vote up
def k_folding(data_folder, log_file, file=None, classification=True):
    dir_file_pairs = dir_file(data_folder, file)

    # SPLITTING ONE DATASET FILE IN N_FOLDS
    n_fold = 10
    with open(log_file, 'w') as f:
        for dir_file_pair in dir_file_pairs:
            try:
                dir_name, file_name = dir_file_pair
                # print('Folding {}'.format(file_name))
                df_file = pd.read_csv(os.path.join(dir_name, file_name),
                                      sep=r'\s+',
                                      header=None)
                target_position = df_file.columns[-1]
                x = df_file[[i for i in range(target_position)]]
                y = df_file[[target_position]]
                # Testing if there is enough instances for n fold
                count = [np.count_nonzero(y == label) for label in np.unique(y)]
                if np.min(count) < 2:
                    raise ValueError('Not enough elements of one label')
                rep = np.max(count)  # If maximum is not enough to n fold
                if n_fold > rep:
                    times = math.ceil(n_fold / rep)
                    x = pd.concat(times * [x])
                    y = pd.concat(times * [y])
                # shuffle=False so the stratified split preserves the original row order
                i = 0
                file = file_name.replace('.data', '')
                if classification is True:
                    kf = StratifiedKFold(n_splits=n_fold, shuffle=False)
                else:
                    kf = KFold(n_splits=n_fold, shuffle=True)
                for train_index, test_index in kf.split(X=x, y=y):
                    x_train_fold = x.iloc[train_index]
                    y_train_fold = y.iloc[train_index]
                    train_fold = pd.concat([x_train_fold, y_train_fold], axis=1)
                    train_fold_name = '.'.join(['_'.join(['train', file]), str(i)])
                    train_fold_name_path = os.path.join(dir_name, train_fold_name)
                    train_fold.to_csv(train_fold_name_path,
                                      sep=' ',
                                      header=None,
                                      index=False)

                    x_test_fold = x.iloc[test_index]
                    y_test_fold = y.iloc[test_index]
                    test_fold = pd.concat([x_test_fold, y_test_fold], axis=1)
                    test_fold_name = '.'.join(['_'.join(['test', file]), str(i)])
                    test_fold_name_path = os.path.join(dir_name, test_fold_name)
                    test_fold.to_csv(test_fold_name_path,
                                     sep=' ',
                                     header=None,
                                     index=False)

                    i += 1
            except ValueError as e:
                print(e, ', '
                         'so {} can\'t be stratified'.format(file_name))
                f.write(os.path.join('processed/', file_name))
                f.write('\n')
                shutil.rmtree(dir_name)