Python numpy.arange() Examples

The following code examples show how to use numpy.arange(). They are drawn from open-source Python projects.
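
A quick refresher before the project examples: numpy.arange(start, stop, step) returns evenly spaced values over the half-open interval [start, stop), so stop itself is excluded. A minimal demonstration:

import numpy as np

print(np.arange(5))               # [0 1 2 3 4]
print(np.arange(2, 10, 2))        # [2 4 6 8]
print(np.arange(0.0, 1.0, 0.25))  # [0.   0.25 0.5  0.75]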

Example 1
Project: explirefit   Author: codogogo   File: batcher.py    Apache License 2.0
def batch_iter(data, batch_size, num_epochs, shuffle = True):
		"""
		Generates a batch iterator for a dataset.
		"""
		#data = np.array(data, dtype = np.int32)
		data_size = len(data)

		num_batches_per_epoch = int(data_size/batch_size) + 1
		for epoch in range(num_epochs):
			# Shuffle the data at each epoch
			if shuffle:
				#shuffle_indices = np.random.permutation(np.arange(data_size))
				#shuffled_data = data[shuffle_indices]
				random.shuffle(data)
			#else:
			#	shuffled_data = data

			for batch_num in range(num_batches_per_epoch):
				start_index = batch_num * batch_size
				end_index = min((batch_num + 1) * batch_size, data_size)
				yield data[start_index:end_index] 
Example 2
Project: osqf2015   Author: mvaz   File: stock.py    MIT License
def create(clz):
        """One-time creation of app's objects.

        This function is called once, and is responsible for
        creating all objects (plots, datasources, etc)
        """
        self = clz()
        n_vals = 1000
        self.source = ColumnDataSource(
            data=dict(
                top=[],
                bottom=0,
                left=[],
                right=[],
                x= np.arange(n_vals),
                values= np.random.randn(n_vals)
                ))

        # Generate a figure container
        self.stock_plot = clz.create_stock(self.source)
        self.update_data()
        self.children.append(self.stock_plot) 
Example 3
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: snippets.py    MIT License
def generate_anchors_pre(height, width, feat_stride, anchor_scales=(8,16,32), anchor_ratios=(0.5,1,2)):
  """ A wrapper function to generate anchors given different scales
    Also returns the number of anchors in the variable 'length'.
  """
  anchors = generate_anchors(ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))
  A = anchors.shape[0]
  shift_x = np.arange(0, width) * feat_stride
  shift_y = np.arange(0, height) * feat_stride
  shift_x, shift_y = np.meshgrid(shift_x, shift_y)
  shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
  K = shifts.shape[0]
  # width changes faster, so here it is H, W, C
  anchors = anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
  anchors = anchors.reshape((K * A, 4)).astype(np.float32, copy=False)
  length = np.int32(anchors.shape[0])

  return anchors, length 
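
The meshgrid pattern above is the key numpy.arange usage here: arange generates one coordinate per feature-map cell along each axis, meshgrid expands them into a grid, and stacking the raveled grids twice gives one (x1, y1, x2, y2) shift per cell that is broadcast against the base anchors. A stripped-down sketch of just the shift grid, using an illustrative stride and 2x2 feature-map size (not values from the project):

import numpy as np

feat_stride = 16                                # illustrative stride
height, width = 2, 2                            # illustrative feature-map size
shift_x = np.arange(0, width) * feat_stride     # [ 0 16]
shift_y = np.arange(0, height) * feat_stride    # [ 0 16]
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                    shift_x.ravel(), shift_y.ravel())).transpose()
print(shifts.shape)  # (4, 4): one (x1, y1, x2, y2) shift row per cell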
Example 4
Project: deep-siamese-text-similarity   Author: dhwajraj   File: input_helpers.py    MIT License
def batch_iter(self, data, batch_size, num_epochs, shuffle=True):
        """
        Generates a batch iterator for a dataset.
        """
        data = np.asarray(data)
        print(data)
        print(data.shape)
        data_size = len(data)
        num_batches_per_epoch = int(len(data)/batch_size) + 1
        for epoch in range(num_epochs):
            # Shuffle the data at each epoch
            if shuffle:
                shuffle_indices = np.random.permutation(np.arange(data_size))
                shuffled_data = data[shuffle_indices]
            else:
                shuffled_data = data
            for batch_num in range(num_batches_per_epoch):
                start_index = batch_num * batch_size
                end_index = min((batch_num + 1) * batch_size, data_size)
                yield shuffled_data[start_index:end_index] 
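
A side note on the shuffle idiom in this example: np.random.permutation(np.arange(data_size)) is equivalent to np.random.permutation(data_size), since NumPy documents that an integer argument permutes np.arange of that integer; the permuted index array is then applied via fancy indexing:

import numpy as np

data = np.array(['a', 'b', 'c', 'd', 'e'])
shuffle_indices = np.random.permutation(np.arange(len(data)))  # explicit form used above
shuffle_indices = np.random.permutation(len(data))             # equivalent shorthand
shuffled_data = data[shuffle_indices]                          # fancy indexing applies the permutation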
Example 5
Project: cgp-cnn   Author: sg-nm   File: cgp.py    MIT License
def active_net_list(self):
        net_list = [["input", 0, 0]]
        active_cnt = np.arange(self.net_info.input_num + self.net_info.node_num + self.net_info.out_num)
        active_cnt[self.net_info.input_num:] = np.cumsum(self.is_active)

        for n, is_a in enumerate(self.is_active):
            if is_a:
                t = self.gene[n][0]
                if n < self.net_info.node_num:    # intermediate node
                    type_str = self.net_info.func_type[t]
                else:    # output node
                    type_str = self.net_info.out_type[t]

                connections = [active_cnt[self.gene[n][i+1]] for i in range(self.net_info.max_in_num)]
                net_list.append([type_str] + connections)
        return net_list


# CGP with (1 + \lambda)-ES 
Example 6
Project: prediction-constrained-topic-models   Author: dtak   File: slda_utils__init_manager.py    MIT License
def init_topics_KV__rand_active_words(
        n_states=10,
        frac_words_active=0.5,
        blend_frac_active=0.5,
        n_vocabs=144,
        seed=0):
    prng = np.random.RandomState(int(seed))
    unif_topics_KV = np.ones((n_states, n_vocabs)) / float(n_vocabs)
    active_topics_KV = np.zeros((n_states, n_vocabs))
    for k in xrange(n_states):
        active_words_U = prng.choice(
            np.arange(n_vocabs, dtype=np.int32),
            int(frac_words_active * n_vocabs),
            replace=False)
        active_topics_KV[k, active_words_U] = 1.0 / active_words_U.size
    topics_KV = (1 - blend_frac_active) * unif_topics_KV \
        + blend_frac_active * active_topics_KV
    topics_KV /= topics_KV.sum(axis=1)[:,np.newaxis]
    return topics_KV 
Example 7
Project: prediction-constrained-topic-models   Author: dtak   File: slda_utils__init_manager.py    MIT License
def init_topics_KV__rand_docs(
        dataset=None,
        n_states=10,
        n_vocabs=144,
        blend_frac_doc=0.5,
        seed=0):
    prng = np.random.RandomState(int(seed))
    unif_topics_KV = np.ones((n_states, n_vocabs)) / float(n_vocabs)
    doc_KV = np.zeros((n_states, n_vocabs))
    chosen_doc_ids = prng.choice(
        np.arange(dataset['n_docs'], dtype=np.int32),
        n_states,
        replace=False)
    for k in xrange(n_states):
        start_d = dataset['doc_indptr_Dp1'][chosen_doc_ids[k]]
        stop_d = dataset['doc_indptr_Dp1'][chosen_doc_ids[k] + 1]
        active_words_U = dataset['word_id_U'][start_d:stop_d]
        doc_KV[k, active_words_U] = dataset['word_ct_U'][start_d:stop_d]
    doc_KV /= doc_KV.sum(axis=1)[:,np.newaxis]
    topics_KV = (1 - blend_frac_doc) * unif_topics_KV \
        + blend_frac_doc * doc_KV
    topics_KV /= topics_KV.sum(axis=1)[:,np.newaxis]
    return topics_KV 
Example 8
Project: curriculum-dropout   Author: pmorerio   File: DataSet.py    GNU General Public License v3.0
def next_batch(self, batch_size):
    """Return the next `batch_size` examples from this data set."""

    start = self._index_in_epoch
    self._index_in_epoch += batch_size
    if self._index_in_epoch > self._num_examples:
      # Finished epoch
      self._epochs_completed += 1
      # Shuffle the data
      perm = np.arange(self._num_examples)
      np.random.shuffle(perm)
      self._images = self._images[perm]
      self._labels = self._labels[perm]
      # Start next epoch
      start = 0
      self._index_in_epoch = batch_size
      assert batch_size <= self._num_examples
    end = self._index_in_epoch
    return self._images[start:end], self._labels[start:end] 
Example 9
Project: curriculum-dropout   Author: pmorerio   File: DataSet.py    GNU General Public License v3.0
def next_batch(self, batch_size):
    """Return the next `batch_size` examples from this data set."""

    start = self._index_in_epoch
    self._index_in_epoch += batch_size
    if self._index_in_epoch > self._num_examples:
      # Finished epoch
      self._epochs_completed += 1
      # Shuffle the data
      perm = np.arange(self._num_examples)
      np.random.shuffle(perm)
      self._images = self._images[perm]
      self._labels = self._labels[perm]
      # Start next epoch
      start = 0
      self._index_in_epoch = batch_size
      assert batch_size <= self._num_examples
    end = self._index_in_epoch
    return self._images[start:end], self._labels[start:end] 
Example 10
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: roi_data_layer.py    MIT License
def _shuffle_roidb_inds(self):
        """Randomly permute the training roidb."""
        # If the random flag is set,
        # then the database is shuffled according to system time
        # Useful for the validation set
        if self._random:
            st0 = np.random.get_state()
            millis = int(round(time.time() * 1000)) % 4294967295
            np.random.seed(millis)

        self._perm = np.random.permutation(np.arange(len(self._roidb)))
        # Restore the random state
        if self._random:
            np.random.set_state(st0)

        self._cur = 0 
Example 11
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: snippets.py    MIT License
def generate_anchors_pre(height, width, feat_stride, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
    """ A wrapper function to generate anchors given different scales
      Also returns the number of anchors in the variable 'length'.
    """
    anchors = generate_anchors(ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))
    A = anchors.shape[0]
    shift_x = np.arange(0, width) * feat_stride
    shift_y = np.arange(0, height) * feat_stride
    shift_x, shift_y = np.meshgrid(shift_x, shift_y)
    shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
    K = shifts.shape[0]
    # width changes faster, so here it is H, W, C
    anchors = anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
    anchors = anchors.reshape((K * A, 4)).astype(np.float32, copy=False)
    length = np.int32(anchors.shape[0])

    return anchors, length 
Example 12
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    MIT License
def create_mnist(tfrecord_dir, mnist_dir):
    print('Loading MNIST from "%s"' % mnist_dir)
    import gzip
    with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:
        images = np.frombuffer(file.read(), np.uint8, offset=16)
    with gzip.open(os.path.join(mnist_dir, 'train-labels-idx1-ubyte.gz'), 'rb') as file:
        labels = np.frombuffer(file.read(), np.uint8, offset=8)
    images = images.reshape(-1, 1, 28, 28)
    images = np.pad(images, [(0,0), (0,0), (2,2), (2,2)], 'constant', constant_values=0)
    assert images.shape == (60000, 1, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (60000,) and labels.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0
    
    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#---------------------------------------------------------------------------- 
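
The onehot construction above is a common arange idiom: indexing with np.arange(labels.size) as the row indices and the label array as the column indices addresses exactly one cell per row. A self-contained sketch:

import numpy as np

labels = np.array([2, 0, 1])
onehot = np.zeros((labels.size, labels.max() + 1), dtype=np.float32)
onehot[np.arange(labels.size), labels] = 1.0
print(onehot)
# [[0. 0. 1.]
#  [1. 0. 0.]
#  [0. 1. 0.]]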
Example 13
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    MIT License
def create_cifar100(tfrecord_dir, cifar100_dir):
    print('Loading CIFAR-100 from "%s"' % cifar100_dir)
    import pickle
    with open(os.path.join(cifar100_dir, 'train'), 'rb') as file:
        data = pickle.load(file, encoding='latin1')
    images = data['data'].reshape(-1, 3, 32, 32)
    labels = np.array(data['fine_labels'])
    assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype == np.int32
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 99
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#---------------------------------------------------------------------------- 
Example 14
Project: fbpconv_tf   Author: panakino   File: image_util.py    GNU General Public License v3.0
def __init__(self, search_path, a_min=None, a_max=None, shuffle_data=True, is_flipping=True,n_class = 1):
        super(ImageDataProvider_hdf5, self).__init__(a_min, a_max)
        self.file_idx = -1
        self.shuffle_data = shuffle_data
        self.is_flipping= is_flipping
        self.n_class = n_class

        self.data_files = self._find_data_files(search_path)

        self.data_train=self._load_file(self.data_files[0],'sparse')
        self.data_label=self._load_file(self.data_files[0],'label')
        self.tN=self.data_train.shape[-1]
        self.ids=np.arange(self.tN)
        if self.shuffle_data:
            #np.random.shuffle(self.data_files)
            np.random.shuffle(self.ids)

        assert len(self.data_files) > 0, "No training files"
        print("Number of files used: %s" % len(self.data_files))

        #img = self._load_file(self.data_files[0])
        #self.channels = 1 if len(img.shape) == 2 else img.shape[-1]
        self.channels=n_class 
Example 15
Project: fbpconv_tf   Author: panakino   File: image_util.py    GNU General Public License v3.0
def __init__(self, search_path, a_min=None, a_max=None, shuffle_data=True, is_flipping=True,n_class = 1):
        self.file_idx = -1
        self.shuffle_data = shuffle_data
        self.is_flipping= is_flipping
        self.n_class = n_class

        self.data_files = self._find_data_files(search_path)

        self.data_train=self._load_file(self.data_files[0],'sparse')
        self.data_label=self._load_file(self.data_files[0],'label')
        self.tN=self.data_train.shape[-1]
        self.ids=np.arange(self.tN)
        if self.shuffle_data:
            #np.random.shuffle(self.data_files)
            np.random.shuffle(self.ids)

        assert len(self.data_files) > 0, "No training files"
        print("Number of files used: %s" % len(self.data_files))

        #img = self._load_file(self.data_files[0])
        #self.channels = 1 if len(img.shape) == 2 else img.shape[-1]
        self.channels=n_class 
Example 16
Project: Black-Box-Audio   Author: rtaori   File: tf_logits.py    MIT License
def compute_mfcc(audio, **kwargs):
    """
    Compute the MFCC for a given audio waveform. This is
    identical to how DeepSpeech does it, but does it all in
    TensorFlow so that we can differentiate through it.
    """

    batch_size, size = audio.get_shape().as_list()
    audio = tf.cast(audio, tf.float32)

    # 1. Pre-emphasizer, a high-pass filter
    audio = tf.concat((audio[:, :1], audio[:, 1:] - 0.97*audio[:, :-1], np.zeros((batch_size,1000),dtype=np.float32)), 1)

    # 2. windowing into frames of 320 samples, overlapping
    windowed = tf.stack([audio[:, i:i+400] for i in range(0,size-320,160)],1)

    # 3. Take the FFT to convert to frequency space
    ffted = tf.spectral.rfft(windowed, [512])
    ffted = 1.0 / 512 * tf.square(tf.abs(ffted))

    # 4. Compute the Mel windowing of the FFT
    energy = tf.reduce_sum(ffted,axis=2)+1e-30
    filters = np.load("filterbanks.npy").T
    feat = tf.matmul(ffted, np.array([filters]*batch_size,dtype=np.float32))+1e-30

    # 5. Take the DCT again, because why not
    feat = tf.log(feat)
    feat = tf.spectral.dct(feat, type=2, norm='ortho')[:,:,:26]

    # 6. Amplify high frequencies for some reason
    _,nframes,ncoeff = feat.get_shape().as_list()
    n = np.arange(ncoeff)
    lift = 1 + (22/2.)*np.sin(np.pi*n/22)
    feat = lift*feat
    width = feat.get_shape().as_list()[1]

    # 7. And now stick the energy next to the features
    feat = tf.concat((tf.reshape(tf.log(energy),(-1,width,1)), feat[:, :, 1:]), axis=2)
    
    return feat 
Example 17
Project: SyNEThesia   Author: RunOrVeith   File: feature_creators.py    MIT License
def _split_into_chunks(signal, chunks_per_second=24):
    # TODO currently broken
    raise NotImplementedError("Splitting to chunks is currently broken.")  # NotImplemented is not an exception class
    window_length_ms = 1/chunks_per_second * 1000
    intervals = np.arange(window_length_ms, signal.shape[0], window_length_ms, dtype=np.int32)
    chunks = np.array_split(signal, intervals, axis=0)
    pad_to = _next_power_of_two(np.max([chunk.shape[0] for chunk in chunks]))
    padded_chunks = np.stack([np.concatenate([chunk, np.zeros((pad_to - chunk.shape[0],))]) for chunk in chunks])  # np.stack needs a sequence, not a bare generator
    return padded_chunks 
Example 18
Project: autolims   Author: scottbecker   File: utils.py    MIT License
def touchdown_pcr(fromC, toC, durations, stepsize=2, meltC=98, extC=72):
    """Touchdown PCR protocol generator
    
    Doesn't include the toC as a step.
    
    """
    assert 0 < stepsize < toC < fromC
    def td(temp, dur): return {"temperature":"{:2g}:celsius".format(temp), "duration":"{:d}:second".format(dur)}

    return [{"cycles": 1, "steps": [td(meltC, durations[0]), td(C, durations[1]), td(extC, durations[2])]}
            for C in numpy.arange(fromC, toC, -stepsize)] 
Example 19
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License
def transform_roc(X1, X2, X3):
    n_batch = len(X1)
    xmb = np.zeros((n_batch, 2, n_ctx, 2), dtype=np.int32)
    mmb = np.zeros((n_batch, 2, n_ctx), dtype=np.float32)
    start = encoder['_start_']
    delimiter = encoder['_delimiter_']
    for i, (x1, x2, x3), in enumerate(zip(X1, X2, X3)):
        x12 = [start] + x1[:max_len] + [delimiter] + x2[:max_len] + [clf_token]
        x13 = [start] + x1[:max_len] + [delimiter] + x3[:max_len] + [clf_token]
        l12 = len(x12)
        l13 = len(x13)
        xmb[i, 0, :l12, 0] = x12
        xmb[i, 1, :l13, 0] = x13
        mmb[i, 0, :l12] = 1
        mmb[i, 1, :l13] = 1
    xmb[:, :, :, 1] = np.arange(
        n_vocab + n_special, n_vocab + n_special + n_ctx)
    return xmb, mmb 
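
The final assignment above is the position-encoding idiom used throughout this codebase: numpy.arange(n_vocab + n_special, n_vocab + n_special + n_ctx) produces n_ctx consecutive ids offset past the vocabulary, and broadcasting fills channel 1 of every row with the same sequence, so each token position maps to a unique embedding index. A toy version with made-up sizes (n_vocab, n_special, n_ctx below are illustrative, not the project's values):

import numpy as np

n_vocab, n_special, n_ctx = 10, 3, 4   # illustrative sizes
xmb = np.zeros((2, 1, n_ctx, 2), dtype=np.int32)
xmb[:, :, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)
print(xmb[0, 0, :, 1])  # [13 14 15 16] -- the same position ids in every row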
Example 20
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License
def transform_sst(X1):
    n_batch = len(X1)
    xmb = np.zeros((n_batch, 1, n_ctx, 2), dtype=np.int32)
    mmb = np.zeros((n_batch, 1, n_ctx), dtype=np.float32)
    start = encoder['_start_']
    delimiter = encoder['_delimiter_']
    for i, x1, in enumerate(X1):
        x1 = [start] + x1[:max_len] + [clf_token]
        l1 = len(x1)
        xmb[i, 0, :l1, 0] = x1
        mmb[i, 0, :l1] = 1
    xmb[:, :, :, 1] = np.arange(
        n_vocab + n_special, n_vocab + n_special + n_ctx)
    return xmb, mmb 
Example 21
Project: osqf2015   Author: mvaz   File: stock.py    MIT License
def update_data(self):
        """Called each time that any watched property changes.

        This updates the sin wave data with the most recent values of the
        sliders. It is stored as two numpy arrays in a dict on the app's
        histogram_source property.
        """
        logging.debug("update_data")
        n_vals = 1000
        self.source.data = dict(top=hist, bottom=0, left=0, right = 0, x=np.arange(n_vals), values=np.random.randn(n_vals)) 
Example 22
Project: building-boundary   Author: Geodan   File: segmentation.py    MIT License
def boundary_segmentation(points, distance):
    """
    Extract linear segments using RANSAC.

    Parameters
    ----------
    points : (Mx2) array
        The coordinates of the points.
    distance : float
        The maximum distance between a point and a line for a point to be
        considered belonging to that line.

    Returns
    -------
    segments : list of array
        The linear segments.
    """
    points_shifted = points.copy()
    shift = np.min(points_shifted, axis=0)
    points_shifted -= shift

    mask = np.ones(len(points_shifted), dtype=bool)  # the np.bool alias was removed in NumPy 1.24; use the builtin bool
    indices = np.arange(len(points_shifted))

    segments = []
    extract_segments(segments, points_shifted, indices, mask, distance)

    segments = [points_shifted[i]+shift for i in segments]

    return segments 
Example 23
Project: fenics-topopt   Author: zfergus   File: boundary_conditions.py    MIT License
def get_fixed_nodes(self):
        # Return a list of fixed nodes for the problem
        dofs = np.arange(2 * (self.nelx + 1) * (self.nely + 1))
        fixed = np.union1d(dofs[0:2 * (self.nely + 1):2],
            np.array([2 * (self.nelx + 1) * (self.nely + 1) - 1]))
        return fixed 
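
The stepped slice dofs[0:2 * (self.nely + 1):2] picks every other entry from the arange, which in this two-dofs-per-node numbering selects the x-displacement dof of each node in the first node column; np.union1d then merges in the last dof. A tiny illustration with an illustrative mesh size of nelx = nely = 2:

import numpy as np

nelx = nely = 2                                   # illustrative mesh size
dofs = np.arange(2 * (nelx + 1) * (nely + 1))     # 18 dofs, two per node
left_x_dofs = dofs[0:2 * (nely + 1):2]            # [0 2 4]
fixed = np.union1d(left_x_dofs, np.array([2 * (nelx + 1) * (nely + 1) - 1]))
print(fixed)                                      # [ 0  2  4 17]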
Example 24
Project: fenics-topopt   Author: zfergus   File: problem.py    MIT License
def __init__(self, nelx, nely, penal, bc):
        # Problem size
        self.nelx = nelx
        self.nely = nely

        # Max and min stiffness
        self.Emin = 1e-9
        self.Emax = 1.0

        # SIMP penalty
        self.penal = penal

        # dofs:
        self.ndof = 2 * (nelx + 1) * (nely + 1)

        # FE: Build the index vectors for the for coo matrix format.
        self.build_indices(nelx, nely)

        # BC's and support (half MBB-beam)
        dofs = np.arange(2 * (nelx + 1) * (nely + 1))
        self.fixed = bc.get_fixed_nodes()
        self.free = np.setdiff1d(dofs, self.fixed)

        # Solution and RHS vectors
        self.f = bc.get_forces()
        self.u = np.zeros(self.f.shape)

        # Per element compliance
        self.ce = np.zeros(nely * nelx) 
Example 25
Project: fenics-topopt   Author: zfergus   File: tower.py    MIT License
def get_forces(self):
        # Return the force vector for the problem
        topx_to_id = np.vectorize(
            lambda x: xy_to_id(x, 0, self.nelx, self.nely))
        topx = 2 * topx_to_id(np.arange((self.nelx + 1) // 2)) + 1
        f = np.zeros((2 * (self.nelx + 1) * (self.nely + 1), 1))
        f[topx, 0] = -100
        return f 
Example 26
Project: fenics-topopt   Author: zfergus   File: boundary_conditions.py    MIT License
def get_fixed_nodes(self):
        # Return a list of fixed nodes for the problem
        dofs = np.arange(2 * (self.nelx + 1) * (self.nely + 1))
        fixed = np.union1d(dofs[0:2 * (self.nely + 1):2],
            np.array([2 * (self.nelx + 1) * (self.nely + 1) - 1]))
        return fixed 
Example 27
Project: fenics-topopt   Author: zfergus   File: problem.py    MIT License
def __init__(self, nelx, nely, penal, bc):
        # Problem size
        self.nelx = nelx
        self.nely = nely

        # Max and min stiffness
        self.Emin = 1e-9
        self.Emax = 1.0

        # SIMP penalty
        self.penal = penal

        # dofs:
        self.ndof = 2 * (nelx + 1) * (nely + 1)

        # FE: Build the index vectors for the for coo matrix format.
        self.build_indices(nelx, nely)

        # BC's and support (half MBB-beam)
        dofs = np.arange(2 * (nelx + 1) * (nely + 1))
        self.fixed = bc.get_fixed_nodes()
        self.free = np.setdiff1d(dofs, self.fixed)

        # Solution and RHS vectors
        self.f = bc.get_forces()
        self.u = np.zeros(self.f.shape)

        # Per element compliance
        self.ce = np.zeros(nely * nelx) 
Example 28
Project: fenics-topopt   Author: zfergus   File: L_bracket.py    MIT License
def get_fixed_nodes(self):
        """ Return a list of fixed nodes for the problem. """
        x = np.arange(self.passive_min_x)
        topx_to_id = np.vectorize(
            lambda x: xy_to_id(x, 0, self.nelx, self.nely))
        ids = topx_to_id(x)
        fixed = np.union1d(2 * ids, 2 * ids + 1)
        return fixed 
Example 29
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: layer.py    MIT License
def _shuffle_roidb_inds(self):
    """Randomly permute the training roidb."""
    # If the random flag is set, 
    # then the database is shuffled according to system time
    # Useful for the validation set
    if self._random:
      st0 = np.random.get_state()
      millis = int(round(time.time() * 1000)) % 4294967295
      np.random.seed(millis)
    
    if cfg.TRAIN.ASPECT_GROUPING:
      raise NotImplementedError
      '''
      widths = np.array([r['width'] for r in self._roidb])
      heights = np.array([r['height'] for r in self._roidb])
      horz = (widths >= heights)
      vert = np.logical_not(horz)
      horz_inds = np.where(horz)[0]
      vert_inds = np.where(vert)[0]
      inds = np.hstack((
          np.random.permutation(horz_inds),
          np.random.permutation(vert_inds)))
      inds = np.reshape(inds, (-1, 2))
      row_perm = np.random.permutation(np.arange(inds.shape[0]))
      inds = np.reshape(inds[row_perm, :], (-1,))
      self._perm = inds
      '''
    else:
      self._perm = np.random.permutation(np.arange(len(self._roidb)))
    # Restore the random state
    if self._random:
      np.random.set_state(st0)
      
    self._cur = 0 
Example 30
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: voc_eval.py    MIT License
def voc_ap(rec, prec, use_07_metric=False):
  """ ap = voc_ap(rec, prec, [use_07_metric])
  Compute VOC AP given precision and recall.
  If use_07_metric is true, uses the
  VOC 07 11 point method (default:False).
  """
  if use_07_metric:
    # 11 point metric
    ap = 0.
    for t in np.arange(0., 1.1, 0.1):
      if np.sum(rec >= t) == 0:
        p = 0
      else:
        p = np.max(prec[rec >= t])
      ap = ap + p / 11.
  else:
    # correct AP calculation
    # first append sentinel values at the end
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))

    # compute the precision envelope
    for i in range(mpre.size - 1, 0, -1):
      mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # to calculate area under PR curve, look for points
    # where X axis (recall) changes value
    i = np.where(mrec[1:] != mrec[:-1])[0]

    # and sum (\Delta recall) * prec
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
  return ap 
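
One caveat about the 11-point loop above: numpy.arange with a floating-point step accumulates rounding error, and the NumPy documentation recommends numpy.linspace when the count and endpoints must be exact. Here np.arange(0., 1.1, 0.1) does produce the intended eleven thresholds from 0.0 to 1.0, but the values carry float noise such as 0.30000000000000004:

import numpy as np

thresholds = np.arange(0., 1.1, 0.1)  # 11 values; thresholds[3] == 0.30000000000000004
exact = np.linspace(0., 1.0, 11)      # 11 exactly spaced values including both endpoints
assert len(thresholds) == len(exact) == 11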
Example 31
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: generate_anchors.py    MIT License
def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
                     scales=2 ** np.arange(3, 6)):
  """
  Generate anchor (reference) windows by enumerating aspect ratios X
  scales wrt a reference (0, 0, 15, 15) window.
  """

  base_anchor = np.array([1, 1, base_size, base_size]) - 1
  ratio_anchors = _ratio_enum(base_anchor, ratios)
  anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)
                       for i in range(ratio_anchors.shape[0])])
  return anchors 
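
Note the default scales argument: 2 ** np.arange(3, 6) evaluates to the powers of two [8, 16, 32], matching the anchor_scales=(8, 16, 32) defaults seen in Examples 3 and 11:

import numpy as np

print(2 ** np.arange(3, 6))  # [ 8 16 32]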
Example 32
Project: xrft   Author: xgcm   File: test_xrft.py    MIT License
def sample_data_3d():
    """Create three dimensional test data."""
    temp = 10 * np.random.rand(2, 2, 10)
    lon = [[-99.83, -99.32], [-99.79, -99.23]]
    lat = [[42.25, 42.21], [42.63, 42.59]]
    ds = xr.Dataset({'temp': (['x', 'y', 'time'], temp)},
                coords={'lon': (['x', 'y'], lon),
                        'lat': (['x', 'y'], lat),
                        'time': np.arange(10)})
    return ds 
Example 33
Project: xrft   Author: xgcm   File: test_xrft.py    MIT License
def numpy_detrend(da):
    """
    Detrend a 2D field by subtracting out the least-square plane fit.

    Parameters
    ----------
    da : `numpy.array`
        The data to be detrended

    Returns
    -------
    da : `numpy.array`
        The detrended input data
    """
    N = da.shape

    G = np.ones((N[0]*N[1],3))
    for i in range(N[0]):
        G[N[1]*i:N[1]*i+N[1], 1] = i+1
        G[N[1]*i:N[1]*i+N[1], 2] = np.arange(1, N[1]+1)

    d_obs = np.reshape(da.copy(), (N[0]*N[1],1))
    m_est = np.dot(np.dot(spl.inv(np.dot(G.T, G)), G.T), d_obs)
    d_est = np.dot(G, m_est)

    lin_trend = np.reshape(d_est, N)

    return da - lin_trend 
Example 34
Project: xrft   Author: xgcm   File: test_xrft.py    MIT License
def test_isotropic_ps():
    """Test data with extra coordinates"""
    da = xr.DataArray(np.random.rand(2,5,5,16,32),
                  dims=['time','zz','z','y', 'x'],
                  coords={'time': np.array(['2019-04-18', '2019-04-19'],
                                          dtype='datetime64'),
                         'zz': np.arange(5), 'z': np.arange(5),
                         'y': np.arange(16), 'x': np.arange(32)})
    with pytest.raises(ValueError):
        xrft.isotropic_powerspectrum(da, dim=['y','x'])
    da = da[:,0,:,:,:].drop(['zz'])
    with pytest.raises(ValueError):
        xrft.isotropic_powerspectrum(da, dim=['z','y','x'])
    iso_ps = xrft.isotropic_powerspectrum(da, dim=['y','x']).values
    npt.assert_almost_equal(np.ma.masked_invalid(iso_ps[:,:,1:]).mask.sum(), 0.) 
Example 35
Project: comet-commonsense   Author: atcbosselut   File: generate_conceptnet_beam_search.py    Apache License 2.0
def make_batch(X):
    X = np.array(X)
    assert X.ndim in [1, 2]
    if X.ndim == 1:
        X = np.expand_dims(X, axis=0)
    pos_enc = np.arange(n_vocab + n_special, n_vocab + n_special + X.shape[-1])
    pos_enc = np.expand_dims(pos_enc, axis=0)
    batch = np.stack([X, pos_enc], axis=-1)
    batch = torch.tensor(batch, dtype=torch.long).to(device)
    return batch 
Example 36
Project: comet-commonsense   Author: atcbosselut   File: generate_atomic_greedy.py    Apache License 2.0
def make_batch(X):
    X = np.array(X)
    assert X.ndim in [1, 2]
    if X.ndim == 1:
        X = np.expand_dims(X, axis=0)
    pos_enc = np.arange(n_vocab + n_special, n_vocab + n_special + X.shape[-1])
    pos_enc = np.expand_dims(pos_enc, axis=0)
    batch = np.stack([X, pos_enc], axis=-1)
    batch = torch.tensor(batch, dtype=torch.long).to(device)
    return batch 
Example 37
Project: comet-commonsense   Author: atcbosselut   File: generate_atomic_topk.py    Apache License 2.0
def make_batch(X):
    X = np.array(X)
    assert X.ndim in [1, 2]
    if X.ndim == 1:
        X = np.expand_dims(X, axis=0)
    pos_enc = np.arange(n_vocab + n_special, n_vocab + n_special + X.shape[-1])
    pos_enc = np.expand_dims(pos_enc, axis=0)
    batch = np.stack([X, pos_enc], axis=-1)
    batch = torch.tensor(batch, dtype=torch.long).to(device)
    return batch 
Example 38
Project: comet-commonsense   Author: atcbosselut   File: generate_atomic_beam_search.py    Apache License 2.0
def make_batch(X):
    X = np.array(X)
    assert X.ndim in [1, 2]
    if X.ndim == 1:
        X = np.expand_dims(X, axis=0)
    pos_enc = np.arange(n_vocab + n_special, n_vocab + n_special + X.shape[-1])
    pos_enc = np.expand_dims(pos_enc, axis=0)
    batch = np.stack([X, pos_enc], axis=-1)
    batch = torch.tensor(batch, dtype=torch.long).to(device)
    return batch 
Example 39
Project: deep-siamese-text-similarity   Author: dhwajraj   File: input_helpers.py    MIT License
def getTsvDataCharBased(self, filepath):
        print("Loading training data from "+filepath)
        x1=[]
        x2=[]
        y=[]
        # positive samples from file
        for line in open(filepath):
            l=line.strip().split("\t")
            if len(l)<2:
                continue
            if random() > 0.5:
               x1.append(l[0].lower())
               x2.append(l[1].lower())
            else:
               x1.append(l[1].lower())
               x2.append(l[0].lower())
            y.append(1)#np.array([0,1]))
        # generate random negative samples
        combined = np.asarray(x1+x2)
        shuffle_indices = np.random.permutation(np.arange(len(combined)))
        combined_shuff = combined[shuffle_indices]
        for i in xrange(len(combined)):
            x1.append(combined[i])
            x2.append(combined_shuff[i])
            y.append(0) #np.array([1,0]))
        return np.asarray(x1),np.asarray(x2),np.asarray(y) 
Example 40
Project: deep-siamese-text-similarity   Author: dhwajraj   File: input_helpers.py    MIT License
def getDataSets(self, training_paths, max_document_length, percent_dev, batch_size, is_char_based):
        if is_char_based:
            x1_text, x2_text, y=self.getTsvDataCharBased(training_paths)
        else:
            x1_text, x2_text, y=self.getTsvData(training_paths)
        # Build vocabulary
        print("Building vocabulary")
        vocab_processor = MyVocabularyProcessor(max_document_length,min_frequency=0,is_char_based=is_char_based)
        vocab_processor.fit_transform(np.concatenate((x2_text,x1_text),axis=0))
        print("Length of loaded vocabulary ={}".format( len(vocab_processor.vocabulary_)))
        i1=0
        train_set=[]
        dev_set=[]
        sum_no_of_batches = 0
        x1 = np.asarray(list(vocab_processor.transform(x1_text)))
        x2 = np.asarray(list(vocab_processor.transform(x2_text)))
        # Randomly shuffle data
        np.random.seed(131)
        shuffle_indices = np.random.permutation(np.arange(len(y)))
        x1_shuffled = x1[shuffle_indices]
        x2_shuffled = x2[shuffle_indices]
        y_shuffled = y[shuffle_indices]
        dev_idx = -1*len(y_shuffled)*percent_dev//100
        del x1
        del x2
        # Split train/test set
        self.dumpValidation(x1_text,x2_text,y,shuffle_indices,dev_idx,0)
        # TODO: This is very crude, should use cross-validation
        x1_train, x1_dev = x1_shuffled[:dev_idx], x1_shuffled[dev_idx:]
        x2_train, x2_dev = x2_shuffled[:dev_idx], x2_shuffled[dev_idx:]
        y_train, y_dev = y_shuffled[:dev_idx], y_shuffled[dev_idx:]
        print("Train/Dev split for {}: {:d}/{:d}".format(training_paths, len(y_train), len(y_dev)))
        sum_no_of_batches = sum_no_of_batches+(len(y_train)//batch_size)
        train_set=(x1_train,x2_train,y_train)
        dev_set=(x1_dev,x2_dev,y_dev)
        gc.collect()
        return train_set,dev_set,vocab_processor,sum_no_of_batches 
Example 41
Project: cgp-cnn   Author: sg-nm   File: cgp_config.py    MIT License
def __call__(self, net_lists):
        evaluations = np.zeros(len(net_lists))

        for i in np.arange(0, len(net_lists), self.gpu_num):
            process_num = np.min((i + self.gpu_num, len(net_lists))) - i

            pool = mp.Pool(process_num)
            arg_data = [(cnn_eval, net_lists[i+j], j, self.epoch_num, self.batchsize, self.dataset,
                         self.valid_data_ratio, self.verbose) for j in range(process_num)]
            evaluations[i:i+process_num] = pool.map(arg_wrapper_mp, arg_data)
            pool.terminate()

        return evaluations 
Example 42
Project: prediction-constrained-topic-models   Author: dtak   File: calc_coherence_metrics.py    MIT License
def calc_pairwise_cooccurance_counts(
        x_csr_DV=None,
        dataset=None,
        ):
    """ Calculate word cooccurances across a corpus of D documents

    Returns
    -------
    ndocs_V : 1D array, size V
        entry v counts the number of documents that contain v at least once
    ndocs_csc_VV : 2D csc sparse matrix, V x V
        entry v,w counts the number of documents which contain
        the word pair (v, w) at least once

    Examples
    --------
    >>> x_DV = np.arange(6)[:,np.newaxis] * np.hstack([np.eye(6), np.zeros((6, 3))])
    >>> x_DV[:3, :3] += 1
    >>> x_DV[4, 5] += 17
    >>> ndocs_V, ndocs_csc_VV = calc_pairwise_cooccurance_counts(x_csr_DV=x_DV)
    >>> ndocs_V.astype(np.int32).tolist()
    [3, 3, 3, 1, 1, 2, 0, 0, 0]
    >>> ndocs_csc_VV.toarray()[:3, :3]
    array([[ 3.,  3.,  3.],
           [ 3.,  3.,  3.],
           [ 3.,  3.,  3.]])
    """
    if x_csr_DV is None:
        x_csr_DV = dataset['x_csr_DV']
    x_csr_DV = scipy.sparse.csr_matrix(x_csr_DV, dtype=np.float64)

    binx_csr_DV = x_csr_DV.copy()
    binx_csr_DV.data[:] = 1.0

    ndocs_V = np.squeeze(np.asarray(binx_csr_DV.sum(axis=0)))

    ndocs_csc_VV = (binx_csr_DV.T * binx_csr_DV).tocsc()
    return ndocs_V, ndocs_csc_VV 
Example 43
Project: curriculum-dropout   Author: pmorerio   File: load.py    GNU General Public License v3.0
def one_hot(x,n):
	if type(x) == list:
		x = np.array(x)
	x = x.flatten()
	o_h = np.zeros((len(x),n))
	o_h[np.arange(len(x)),x] = 1
	return o_h 
Example 44
Project: curriculum-dropout   Author: pmorerio   File: load.py    GNU General Public License v3.0
def one_hot(x,n):
	if type(x) == list:
		x = np.array(x)
	x = x.flatten()
	o_h = np.zeros((len(x),n))
	o_h[np.arange(len(x)),x] = 1
	return o_h 
Example 45
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: data.py    MIT License
def shuffle(self):
    batch = self.FLAGS.batch
    data = self.parse()
    size = len(data)

    print('Dataset of {} instance(s)'.format(size))
    if batch > size: self.FLAGS.batch = batch = size
    batch_per_epoch = int(size / batch)

    for i in range(self.FLAGS.epoch):
        shuffle_idx = perm(np.arange(size))
        for b in range(batch_per_epoch):
            # yield these
            x_batch = list()
            feed_batch = dict()

            for j in range(b*batch, b*batch+batch):
                train_instance = data[shuffle_idx[j]]
                try:
                    inp, new_feed = self._batch(train_instance)
                except ZeroDivisionError:
                    print("This image's width or height are zeros: ", train_instance[0])
                    print('train_instance:', train_instance)
                    print('Please remove or fix it then try again.')
                    raise

                if inp is None: continue
                x_batch += [np.expand_dims(inp, 0)]

                for key in new_feed:
                    new = new_feed[key]
                    old_feed = feed_batch.get(key, 
                        np.zeros((0,) + new.shape))
                    feed_batch[key] = np.concatenate([ 
                        old_feed, [new] 
                    ])      
            
            x_batch = np.concatenate(x_batch, 0)
            yield x_batch, feed_batch
        
        print('Finish {} epoch(es)'.format(i + 1)) 
Example 46
Project: VAE-MF-TensorFlow   Author: dongwookim-ml   File: movielens_test.py    MIT License
def train():
    M = read_dataset()

    num_rating = np.count_nonzero(M)
    idx = np.arange(num_rating)
    np.random.seed(0)
    np.random.shuffle(idx)

    train_idx = idx[:int(0.8 * num_rating)]
    valid_idx = idx[int(0.8 * num_rating):int(0.9 * num_rating)]
    test_idx = idx[int(0.9 * num_rating):]

    result_path = "{0}_{1}_{2}_{3}_{4}_{5}_{6}_{7}".format(
        hidden_encoder_dim, hidden_decoder_dim, latent_dim, output_dim, learning_rate, batch_size, reg_param, one_hot)
    if not os.path.exists(result_path + "/model.ckpt.index"):
        with tf.Session() as sess:
            model = VAEMF(sess, num_user, num_item,
                          hidden_encoder_dim=hidden_encoder_dim, hidden_decoder_dim=hidden_decoder_dim,
                          latent_dim=latent_dim, output_dim=output_dim, learning_rate=learning_rate, batch_size=batch_size, reg_param=reg_param, one_hot=one_hot)
            print("Train size={0}, Validation size={1}, Test size={2}".format(
                train_idx.size, valid_idx.size, test_idx.size))
            best_mse, best_mae = model.train_test_validation(
                M, train_idx=train_idx, test_idx=test_idx, valid_idx=valid_idx, n_steps=n_steps, result_path=result_path)

            print("Best MSE = {0}, best MAE = {1}".format(
                best_mse, best_mae)) 
Example 47
Project: VAE-MF-TensorFlow   Author: dongwookim-ml   File: movielens_test.py    MIT License
def train_test_validation():
    M = read_dataset()

    num_rating = np.count_nonzero(M)
    idx = np.arange(num_rating)
    np.random.seed(0)
    np.random.shuffle(idx)

    train_idx = idx[:int(0.8 * num_rating)]
    valid_idx = idx[int(0.8 * num_rating):int(0.9 * num_rating)]
    test_idx = idx[int(0.9 * num_rating):]

    for hidden_encoder_dim, hidden_decoder_dim, latent_dim, output_dim, learning_rate, batch_size, reg_param, one_hot in itertools.product(hedims, hddims, ldims, odims, lrates, bsizes, regs, one_hots):
        result_path = "{0}_{1}_{2}_{3}_{4}_{5}_{6}_{7}".format(
            hidden_encoder_dim, hidden_decoder_dim, latent_dim, output_dim, learning_rate, batch_size, reg_param, one_hot)
        if not os.path.exists(result_path + "/model.ckpt.index"):
            with tf.Session() as sess:
                model = VAEMF(sess, num_user, num_item,
                              hidden_encoder_dim=hidden_encoder_dim, hidden_decoder_dim=hidden_decoder_dim,
                              latent_dim=latent_dim, output_dim=output_dim, learning_rate=learning_rate, batch_size=batch_size, reg_param=reg_param, one_hot=one_hot)
                print("Train size={0}, Validation size={1}, Test size={2}".format(
                    train_idx.size, valid_idx.size, test_idx.size))
                best_mse, best_mae = model.train_test_validation(
                    M, train_idx=train_idx, test_idx=test_idx, valid_idx=valid_idx, n_steps=n_steps, result_path=result_path)

                print("Best MSE = {0}, best MAE = {1}".format(
                    best_mse, best_mae))

                with open('result.csv', 'a') as f:
                    f.write("{0},{1},{2},{3},{4},{5},{6},{7},{8},{9}\n".format(hidden_encoder_dim, hidden_decoder_dim,
                                                                               latent_dim, output_dim, learning_rate, batch_size, reg_param, one_hot, best_mse, best_mae))

        tf.reset_default_graph() 
Example 48
Project: VAE-MF-TensorFlow   Author: dongwookim-ml   File: movielens_vae_test.py    MIT License
def train_test_validation():
    M = read_dataset()

    num_rating = np.count_nonzero(M)
    idx = np.arange(num_rating)
    np.random.seed(1)
    np.random.shuffle(idx)

    train_idx = idx[:int(0.85 * num_rating)]
    valid_idx = idx[int(0.85 * num_rating):int(0.90 * num_rating)]
    test_idx = idx[int(0.90 * num_rating):]

    for hidden_encoder_dim, hidden_decoder_dim, latent_dim, learning_rate, batch_size, reg_param, vae in itertools.product(hedims, hddims, ldims, lrates, bsizes, regs, vaes):
        result_path = "{0}_{1}_{2}_{3}_{4}_{5}_{6}".format(
            hidden_encoder_dim, hidden_decoder_dim, latent_dim, learning_rate, batch_size, reg_param, vae)
        if not os.path.exists(result_path + "/model.ckpt.index"):
            config = tf.ConfigProto()
            config.gpu_options.allow_growth=True
            with tf.Session(config=config) as sess:
                model = VAEMF(sess, num_user, num_item,
                              hidden_encoder_dim=hidden_encoder_dim, hidden_decoder_dim=hidden_decoder_dim,
                              latent_dim=latent_dim, learning_rate=learning_rate, batch_size=batch_size, reg_param=reg_param, vae=vae)
                print("Train size={0}, Validation size={1}, Test size={2}".format(
                    train_idx.size, valid_idx.size, test_idx.size))
                print(result_path)
                best_rmse = model.train_test_validation(M, train_idx=train_idx, test_idx=test_idx, valid_idx=valid_idx, n_steps=n_steps, result_path=result_path)

                print("Best MSE = {0}".format(best_rmse))

                with open('result.csv', 'a') as f:
                    f.write("{0},{1},{2},{3},{4},{5},{6},{7}\n".format(hidden_encoder_dim, hidden_decoder_dim,
                                                                               latent_dim, learning_rate, batch_size, reg_param, vae, best_rmse))

        tf.reset_default_graph() 
Example 49
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: voc_eval.py    MIT License
def voc_ap(rec, prec, use_07_metric=False):
    """ ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the
    VOC 07 11 point method (default:False).
    """
    if use_07_metric:
        # 11 point metric
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.
    else:
        # correct AP calculation
        # first append sentinel values at the end
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))

        # compute the precision envelope
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # to calculate area under PR curve, look for points
        # where X axis (recall) changes value
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # and sum (\Delta recall) * prec
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap 
Example 50
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: generate_anchors.py    MIT License
def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
                     scales=2 ** np.arange(3, 6)):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, 15, 15) window.
    """

    base_anchor = np.array([1, 1, base_size, base_size]) - 1
    ratio_anchors = _ratio_enum(base_anchor, ratios)
    anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)
                         for i in range(ratio_anchors.shape[0])])
    return anchors