Python numpy.int() Examples

The following are 30 code examples of numpy.int(), drawn from open-source projects; the Source File line above each example names the original project and file. Note that np.int was merely an alias for Python's built-in int: it was deprecated in NumPy 1.20 and removed in NumPy 1.24, so new code should use int or an explicit sized dtype such as np.int64.
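A minimal sketch of the replacement (not from any of the projects below):

import numpy as np

# np.int was just an alias for Python's built-in int, so these are equivalent:
a = np.array([0, 10, 10, 27], dtype=int)       # platform default integer
b = np.array([0, 10, 10, 27], dtype=np.int64)  # explicit 64-bit integer
print(a.dtype, b.dtype)  # e.g. int64 int64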
Example #1
Source File: test_masks.py    From mmdetection with Apache License 2.0
def test_bitmap_mask_crop():
    # crop with empty bitmap masks
    dummy_bbox = np.array([0, 10, 10, 27], dtype=np.int)
    raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
    bitmap_masks = BitmapMasks(raw_masks, 28, 28)
    cropped_masks = bitmap_masks.crop(dummy_bbox)
    assert len(cropped_masks) == 0
    assert cropped_masks.height == 17
    assert cropped_masks.width == 10

    # crop with bitmap masks containing 3 instances
    raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
    bitmap_masks = BitmapMasks(raw_masks, 28, 28)
    cropped_masks = bitmap_masks.crop(dummy_bbox)
    assert len(cropped_masks) == 3
    assert cropped_masks.height == 17
    assert cropped_masks.width == 10
    x1, y1, x2, y2 = dummy_bbox
    assert (cropped_masks.masks == raw_masks[:, y1:y2, x1:x2]).all()

    # crop with invalid bbox
    with pytest.raises(AssertionError):
        dummy_bbox = dummy_bboxes(2, 28, 28)
        bitmap_masks.crop(dummy_bbox) 
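The expected crop shape follows directly from the bbox arithmetic; a plain NumPy sketch of the same slicing, independent of mmdetection:

import numpy as np

masks = np.random.randint(0, 2, size=(3, 28, 28))
x1, y1, x2, y2 = np.array([0, 10, 10, 27], dtype=int)
cropped = masks[:, y1:y2, x1:x2]
print(cropped.shape)  # (3, 17, 10): height = y2 - y1, width = x2 - x1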
Example #2
Source File: custom.py    From mmdetection with Apache License 2.0
def __getitem__(self, idx):
        """Get training/test data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training/test data (with annotation if `test_mode` is set
                False).
        """

        if self.test_mode:
            return self.prepare_test_img(idx)
        while True:
            data = self.prepare_train_img(idx)
            if data is None:
                idx = self._rand_another(idx)
                continue
            return data 
Example #3
Source File: images.py    From neuropythy with GNU Affero General Public License v3.0
def to_image_spec(img, **kw):
    '''
    to_image_spec(img) yields a dictionary of meta-data for the given nibabel image object img.
    to_image_spec(hdr) yields the equivalent meta-data for the given nibabel image header.

    Note that img may also be a mapping object, in which case it is returned verbatim.
    '''
    if pimms.is_vector(img,'int') and is_tuple(img) and len(img) < 5:
        r = image_array_to_spec(np.zeros(img))
    elif pimms.is_map(img):    r = img
    elif is_image_header(img): r = image_header_to_spec(img)
    elif is_image(img):        r = image_to_spec(img)
    elif is_image_array(img):  r = image_array_to_spec(img)
    else: raise ValueError('cannot convert object of type %s to image-spec' % type(img))
    if len(kw) > 0: r = {k:v for m in (r,kw) for (k,v) in six.iteritems(m)}
    # normalize the entries
    for (k,aliases) in six.iteritems(imspec_aliases):
        if k in r: continue
        for al in aliases:
            if al in r:
                val = r[al]
                r = pimms.assoc(pimms.dissoc(r, al), k, val)
                break
    return r 
Example #4
Source File: synthetic_model.py    From DOTA_models with Apache License 2.0
def GenerateSingleCode(code_shape):
  code = np.zeros(code_shape, dtype=np.int)

  keep_value_proba = 0.8

  height = code_shape[0]
  width = code_shape[1]
  depth = code_shape[2]

  for d in range(depth):
    for y in range(height):
      for x in range(width):
        v1 = ComputeLineCrc(code, width, y, x, d)
        v2 = ComputeDepthCrc(code, y, x, d)
        v = 1 if (v1 + v2 >= 6) else 0
        if np.random.rand() < keep_value_proba:
          code[y, x, d] = v
        else:
          code[y, x, d] = 1 - v

  return code 
Example #5
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def curve_length(self, start=None, end=None, precision=0.01):
        '''
        Calculates the length of the curve by dividing the curve up
        into pieces of parameterized-length <precision>.
        '''
        if start is None: start = self.t[0]
        if end is None: end = self.t[-1]
        from scipy import interpolate
        if self.order == 1:
            # we just want to add up along the steps...
            ii = [ii for (ii,t) in enumerate(self.t) if start < t and t < end]
            ts = np.concatenate([[start], self.t[ii], [end]])
            xy = np.vstack([[self(start)], self.coordinates[:,ii].T, [self(end)]])
            return np.sum(np.sqrt(np.sum((xy[1:] - xy[:-1])**2, axis=1)))
        else:
            t = np.linspace(start, end, int(np.ceil((end-start)/precision)))
            dt = t[1] - t[0]
            dx = interpolate.splev(t, self.splrep[0], der=1)
            dy = interpolate.splev(t, self.splrep[1], der=1)
            return np.sum(np.sqrt(dx**2 + dy**2)) * dt 
Example #6
Source File: test_utils.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def gen_buckets_probs_with_ppf(ppf, nbuckets):
    """Generate the buckets and probabilities for chi_square test when the ppf (Quantile function)
     is specified.

    Parameters
    ----------
    ppf : function
        The Quantile function that takes a probability and maps it back to a value.
        It's the inverse of the cdf function
    nbuckets : int
        size of the buckets

    Returns
    -------
    buckets : list of tuple
        The generated buckets
    probs : list
        The generate probabilities
    """
    assert nbuckets > 0
    probs = [1.0 / nbuckets for _ in range(nbuckets)]
    buckets = [(ppf(i / float(nbuckets)), ppf((i + 1) / float(nbuckets))) for i in range(nbuckets)]
    return buckets, probs 
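A hedged usage sketch, assuming the function above is in scope and SciPy is installed; norm.ppf is the quantile function of the standard normal:

from scipy.stats import norm

buckets, probs = gen_buckets_probs_with_ppf(norm.ppf, nbuckets=5)
print(probs)       # [0.2, 0.2, 0.2, 0.2, 0.2]
print(buckets[0])  # (-inf, -0.8416...): the first bucket is open on the left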
Example #7
Source File: models.py    From neuropythy with GNU Affero General Public License v3.0
def cleaned_visual_areas(visual_areas, faces):
        '''
        mdl.cleaned_visual_areas is the same as mdl.visual_areas except that vertices with visual
        area values of 0 (boundary values) are given the mode of their neighbors.
        '''
        area_ids = np.array(visual_areas)
        boundaryNeis = {}
        for (b,inside) in [(b, set(inside))
                           for t in faces.T
                           for (bound, inside) in [([i for i in t if area_ids[i] == 0],
                                                    [i for i in t if area_ids[i] != 0])]
                           if len(bound) > 0 and len(inside) > 0
                           for b in bound]:
            if b in boundaryNeis: boundaryNeis[b] |= inside
            else:                 boundaryNeis[b] =  inside
        for (b,neis) in six.iteritems(boundaryNeis):
            area_ids[b] = np.argmax(np.bincount(area_ids[list(neis)]))
        return pimms.imm_array(np.asarray(area_ids, dtype=np.int)) 
Example #8
Source File: test.py    From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License
def _project_im_rois(im_rois, scales):
    """Project image RoIs into the image pyramid built by _get_image_blob.
    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        scales (list): scale factors as returned by _get_image_blob
    Returns:
        rois (ndarray): R x 4 matrix of projected RoI coordinates
        levels (list): image pyramid levels used by each projected RoI
    """
    im_rois = im_rois.astype(np.float, copy=False)

    if len(scales) > 1:
        widths = im_rois[:, 2] - im_rois[:, 0] + 1
        heights = im_rois[:, 3] - im_rois[:, 1] + 1
        areas = widths * heights
        scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
        diff_areas = np.abs(scaled_areas - 224 * 224)
        levels = diff_areas.argmin(axis=1)[:, np.newaxis]
    else:
        levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)

    rois = im_rois * scales[levels]

    return rois, levels 
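A hypothetical usage sketch (np.float, like np.int, was removed in NumPy 1.24, so the function above assumes an older NumPy or float in its place):

import numpy as np

im_rois = np.array([[10, 20, 120, 240]])  # one RoI: x1, y1, x2, y2
scales = np.array([1.0, 2.0])             # two pyramid levels
rois, levels = _project_im_rois(im_rois, scales)
print(levels.ravel())  # [0]: the level whose scaled RoI area is closest to 224*224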
Example #9
Source File: map_utils.py    From DOTA_models with Apache License 2.0
def _project_to_map(map, vertex, wt=None, ignore_points_outside_map=False):
  """Projects points to map, returns how many points are present at each
  location."""
  num_points = np.zeros((map.size[1], map.size[0]))
  vertex_ = vertex[:, :2] - map.origin
  vertex_ = np.round(vertex_ / map.resolution).astype(np.int)
  if ignore_points_outside_map:
    good_ind = np.all(np.array([vertex_[:,1] >= 0, vertex_[:,1] < map.size[1],
                                vertex_[:,0] >= 0, vertex_[:,0] < map.size[0]]),
                      axis=0)
    vertex_ = vertex_[good_ind, :]
    if wt is not None:
      wt = wt[good_ind, :]
  if wt is None:
    np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), 1)
  else:
    assert(wt.shape[0] == vertex.shape[0]), \
      'number of weights should be same as vertices.'
    np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), wt)
  return num_points 
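The np.add.at call is what makes repeated hits at the same cell accumulate; plain fancy-index assignment would count duplicates only once. A self-contained sketch:

import numpy as np

grid = np.zeros((3, 3))
rows = np.array([0, 0, 2])  # the cell (0, 1) appears twice
cols = np.array([1, 1, 2])
np.add.at(grid, (rows, cols), 1)
print(grid[0, 1], grid[2, 2])  # 2.0 1.0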
Example #10
Source File: test_train.py    From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License
def _project_im_rois(im_rois, scales):
    """Project image RoIs into the image pyramid built by _get_image_blob.
    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        scales (list): scale factors as returned by _get_image_blob
    Returns:
        rois (ndarray): R x 4 matrix of projected RoI coordinates
        levels (list): image pyramid levels used by each projected RoI
    """
    im_rois = im_rois.astype(np.float, copy=False)

    if len(scales) > 1:
        widths = im_rois[:, 2] - im_rois[:, 0] + 1
        heights = im_rois[:, 3] - im_rois[:, 1] + 1
        areas = widths * heights
        scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
        diff_areas = np.abs(scaled_areas - 224 * 224)
        levels = diff_areas.argmin(axis=1)[:, np.newaxis]
    else:
        levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)

    rois = im_rois * scales[levels]

    return rois, levels 
Example #11
Source File: test_utils.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def _validate_csr_generation_inputs(num_rows, num_cols, density,
                                    distribution="uniform"):
    """Validates inputs for csr generation helper functions
    """
    total_nnz = int(num_rows * num_cols * density)
    if density < 0 or density > 1:
        raise ValueError("density has to be between 0 and 1")

    if num_rows <= 0 or num_cols <= 0:
        raise ValueError("num_rows or num_cols should be greater than 0")

    if distribution == "powerlaw":
        if total_nnz < 2 * num_rows:
            raise ValueError("not supported for this density: %s"
                             " for this shape (%s, %s)"
                             " Please keep :"
                             " num_rows * num_cols * density >= 2 * num_rows"
                             % (density, num_rows, num_cols)) 
Example #12
Source File: synthetic_data_utils.py    From DOTA_models with Apache License 2.0
def spikify_data(data_e, rng, dt=1.0, max_firing_rate=100):
  """ Apply spikes to a continuous dataset whose values are between 0.0 and 1.0
  Args:
    data_e: nexamples length list of NxT trials
    dt: how often the data are sampled
    max_firing_rate: the firing rate that is associated with a value of 1.0
  Returns:
    spikes_e: a list of length E of the data represented as spikes,
    sampled from the underlying Poisson process.
    """

  E = len(data_e)
  spikes_e = []
  for e in range(E):
    data = data_e[e]
    N,T = data.shape
    data_s = np.zeros([N,T]).astype(np.int)
    for n in range(N):
      f = data[n,:]
      s = rng.poisson(f*max_firing_rate*dt, size=T)
      data_s[n,:] = s
    spikes_e.append(data_s)

  return spikes_e 
Example #13
Source File: operators.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def dqn_sym_nips(action_num, data=None, name='dqn'):
    """Structure of the Deep Q Network in the NIPS 2013 workshop paper:
    Playing Atari with Deep Reinforcement Learning (https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf)

    Parameters
    ----------
    action_num : int
    data : mxnet.sym.Symbol, optional
    name : str, optional
    """
    if data is None:
        net = mx.symbol.Variable('data')
    else:
        net = data
    net = mx.symbol.Convolution(data=net, name='conv1', kernel=(8, 8), stride=(4, 4), num_filter=16)
    net = mx.symbol.Activation(data=net, name='relu1', act_type="relu")
    net = mx.symbol.Convolution(data=net, name='conv2', kernel=(4, 4), stride=(2, 2), num_filter=32)
    net = mx.symbol.Activation(data=net, name='relu2', act_type="relu")
    net = mx.symbol.Flatten(data=net)
    net = mx.symbol.FullyConnected(data=net, name='fc3', num_hidden=256)
    net = mx.symbol.Activation(data=net, name='relu3', act_type="relu")
    net = mx.symbol.FullyConnected(data=net, name='fc4', num_hidden=action_num)
    net = mx.symbol.Custom(data=net, name=name, op_type='DQNOutput')
    return net 
Example #14
Source File: custom.py    From mmdetection with Apache License 2.0
def prepare_test_img(self, idx):
        """Get testing data  after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Testing data after pipeline with new keys introduced by
                the pipeline.
        """

        img_info = self.data_infos[idx]
        results = dict(img_info=img_info)
        if self.proposals is not None:
            results['proposals'] = self.proposals[idx]
        self.pre_pipeline(results)
        return self.pipeline(results) 
Example #15
Source File: test_utils.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def var_check(generator, sigma, nsamples=1000000):
    """Test the generator by matching the variance.
    It requires a large number of samples and is not recommended for routine use.

    We test the sample variance by checking if it falls inside the range
        (sigma^2 - 3 * sqrt(2 * sigma^4 / (n-1)), sigma^2 + 3 * sqrt(2 * sigma^4 / (n-1)))

    References::

        @incollection{goucher2009beautiful,
              title={Beautiful Testing: Leading Professionals Reveal How They Improve Software},
              author={Goucher, Adam and Riley, Tim},
              year={2009},
              chapter=10
        }

    Examples::

        generator = lambda x: np.random.normal(0, 1.0, size=x)
        var_check_ret = var_check(generator, 1.0)

    Parameters
    ----------
    generator : function
        The generator function. It's expected to generate N i.i.d samples by calling generator(N).
    sigma : float
    nsamples : int

    Returns
    -------
    ret : bool
        Whether the variance test succeeds
    """
    samples = np.array(generator(nsamples))
    sample_var = samples.var(ddof=1)
    ret = (sample_var > sigma ** 2 - 3 * np.sqrt(2 * sigma ** 4 / (nsamples - 1))) and\
          (sample_var < sigma ** 2 + 3 * np.sqrt(2 * sigma ** 4 / (nsamples - 1)))
    return ret 
Example #16
Source File: dataset.py    From PolarSeg with BSD 3-Clause "New" or "Revised" License
def __getitem__(self, index):
        raw_data = np.fromfile(self.im_idx[index], dtype=np.float32).reshape((-1, 4))
        if self.imageset == 'test':
            annotated_data = np.expand_dims(np.zeros_like(raw_data[:,0],dtype=int),axis=1)
        else:
            annotated_data = np.fromfile(self.im_idx[index].replace('velodyne','labels')[:-3]+'label', dtype=np.int32).reshape((-1,1))
            annotated_data = annotated_data & 0xFFFF  # keep the low 16 bits (semantic class); drop the high 16 (instance id)
            annotated_data = np.vectorize(self.learning_map.__getitem__)(annotated_data)
        data_tuple = (raw_data[:,:3], annotated_data.astype(np.uint8))
        if self.return_ref:
            data_tuple += (raw_data[:,3],)
        return data_tuple 
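The & 0xFFFF mask keeps the low 16 bits of each 32-bit SemanticKITTI label (the semantic class) and discards the high 16 bits (the instance id); a standalone sketch:

import numpy as np

label = np.array([(7 << 16) | 40], dtype=np.int32)  # instance 7, class 40
print(label & 0xFFFF)  # [40]: semantic class only
print(label >> 16)     # [7]:  instance id only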
Example #17
Source File: tf_utils.py    From DOTA_models with Apache License 2.0
def setup_training(loss_op, initial_learning_rate, steps_per_decay,
                   learning_rate_decay, momentum, max_steps,
                   sync=False, adjust_lr_sync=True,
                   num_workers=1, replica_id=0, vars_to_optimize=None, 
                   clip_gradient_norm=0, typ=None, momentum2=0.999,
                   adam_eps=1e-8):
  if sync and adjust_lr_sync:
    initial_learning_rate = initial_learning_rate * num_workers
    max_steps = np.int(max_steps / num_workers)
    steps_per_decay = np.int(steps_per_decay / num_workers)

  global_step_op = slim.get_or_create_global_step()
  lr_op          = tf.train.exponential_decay(initial_learning_rate,
    global_step_op, steps_per_decay, learning_rate_decay, staircase=True)
  if typ == 'sgd':
    optimizer      = tf.train.MomentumOptimizer(lr_op, momentum)
  elif typ == 'adam':
    optimizer      = tf.train.AdamOptimizer(learning_rate=lr_op, beta1=momentum,
                                            beta2=momentum2, epsilon=adam_eps)
  
  if sync:
    
    sync_optimizer = tf.train.SyncReplicasOptimizer(optimizer, 
                                               replicas_to_aggregate=num_workers, 
                                               replica_id=replica_id, 
                                               total_num_replicas=num_workers)
    train_op       = slim.learning.create_train_op(loss_op, sync_optimizer,
                                                   variables_to_train=vars_to_optimize,
                                                   clip_gradient_norm=clip_gradient_norm)
  else:
    sync_optimizer = None
    train_op       = slim.learning.create_train_op(loss_op, optimizer,
                                                   variables_to_train=vars_to_optimize,
                                                   clip_gradient_norm=clip_gradient_norm)
  should_stop_op = tf.greater_equal(global_step_op, max_steps)
  return lr_op, global_step_op, train_op, should_stop_op, optimizer, sync_optimizer 
Example #18
Source File: numpy_softmax.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def backward(self, out_grad, in_data, out_data, in_grad):
        l = in_data[1]
        l = l.reshape((l.size,)).astype(np.int)
        y = out_data[0]
        dx = in_grad[0]
        dx[:] = y
        dx[np.arange(l.shape[0]), l] -= 1.0

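The dx[np.arange(l.shape[0]), l] -= 1.0 line is the standard softmax cross-entropy gradient (predicted probabilities minus one-hot labels); a self-contained NumPy sketch:

import numpy as np

z = np.array([[2.0, 1.0, 0.1]])              # one row of logits
labels = np.array([0])                       # true class index
y = np.exp(z - z.max(axis=1, keepdims=True))
y /= y.sum(axis=1, keepdims=True)            # softmax probabilities
dx = y.copy()
dx[np.arange(labels.shape[0]), labels] -= 1.0  # subtract 1 at the true class
print(dx)  # negative at class 0, positive elsewhere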
Example #19
Source File: test_utils.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def mean_check(generator, mu, sigma, nsamples=1000000):
    """Test the generator by matching the mean.

    We test the sample mean by checking if it falls inside the range
        (mu - 3 * sigma / sqrt(n), mu + 3 * sigma / sqrt(n))

    References::

        @incollection{goucher2009beautiful,
              title={Beautiful Testing: Leading Professionals Reveal How They Improve Software},
              author={Goucher, Adam and Riley, Tim},
              year={2009},
              chapter=10
        }

    Examples::

        generator = lambda x: np.random.normal(0, 1.0, size=x)
        mean_check_ret = mean_check(generator, 0, 1.0)

    Parameters
    ----------
    generator : function
        The generator function. It's expected to generate N i.i.d samples by calling generator(N).
    mu : float
    sigma : float
    nsamples : int

    Returns
    -------
    ret : bool
        Whether the mean test succeeds
    """
    samples = np.array(generator(nsamples))
    sample_mean = samples.mean()
    ret = (sample_mean > mu - 3 * sigma / np.sqrt(nsamples)) and\
          (sample_mean < mu + 3 * sigma / np.sqrt(nsamples))
    return ret 
Example #20
Source File: build_mscoco_data.py    From DOTA_models with Apache License 2.0
def main(unused_argv):
  def _is_valid_num_shards(num_shards):
    """Returns True if num_shards is compatible with FLAGS.num_threads."""
    return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads

  assert _is_valid_num_shards(FLAGS.train_shards), (
      "Please make the FLAGS.num_threads commensurate with FLAGS.train_shards")
  assert _is_valid_num_shards(FLAGS.val_shards), (
      "Please make the FLAGS.num_threads commensurate with FLAGS.val_shards")
  assert _is_valid_num_shards(FLAGS.test_shards), (
      "Please make the FLAGS.num_threads commensurate with FLAGS.test_shards")

  if not tf.gfile.IsDirectory(FLAGS.output_dir):
    tf.gfile.MakeDirs(FLAGS.output_dir)

  # Load image metadata from caption files.
  mscoco_train_dataset = _load_and_process_metadata(FLAGS.train_captions_file,
                                                    FLAGS.train_image_dir)
  mscoco_val_dataset = _load_and_process_metadata(FLAGS.val_captions_file,
                                                  FLAGS.val_image_dir)

  # Redistribute the MSCOCO data as follows:
  #   train_dataset = 100% of mscoco_train_dataset + 85% of mscoco_val_dataset.
  #   val_dataset = 5% of mscoco_val_dataset (for validation during training).
  #   test_dataset = 10% of mscoco_val_dataset (for final evaluation).
  train_cutoff = int(0.85 * len(mscoco_val_dataset))
  val_cutoff = int(0.90 * len(mscoco_val_dataset))
  train_dataset = mscoco_train_dataset + mscoco_val_dataset[0:train_cutoff]
  val_dataset = mscoco_val_dataset[train_cutoff:val_cutoff]
  test_dataset = mscoco_val_dataset[val_cutoff:]

  # Create vocabulary from the training captions.
  train_captions = [c for image in train_dataset for c in image.captions]
  vocab = _create_vocab(train_captions)

  _process_dataset("train", train_dataset, vocab, FLAGS.train_shards)
  _process_dataset("val", val_dataset, vocab, FLAGS.val_shards)
  _process_dataset("test", test_dataset, vocab, FLAGS.test_shards) 
Example #21
Source File: dec.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def cluster_acc(Y_pred, Y):
  from sklearn.utils.linear_assignment_ import linear_assignment
  assert Y_pred.size == Y.size
  D = max(Y_pred.max(), Y.max())+1
  w = np.zeros((D,D), dtype=np.int64)
  for i in range(Y_pred.size):
    w[Y_pred[i], int(Y[i])] += 1
  ind = linear_assignment(w.max() - w)
  return sum([w[i,j] for i,j in ind])*1.0/Y_pred.size, w 
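sklearn.utils.linear_assignment_ was removed in scikit-learn 0.23; a sketch of the same computation using scipy.optimize.linear_sum_assignment instead:

import numpy as np
from scipy.optimize import linear_sum_assignment

def cluster_acc_scipy(Y_pred, Y):
  # Contingency matrix between predicted and true labels.
  D = max(Y_pred.max(), Y.max()) + 1
  w = np.zeros((D, D), dtype=np.int64)
  for i in range(Y_pred.size):
    w[Y_pred[i], int(Y[i])] += 1
  # Hungarian algorithm: maximize matched counts by minimizing (max - w).
  row_ind, col_ind = linear_sum_assignment(w.max() - w)
  return w[row_ind, col_ind].sum() / Y_pred.size, w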
Example #22
Source File: custom_softmax.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        l = in_data[1].asnumpy().ravel().astype(np.int)
        y = out_data[0].asnumpy()
        y[np.arange(l.shape[0]), l] -= 1.0
        self.assign(in_grad[0], req[0], mx.nd.array(y)) 
Example #23
Source File: cmp_summary.py    From DOTA_models with Apache License 2.0
def _vis_readout_maps(outputs, global_step, output_dir, metric_summary, N):
  # outputs is [gt_map, pred_map]:
  if N >= 0:
    outputs = outputs[:N]
  N = len(outputs)

  plt.set_cmap('jet')
  fig, axes = utils.subplot(plt, (N, outputs[0][0].shape[4]*2), (5,5))
  axes = axes.ravel()[::-1].tolist()
  for i in range(N):
    gt_map, pred_map = outputs[i]
    for j in [0]:
      for k in range(gt_map.shape[4]):
        # Display something like the midpoint of the trajectory.
        id = np.int(gt_map.shape[1]/2)

        ax = axes.pop();
        ax.imshow(gt_map[j,id,:,:,k], origin='lower', interpolation='none',
                  vmin=0., vmax=1.)
        ax.set_axis_off();
        if i == 0: ax.set_title('gt_map')

        ax = axes.pop();
        ax.imshow(pred_map[j,id,:,:,k], origin='lower', interpolation='none',
                  vmin=0., vmax=1.)
        ax.set_axis_off();
        if i == 0: ax.set_title('pred_map')

  file_name = os.path.join(output_dir, 'readout_map_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  plt.close(fig) 
Example #24
Source File: cmp_summary.py    From DOTA_models with Apache License 2.0
def _summary_readout_maps(m, num_steps, arop_full_summary_iters):
  arop = []; arop_summary_iters = []; arop_eval_fns = [];
  id = np.int(num_steps-1)
  vis_readout_maps_gt = m.readout_maps_gt
  vis_readout_maps_prob = tf.reshape(m.readout_maps_probs,
                                     shape=tf.shape(vis_readout_maps_gt))
  vis_readout_maps_gt = tf.expand_dims(vis_readout_maps_gt[:,id,:,:,:], 1)
  vis_readout_maps_prob = tf.expand_dims(vis_readout_maps_prob[:,id,:,:,:], 1)
  arop += [[vis_readout_maps_gt, vis_readout_maps_prob]]
  arop_summary_iters += [arop_full_summary_iters]
  arop_eval_fns += [_vis_readout_maps]
  return arop, arop_summary_iters, arop_eval_fns 
Example #25
Source File: test_utils.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def np_reduce(dat, axis, keepdims, numpy_reduce_func):
    """Compatible reduce for old version of NumPy.

    Parameters
    ----------
    dat : np.ndarray
        Same as NumPy.

    axis : None or int or list-like
        Same as NumPy.

    keepdims : bool
        Same as NumPy.

    numpy_reduce_func : function
        A NumPy reducing function like ``np.sum`` or ``np.max``.
    """
    if isinstance(axis, int):
        axis = [axis]
    else:
        axis = list(axis) if axis is not None else range(len(dat.shape))
    ret = dat
    for i in reversed(sorted(axis)):
        ret = numpy_reduce_func(ret, axis=i)
    if keepdims:
        keepdims_shape = list(dat.shape)
        for i in axis:
            keepdims_shape[i] = 1
        ret = ret.reshape(tuple(keepdims_shape))
    return ret 
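A hypothetical usage sketch, assuming the function above is in scope; modern NumPy accepts a tuple of axes directly, which makes a handy cross-check:

import numpy as np

dat = np.arange(24).reshape(2, 3, 4)
out = np_reduce(dat, axis=[0, 2], keepdims=True, numpy_reduce_func=np.sum)
print(out.shape)  # (1, 3, 1)
print(np.allclose(out, dat.sum(axis=(0, 2), keepdims=True)))  # True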
Example #26
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def simplex_summation_matrix(simplices, weight=None, inverse=False):
    '''
    simplex_summation_matrix(mtx) yields a scipy sparse matrix that, when dotted with a
      column vector of length m (where m is the number of simplices described in the simplex matrix,
      mtx), yields a vector of length n (where n is the number of vertices in the simplex mesh); the
      returned vector is, for each vertex, the sum over the faces to which it belongs.

    The matrix mtx must be oriented such that the first dimension (rows) corresponds to the vertices
    of the simplices and the second dimension (columns) corresponds to simplices themselves.

    The optional argument weight may specify a weight for each face, in which case the summation is
    a weighted sum instead of a flat sum.

    The optional argument inverse=True may be given to indicate that the inverse summation matrix
    (summation of the vertices onto the simplices) should be returned.
    '''
    simplices = np.asarray(simplices)
    n = np.max(simplices) + 1
    (d,m) = simplices.shape
    rng = range(m)
    if inverse:
        if weight is None: f = sps.csr_matrix
        else:
            nrng = range(n)
            ww = sps.csr_matrix((weight, (nrng, nrng)), shape=(n,n), dtype=np.float)
            f = lambda *args,**kwargs: ww.dot(sps.csc_matrix(*args,**kwargs))
        s = f((np.ones(d*m, dtype=np.int),
               (np.concatenate([rng for _ in range(d)]), np.concatenate(simplices))),
              shape=(m,n),
              dtype=np.int)
    else:
        s = sps.csr_matrix(
            (np.ones(d*m, dtype=np.int),
             (np.concatenate(simplices), np.concatenate([rng for _ in range(d)]))),
            shape=(n,m),
            dtype=np.int)
        if weight is not None:
            s = s.dot(sps.csc_matrix((weight, (rng, rng)), shape=(m,m), dtype=np.float))
    return s 
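A hypothetical usage sketch, assuming the function above is in scope (with scipy.sparse imported as sps, and np.int available or replaced by int): two triangles sharing the edge (1, 2) in a four-vertex mesh:

import numpy as np

tris = np.array([[0, 1],
                 [1, 2],
                 [2, 3]])              # 3 x m: rows are vertices, columns are simplices
S = simplex_summation_matrix(tris)     # shape (4, 2): vertices x simplices
face_vals = np.array([10.0, 1.0])
print(S.dot(face_vals))  # [10. 11. 11.  1.]: shared vertices sum both faces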
Example #27
Source File: cmag.py    From neuropythy with GNU Affero General Public License v3.0
def _cmag_coord_idcs(coordinates):
    return [i for (i,(x,y)) in enumerate(zip(*coordinates))
            if (np.issubdtype(type(x), np.float) or np.issubdtype(type(x), np.int))
            if (np.issubdtype(type(y), np.float) or np.issubdtype(type(y), np.int))
            if not np.isnan(x) and not np.isnan(y)] 
Example #28
Source File: models.py    From neuropythy with GNU Affero General Public License v3.0
def faces(tris):
        'mdl.faces is the triangle matrix for the given retinotopy mesh model mdl.'
        tris = np.asarray(tris, dtype=np.int)
        if tris.shape[0] != 3: tris = tris.T
        if tris.shape[0] != 3: raise ValueError('triangle matrix must have 3 rows or columns')
        return pimms.imm_array(tris) 
Example #29
Source File: images.py    From neuropythy with GNU Affero General Public License v3.0
def image_shape(arg):
    '''
    image_shape(im) yields the image shape for the given image im. The argument im may be an image,
      an array, an image header, or an image spec.
    '''
    if   is_image(arg):                                sh = arg.shape
    elif pimms.is_vector(arg, 'int') and len(arg) < 5: sh = tuple(arg)
    elif is_image_spec(arg):                           sh = imspec_lookup(arg, 'image_shape')
    elif is_image_header(arg):                         sh = image_header_to_spec(arg)['image_shape']
    elif is_image_array(arg):                          sh = np.shape(arg)
    else: raise ValueError('Bad argument of type %s given to image_shape()' % type(arg))
    sh = tuple(sh)
    if   len(sh) == 2: sh = (sh[0], 1, 1, sh[1])
    elif len(sh) == 1: sh = (sh[0], 1, 1)
    return sh 
Example #30
Source File: functions.py    From deep-learning-note with MIT License
def step_function(x):
    return np.array(x > 0, dtype=np.int)
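Under NumPy >= 1.24 the np.int alias raises an AttributeError; an equivalent version using the built-in int as the dtype:

import numpy as np

def step_function(x):
    # Boolean comparison cast to integers: 1 where x > 0, else 0.
    return np.array(x > 0, dtype=int)

print(step_function(np.array([-1.0, 0.0, 2.0])))  # [0 0 1]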