Python numpy.floor() Examples

The following are 30 code examples showing how to use numpy.floor(). They are extracted from open source projects; you can go to the original project or source file by following the links above each example.

You may also want to check out all available functions and classes of the module numpy.
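
As a quick refresher before the examples: numpy.floor() rounds each element down to the nearest integer and returns a float array. The snippet below is a minimal standalone illustration.

import numpy as np

a = np.array([-1.7, -0.2, 0.2, 1.5, 2.0])
print(np.floor(a))                 # [-2. -1.  0.  1.  2.]
print(np.floor(a).dtype)           # float64 -- floor keeps a floating dtype
print(np.floor(a).astype(int))     # [-2 -1  0  1  2] -- cast when an integer index is needed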

Example 1
Project: sklearn-audio-transfer-learning   Author: jordipons   File: mel_features.py   License: ISC License
def frame(data, window_length, hop_length):
  """Convert array into a sequence of successive possibly overlapping frames.

  An n-dimensional array of shape (num_samples, ...) is converted into an
  (n+1)-D array of shape (num_frames, window_length, ...), where each frame
  starts hop_length points after the preceding one.

  This is accomplished using stride_tricks, so the original data is not
  copied.  However, there is no zero-padding, so any incomplete frames at the
  end are not included.

  Args:
    data: np.array of dimension N >= 1.
    window_length: Number of samples in each frame.
    hop_length: Advance (in samples) between each window.

  Returns:
    (N+1)-D np.array with as many rows as there are complete frames that can be
    extracted.
  """
  num_samples = data.shape[0]
  num_frames = 1 + int(np.floor((num_samples - window_length) / hop_length))
  shape = (num_frames, window_length) + data.shape[1:]
  strides = (data.strides[0] * hop_length,) + data.strides
  return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides) 
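
A short usage sketch (assuming the frame() definition above is in scope and numpy is imported as np): the floor in the frame-count formula simply drops any trailing samples that do not fill a whole window.

import numpy as np

data = np.arange(10.0)                          # 10 samples
frames = frame(data, window_length=4, hop_length=2)
print(frames.shape)                             # (4, 4): 1 + floor((10 - 4) / 2) = 4 frames
print(frames[0], frames[-1])                    # [0. 1. 2. 3.] [6. 7. 8. 9.]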
Example 2
Project: Kaggler   Author: jeongyoonlee   File: numerical.py   License: MIT License
def _transform_col(self, x, i):
        """Encode one numerical feature column to quantiles.

        Args:
            x (pandas.Series): numerical feature column to encode
            i (int): column index of the numerical feature

        Returns:
            Encoded feature (pandas.Series).
        """
        # Map values to the empirical CDF between .1% and 99.9%
        rv = np.ones_like(x) * -1

        filt = ~np.isnan(x)
        rv[filt] = np.floor((self.ecdfs[i](x[filt]) * 0.998 + .001) *
                            self.n_label)

        return rv 
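
The floor here is what turns a continuous ECDF value into a discrete quantile label. Below is a self-contained sketch of the same binning, with a made-up n_label of 10 and hand-picked ECDF values in [0, 1].

import numpy as np

p = np.array([0.0, 0.25, 0.5, 0.75, 1.0])        # empirical CDF values
n_label = 10
bins = np.floor((p * 0.998 + 0.001) * n_label)
print(bins)                                      # [0. 2. 5. 7. 9.]

The 0.998/0.001 squeeze keeps the result inside 0 .. n_label - 1 even at the extremes of the CDF.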
Example 3
Project: EXOSIMS   Author: dsavransky   File: keplerSTM_indprop.py   License: BSD 3-Clause "New" or "Revised" License
def __init__(self, x0, mu, epsmult = 4.0, noc = False):
        #determine number of planets and validate input
        nplanets = x0.size/6.
        if (nplanets - np.floor(nplanets) > 0):
            raise Exception('The length of x0 must be a multiple of 6.')
        
        if (mu.size != nplanets):
            raise Exception('The length of mu must be the length of x0 divided by 6')
        
        self.nplanets = int(nplanets)
        self.mu = np.squeeze(mu)
        if (self.mu.size == 1):
            self.mu = np.array(mu)
        
        self.epsmult = epsmult
        
        if not(noc) and ('EXOSIMS.util.KeplerSTM_C.CyKeplerSTM' in sys.modules):
            self.havec = True
            self.x0 = np.squeeze(x0)
        else:
            self.havec = False
            self.updateState(np.squeeze(x0)) 
Example 4
Project: ICDAR-2019-SROIE   Author: zzzDavid   File: dataset.py   License: MIT License
def __call__(self, batch):
        images, labels = zip(*batch)

        imgH = self.imgH
        imgW = self.imgW
        if self.keep_ratio:
            ratios = []
            for image in images:
                w, h = image.size
                ratios.append(w / float(h))
            ratios.sort()
            max_ratio = ratios[-1]
            imgW = int(np.floor(max_ratio * imgH))
            imgW = max(imgH * self.min_ratio, imgW)  # ensure imgW >= imgH * min_ratio

        transform = resizeNormalize((imgW, imgH))
        images = [transform(image) for image in images]
        images = torch.cat([t.unsqueeze(0) for t in images], 0)

        return images, labels 
Example 5
Project: differential-privacy-library   Author: IBM   File: geometric.py   License: MIT License
def randomise(self, value):
        """Randomise `value` with the mechanism.

        Parameters
        ----------
        value : int
            The value to be randomised.

        Returns
        -------
        int
            The randomised value.

        """
        self.check_inputs(value)

        # Need to account for overlap of 0-value between distributions of different sign
        unif_rv = random() - 0.5
        unif_rv *= 1 + np.exp(self._scale)
        sgn = -1 if unif_rv < 0 else 1

        # Use formula for geometric distribution, with ratio of exp(-epsilon/sensitivity)
        return int(np.round(value + sgn * np.floor(np.log(sgn * unif_rv) / self._scale))) 
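
The floor-of-log expression above is an inverse-CDF way of drawing geometric noise. As a related standalone illustration (not the library's API, just the underlying identity): if U is uniform on (0, 1], then floor(ln(U) / ln(1 - p)) is geometrically distributed with success probability p.

import numpy as np

rng = np.random.default_rng(0)
p = 0.3
u = 1.0 - rng.random(100000)                 # uniform on (0, 1], avoids log(0)
samples = np.floor(np.log(u) / np.log(1 - p))
print(samples.mean())                        # close to (1 - p) / p, i.e. about 2.33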
Example 6
Project: differential-privacy-library   Author: IBM   File: gaussian.py   License: MIT License
def randomise(self, value):
        self.check_inputs(value)

        if self._scale == 0:
            return value

        tau = 1 / (1 + np.floor(self._scale))
        sigma2 = self._scale ** 2

        while True:
            geom_x = 0
            while self._bernoulli_exp(tau):
                geom_x += 1

            bern_b = np.random.binomial(1, 0.5)
            if bern_b and not geom_x:
                continue

            lap_y = int((1 - 2 * bern_b) * geom_x)
            bern_c = self._bernoulli_exp((abs(lap_y) - tau * sigma2) ** 2 / 2 / sigma2)
            if bern_c:
                return value + lap_y 
Example 7
Project: scarlet   Author: pmelchior   File: interpolation.py   License: MIT License
def lanczos(dx, a=3):
    """Lanczos kernel

    Parameters
    ----------
    dx: float
        amount to shift image
    a: int
        Lanczos window size parameter

    Returns
    -------
    y: array-like
        1D Lanczos kernel values
    window: array-like
        integer sample offsets covered by the kernel
    """
    if np.abs(dx) > 1:
        raise ValueError("The fractional shift dx must be between -1 and 1")
    window = np.arange(-a + 1, a + 1) + np.floor(dx)
    y = np.sinc(dx - window) * np.sinc((dx - window) / a)
    return y, window.astype(int) 
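
A small usage sketch, assuming the lanczos() definition above and numpy imported as np: the floor(dx) term re-centres the integer window on the sample nearest the requested shift.

import numpy as np

y, window = lanczos(0.3, a=3)
print(window)        # [-2 -1  0  1  2  3], integer offsets around floor(0.3) = 0
print(y.shape)       # (6,), one kernel weight per offset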
Example 8
Project: HorizonNet   Author: sunset1995   File: pano_lsd_align.py   License: MIT License
def warpImageFast(im, XXdense, YYdense):
    minX = max(1., np.floor(XXdense.min()) - 1)
    minY = max(1., np.floor(YYdense.min()) - 1)

    maxX = min(im.shape[1], np.ceil(XXdense.max()) + 1)
    maxY = min(im.shape[0], np.ceil(YYdense.max()) + 1)

    im = im[int(round(minY-1)):int(round(maxY)),
            int(round(minX-1)):int(round(maxX))]

    assert XXdense.shape == YYdense.shape
    out_shape = XXdense.shape
    coordinates = [
        (YYdense - minY).reshape(-1),
        (XXdense - minX).reshape(-1),
    ]
    im_warp = np.stack([
        map_coordinates(im[..., c], coordinates, order=1).reshape(out_shape)
        for c in range(im.shape[-1])],
        axis=-1)

    return im_warp 
Example 9
Project: HorizonNet   Author: sunset1995   File: pano_lsd_align.py   License: MIT License
def paintParameterLine(parameterLine, width, height):
    lines = parameterLine.copy()
    panoEdgeC = np.zeros((height, width))

    num_sample = max(height, width)
    for i in range(len(lines)):
        n = lines[i, :3]
        sid = lines[i, 4] * 2 * np.pi
        eid = lines[i, 5] * 2 * np.pi
        if eid < sid:
            x = np.linspace(sid, eid + 2 * np.pi, num_sample)
            x = x % (2 * np.pi)
        else:
            x = np.linspace(sid, eid, num_sample)
        u = -np.pi + x.reshape(-1, 1)
        v = computeUVN(n, u, lines[i, 3])
        xyz = uv2xyzN(np.hstack([u, v]), lines[i, 3])
        uv = xyz2uvN(xyz, 1)
        m = np.minimum(np.floor((uv[:,0] + np.pi) / (2 * np.pi) * width) + 1,
            width).astype(np.int32)
        n = np.minimum(np.floor(((np.pi / 2) - uv[:, 1]) / np.pi * height) + 1,
            height).astype(np.int32)
        panoEdgeC[n-1, m-1] = i

    return panoEdgeC 
Example 10
Project: dataflow   Author: tensorpack   File: deform.py   License: Apache License 2.0
def np_sample(img, coords):
    # a numpy implementation of ImageSample layer
    coords = np.maximum(coords, 0)
    coords = np.minimum(coords, np.array([img.shape[0] - 1, img.shape[1] - 1]))

    lcoor = np.floor(coords).astype('int32')
    ucoor = lcoor + 1
    ucoor = np.minimum(ucoor, np.array([img.shape[0] - 1, img.shape[1] - 1]))
    diff = coords - lcoor
    neg_diff = 1.0 - diff

    lcoory, lcoorx = np.split(lcoor, 2, axis=2)
    ucoory, ucoorx = np.split(ucoor, 2, axis=2)
    diff = np.repeat(diff, 3, 2).reshape((diff.shape[0], diff.shape[1], 2, 3))
    neg_diff = np.repeat(neg_diff, 3, 2).reshape((diff.shape[0], diff.shape[1], 2, 3))
    diffy, diffx = np.split(diff, 2, axis=2)
    ndiffy, ndiffx = np.split(neg_diff, 2, axis=2)

    ret = img[lcoory, lcoorx, :] * ndiffx * ndiffy + \
        img[ucoory, ucoorx, :] * diffx * diffy + \
        img[lcoory, ucoorx, :] * ndiffy * diffx + \
        img[ucoory, lcoorx, :] * diffy * ndiffx
    return ret[:, :, 0, :] 
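
A quick sanity check, assuming the np_sample() definition above and numpy imported as np: with integer coordinates the floored corner coincides with the sample point, so bilinear sampling returns the image unchanged.

import numpy as np

img = np.random.rand(4, 5, 3).astype('float32')
ys, xs = np.meshgrid(np.arange(4), np.arange(5), indexing='ij')
coords = np.stack([ys, xs], axis=-1).astype('float32')   # (4, 5, 2) as (y, x)
out = np_sample(img, coords)
print(np.allclose(out, img))                              # True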
Example 11
Project: NeuroKit   Author: neuropsychology   File: ecg_simulate.py   License: MIT License
def _ecg_simulate_derivsecgsyn(t, x, rr, ti, sfint, ai, bi):

    ta = math.atan2(x[1], x[0])
    r0 = 1
    a0 = 1.0 - np.sqrt(x[0] ** 2 + x[1] ** 2) / r0

    ip = np.floor(t * sfint).astype(int)
    w0 = 2 * np.pi / rr[min(ip, len(rr) - 1)]
    # w0 = 2*np.pi/rr[ip[ip <= np.max(rr)]]

    fresp = 0.25
    zbase = 0.005 * np.sin(2 * np.pi * fresp * t)

    dx1dt = a0 * x[0] - w0 * x[1]
    dx2dt = a0 * x[1] + w0 * x[0]

    # matlab rem and numpy rem are different
    # dti = np.remainder(ta - ti, 2*np.pi)
    dti = (ta - ti) - np.round((ta - ti) / 2 / np.pi) * 2 * np.pi
    dx3dt = -np.sum(ai * dti * np.exp(-0.5 * (dti / bi) ** 2)) - 1 * (x[2] - zbase)

    dxdt = np.array([dx1dt, dx2dt, dx3dt])
    return dxdt 
Example 12
Project: me-ica   Author: ME-ICA   File: tedana.py   License: GNU Lesser General Public License v2.1
def scoreatpercentile(a, per, limit=(), interpolation_method='lower'):
    """
    This function is grabbed from scipy

    """
    values = np.sort(a, axis=0)
    if limit:
        values = values[(limit[0] <= values) & (values <= limit[1])]

    idx = per / 100. * (values.shape[0] - 1)
    if (idx % 1 == 0):
        score = values[int(idx)]
    else:
        if interpolation_method == 'fraction':
            score = _interpolate(values[int(idx)], values[int(idx) + 1],
                                 idx % 1)
        elif interpolation_method == 'lower':
            score = values[int(np.floor(idx))]
        elif interpolation_method == 'higher':
            score = values[int(np.ceil(idx))]
        else:
            raise ValueError("interpolation_method can only be 'fraction', " \
                             "'lower' or 'higher'")
    return score 
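
A usage sketch, assuming the scoreatpercentile() definition above and numpy imported as np: with the default 'lower' interpolation the fractional index is floored, so the value just below the requested percentile is returned.

import numpy as np

a = np.array([1, 2, 3, 4, 5])
print(scoreatpercentile(a, 40))   # idx = 0.4 * 4 = 1.6 -> floor -> values[1] = 2
print(scoreatpercentile(a, 50))   # idx = 2.0 is integral -> values[2] = 3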
Example 13
def forward(self, features, rois):
        batch_size, num_channels, data_height, data_width = features.size()
        num_rois = rois.size()[0]
        outputs = Variable(torch.zeros(num_rois, num_channels, self.pooled_height, self.pooled_width)).cuda()

        for roi_ind, roi in enumerate(rois):
            batch_ind = int(roi[0].data[0])
            roi_start_w, roi_start_h, roi_end_w, roi_end_h = np.round(
                roi[1:].data.cpu().numpy() * self.spatial_scale).astype(int)
            roi_width = max(roi_end_w - roi_start_w + 1, 1)
            roi_height = max(roi_end_h - roi_start_h + 1, 1)
            bin_size_w = float(roi_width) / float(self.pooled_width)
            bin_size_h = float(roi_height) / float(self.pooled_height)

            for ph in range(self.pooled_height):
                hstart = int(np.floor(ph * bin_size_h))
                hend = int(np.ceil((ph + 1) * bin_size_h))
                hstart = min(data_height, max(0, hstart + roi_start_h))
                hend = min(data_height, max(0, hend + roi_start_h))
                for pw in range(self.pooled_width):
                    wstart = int(np.floor(pw * bin_size_w))
                    wend = int(np.ceil((pw + 1) * bin_size_w))
                    wstart = min(data_width, max(0, wstart + roi_start_w))
                    wend = min(data_width, max(0, wend + roi_start_w))

                    is_empty = (hend <= hstart) or (wend <= wstart)
                    if is_empty:
                        outputs[roi_ind, :, ph, pw] = 0
                    else:
                        data = features[batch_ind]
                        outputs[roi_ind, :, ph, pw] = torch.max(
                            torch.max(data[:, hstart:hend, wstart:wend], 1)[0], 2)[0].view(-1)

        return outputs 
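
The floor/ceil pair above is what carves a RoI into (possibly overlapping) pooling bins. Below is a standalone sketch of just that bin arithmetic, using a hypothetical RoI height of 7 pooled into 3 bins.

import numpy as np

roi_height, pooled_height = 7, 3
bin_size_h = roi_height / pooled_height
for ph in range(pooled_height):
    hstart = int(np.floor(ph * bin_size_h))
    hend = int(np.ceil((ph + 1) * bin_size_h))
    print(ph, (hstart, hend))   # (0, 3), (2, 5), (4, 7)

Note that adjacent bins overlap slightly (rows 2 and 4 appear twice) while together covering the whole RoI.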
Example 14
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py   License: MIT License
def create_from_images(tfrecord_dir, image_dir, label_dir, shuffle):
    print('Loading images from "%s"' % image_dir)
    image_filenames = sorted(glob.glob(os.path.join(image_dir, '*')))
    if len(image_filenames) == 0:
        error('No input images found')
        
    img = np.asarray(PIL.Image.open(image_filenames[0]))
    resolution = img.shape[0]
    channels = img.shape[2] if img.ndim == 3 else 1
    if img.shape[1] != resolution:
        error('Input images must have the same width and height')
    if resolution != 2 ** int(np.floor(np.log2(resolution))):
        error('Input image resolution must be a power-of-two')
    if channels not in [1, 3]:
        error('Input images must be stored as RGB or grayscale')

    try:
        with open(label_dir, 'rb') as file:
            labels = pickle.load(file)
    except:
        error('Label file was not found')
    
    with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
        order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames))
        reordered_names = []
        for idx in range(order.size):
            image_filename = image_filenames[order[idx]]
            img = np.asarray(PIL.Image.open(image_filename))
            if channels == 1:
                img = img[np.newaxis, :, :] # HW => CHW
            else:
                img = img.transpose(2, 0, 1) # HWC => CHW
            tfr.add_image(img)
            reordered_names.append(os.path.basename(image_filename))
        reordered_labels = []
        for key in reordered_names:
            reordered_labels += [labels[key]]
        reordered_labels = np.stack(reordered_labels, 0)
        tfr.add_labels(reordered_labels)

#---------------------------------------------------------------------------- 
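
The floor(log2) comparison above is a common way to test for a power-of-two resolution; a minimal standalone version:

import numpy as np

for resolution in (256, 300, 1024):
    is_pow2 = resolution == 2 ** int(np.floor(np.log2(resolution)))
    print(resolution, is_pow2)   # 256 True, 300 False, 1024 True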
Example 15
Project: disentangling_conditional_gans   Author: zalandoresearch   File: train.py   License: MIT License
def process_reals(x, lod, mirror_augment, drange_data, drange_net):
    with tf.name_scope('ProcessReals'):
        with tf.name_scope('DynamicRange'):
            x = tf.cast(x, tf.float32)
            x = misc.adjust_dynamic_range(x, drange_data, drange_net)
        if mirror_augment:
            with tf.name_scope('MirrorAugment'):
                s = tf.shape(x)
                mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0)
                mask = tf.tile(mask, [1, s[1], s[2], s[3]])
                x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3]))
        with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail.
            s = tf.shape(x)
            y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2])
            y = tf.reduce_mean(y, axis=[3, 5], keep_dims=True)
            y = tf.tile(y, [1, 1, 1, 2, 1, 2])
            y = tf.reshape(y, [-1, s[1], s[2], s[3]])
            x = tfutil.lerp(x, y, lod - tf.floor(lod))
        with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks.
            s = tf.shape(x)
            factor = tf.cast(2 ** tf.floor(lod), tf.int32)
            x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
            x = tf.tile(x, [1, 1, 1, factor, 1, factor])
            x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
        return x

#----------------------------------------------------------------------------
# Just-in-time processing of masks before feeding them to the networks. 
Example 16
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset.py   License: MIT License
def configure(self, minibatch_size, lod=0):
        lod = int(np.floor(lod))
        assert minibatch_size >= 1 and lod in self._tf_datasets
        if self._cur_minibatch != minibatch_size or self._cur_lod != lod:
            self._tf_init_ops[lod].run({self._tf_minibatch_in: minibatch_size})
            self._cur_minibatch = minibatch_size
            self._cur_lod = lod

    # Get next minibatch as TensorFlow expressions. 
Example 17
Project: dustmaps   Author: gregreen   File: leike_ensslin_2019.py   License: GNU General Public License v2.0
def _coords2idx(self, coords):
        c = coords.transform_to('galactic').represent_as('cartesian')
        
        idx = np.empty((3,) + c.shape, dtype='i4')
        mask = np.zeros(c.shape, dtype=bool)

        for i,x in enumerate((c.x, c.y, c.z)):
            idx[i,...] = np.floor(x.to('pc').value + 300) * 256/600.
            mask |= (idx[i] < 0) | (idx[i] >= self._shape[i])

        for i in range(3):
            idx[i, mask] = -1

        return idx, mask 
Example 18
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: bucket_io.py   License: Apache License 2.0
def make_data_iter_plan(self):
        "make a random data iteration plan"
        # truncate each bucket into multiple of batch-size
        bucket_n_batches = []
        for i in range(len(self.data)):
            bucket_n_batches.append(int(np.floor(len(self.data[i]) / self.batch_size)))
            self.data[i] = self.data[i][:int(bucket_n_batches[i]*self.batch_size)]

        bucket_plan = np.hstack([np.zeros(n, int)+i for i, n in enumerate(bucket_n_batches)])
        np.random.shuffle(bucket_plan)

        bucket_idx_all = [np.random.permutation(len(x)) for x in self.data]

        self.bucket_plan = bucket_plan
        self.bucket_idx_all = bucket_idx_all
        self.bucket_curr_idx = [0 for x in self.data]

        self.data_buffer = []
        self.label_buffer = []
        for i_bucket in range(len(self.data)):
            if not self.model_parallel:
                data = np.zeros((self.batch_size, self.buckets[i_bucket]))
                label = np.zeros((self.batch_size, self.buckets[i_bucket]))
                self.data_buffer.append(data)
                self.label_buffer.append(label)
            else:
                data = np.zeros((self.buckets[i_bucket], self.batch_size))
                self.data_buffer.append(data)

        if self.model_parallel:
            # Transpose data if model parallel
            for i in range(len(self.data)):
                bucket_data = self.data[i]
                self.data[i] = np.transpose(bucket_data) 
Example 19
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: net.py   License: Apache License 2.0
def __init__(self, in_channels, out_channels, kernel_size, stride):
        super(ConvLayer, self).__init__()
        padding = int(np.floor(kernel_size / 2))
        self.pad = ReflectancePadding(pad_width=(0,0,0,0,padding,padding,padding,padding))
        self.conv2d = nn.Conv2D(in_channels=in_channels, channels=out_channels, 
                                kernel_size=kernel_size, strides=(stride,stride),
                                padding=0) 
Example 20
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: net.py   License: Apache License 2.0
def __init__(self, in_channels, out_channels, kernel_size, 
            stride, upsample=None):
        super(UpsampleConvLayer, self).__init__()
        self.upsample = upsample
        self.reflection_padding = int(np.floor(kernel_size / 2))
        self.conv2d = nn.Conv2D(in_channels=in_channels, 
                                channels=out_channels, 
                                kernel_size=kernel_size, strides=(stride,stride),
                                padding=self.reflection_padding) 
Example 21
Project: DOTA_models   Author: ringringyi   File: map_utils.py   License: Apache License 2.0
def _get_xy_bounding_box(vertex, padding):
  """Returns the xy bounding box of the environment."""
  min_ = np.floor(np.min(vertex[:, :2], axis=0) - padding).astype(int)
  max_ = np.ceil(np.max(vertex[:, :2], axis=0) + padding).astype(int)
  return min_, max_ 
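
A quick sketch, assuming the _get_xy_bounding_box() definition above and numpy imported as np: flooring the minimum and ceiling the maximum guarantees the integer box fully contains the padded vertices.

import numpy as np

vertex = np.array([[0.4, 2.7, 0.0],
                   [3.2, 5.1, 1.0]])
print(_get_xy_bounding_box(vertex, padding=1))   # (array([-1,  1]), array([5, 7]))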
Example 22
Project: DOTA_models   Author: ringringyi   File: map_utils.py   License: Apache License 2.0
def generate_goal_images(map_scales, map_crop_sizes, n_ori, goal_dist,
                         goal_theta, rel_goal_orientation):
  goal_dist = goal_dist[:,0]
  goal_theta = goal_theta[:,0]
  rel_goal_orientation = rel_goal_orientation[:,0]

  goals = [];
  # Generate the map images.
  for i, (sc, map_crop_size) in enumerate(zip(map_scales, map_crop_sizes)):
    goal_i = np.zeros((goal_dist.shape[0], map_crop_size, map_crop_size, n_ori),
                      dtype=np.float32)
    x = goal_dist*np.cos(goal_theta)*sc + (map_crop_size-1.)/2.
    y = goal_dist*np.sin(goal_theta)*sc + (map_crop_size-1.)/2.

    for j in range(goal_dist.shape[0]):
      gc = rel_goal_orientation[j]
      x0 = np.floor(x[j]).astype(np.int32); x1 = x0 + 1;
      y0 = np.floor(y[j]).astype(np.int32); y1 = y0 + 1;
      if x0 >= 0 and x0 <= map_crop_size-1:
        if y0 >= 0 and y0 <= map_crop_size-1:
          goal_i[j, y0, x0, gc] = (x1-x[j])*(y1-y[j])
        if y1 >= 0 and y1 <= map_crop_size-1:
          goal_i[j, y1, x0, gc] = (x1-x[j])*(y[j]-y0)

      if x1 >= 0 and x1 <= map_crop_size-1:
        if y0 >= 0 and y0 <= map_crop_size-1:
          goal_i[j, y0, x1, gc] = (x[j]-x0)*(y1-y[j])
        if y1 >= 0 and y1 <= map_crop_size-1:
          goal_i[j, y1, x1, gc] = (x[j]-x0)*(y[j]-y0)

    goals.append(goal_i)
  return goals 
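
The floor above picks the lower grid cell for bilinear splatting, and the four weights are the usual fractional areas that sum to one. A standalone sketch for a single point with hypothetical coordinates:

import numpy as np

x, y = 2.3, 4.6
x0, y0 = np.floor(x).astype(np.int32), np.floor(y).astype(np.int32)
x1, y1 = x0 + 1, y0 + 1
w = {(y0, x0): (x1 - x) * (y1 - y),
     (y1, x0): (x1 - x) * (y - y0),
     (y0, x1): (x - x0) * (y1 - y),
     (y1, x1): (x - x0) * (y - y0)}
print(w)                     # weights for the four surrounding cells
print(sum(w.values()))       # 1.0 (up to floating-point rounding)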
Example 23
Project: IntroToDeepLearning   Author: robb-brown   File: ccseg.py   License: MIT License
def computePad(dims,depth):
	y1=y2=x1=x2=0; 
	y,x = [numpy.ceil(dims[i]/float(2**depth)) * (2**depth) for i in range(-2,0)]
	x = float(x); y = float(y);
	y1 = int(numpy.floor((y - dims[-2])/2)); y2 = int(numpy.ceil((y - dims[-2])/2))
	x1 = int(numpy.floor((x - dims[-1])/2)); x2 = int(numpy.ceil((x - dims[-1])/2))
	return y1,y2,x1,x2 
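
A usage sketch, assuming the computePad() definition above and numpy imported as numpy (as in that project): it pads height and width up to the next multiple of 2**depth, splitting the padding as evenly as floor/ceil allow.

import numpy

print(computePad((1, 1, 100, 100), 3))   # (2, 2, 2, 2): 100 is padded up to 104
print(computePad((1, 1, 37, 64), 3))     # (1, 2, 0, 0): 37 -> 40, 64 already a multiple of 8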
Example 24
Project: cs294-112_hws   Author: xuwd11   File: pointmass.py   License: MIT License
def preprocess(self, state):
        scaled_state = self.scale * state
        x_floor, y_floor = np.floor(scaled_state)
        assert x_floor <= self.scale
        assert y_floor <= self.scale
        if x_floor == self.scale:
            x_floor -= 1
        if y_floor == self.scale:
            y_floor -= 1
        index = self.scale*x_floor + y_floor
        return index 
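
A concrete trace of the same arithmetic, assuming a hypothetical self.scale of 4: flooring the scaled coordinates maps a continuous state in [0, 1]^2 onto one of 16 grid cells, and the row-major index is scale * x_floor + y_floor.

import numpy as np

scale = 4
state = np.array([0.6, 0.3])
x_floor, y_floor = np.floor(scale * state)   # (2.0, 1.0)
index = scale * x_floor + y_floor
print(index)                                 # 9.0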
Example 25
Project: cascade-rcnn_Pytorch   Author: guoruoqian   File: roi_pool_py.py   License: MIT License
def forward(self, features, rois):
        batch_size, num_channels, data_height, data_width = features.size()
        num_rois = rois.size()[0]
        outputs = Variable(torch.zeros(num_rois, num_channels, self.pooled_height, self.pooled_width)).cuda()

        for roi_ind, roi in enumerate(rois):
            batch_ind = int(roi[0].data[0])
            roi_start_w, roi_start_h, roi_end_w, roi_end_h = np.round(
                roi[1:].data.cpu().numpy() * self.spatial_scale).astype(int)
            roi_width = max(roi_end_w - roi_start_w + 1, 1)
            roi_height = max(roi_end_h - roi_start_h + 1, 1)
            bin_size_w = float(roi_width) / float(self.pooled_width)
            bin_size_h = float(roi_height) / float(self.pooled_height)

            for ph in range(self.pooled_height):
                hstart = int(np.floor(ph * bin_size_h))
                hend = int(np.ceil((ph + 1) * bin_size_h))
                hstart = min(data_height, max(0, hstart + roi_start_h))
                hend = min(data_height, max(0, hend + roi_start_h))
                for pw in range(self.pooled_width):
                    wstart = int(np.floor(pw * bin_size_w))
                    wend = int(np.ceil((pw + 1) * bin_size_w))
                    wstart = min(data_width, max(0, wstart + roi_start_w))
                    wend = min(data_width, max(0, wend + roi_start_w))

                    is_empty = (hend <= hstart) or (wend <= wstart)
                    if is_empty:
                        outputs[roi_ind, :, ph, pw] = 0
                    else:
                        data = features[batch_ind]
                        outputs[roi_ind, :, ph, pw] = torch.max(
                            torch.max(data[:, hstart:hend, wstart:wend], 1)[0], 2)[0].view(-1)

        return outputs 
Example 26
Project: EXOSIMS   Author: dsavransky   File: keplerSTM.py   License: BSD 3-Clause "New" or "Revised" License
def __init__(self, x0, mu, epsmult = 4.0, prefVallado = False, noc = False):
        #determine number of planets and validate input
        nplanets = x0.size/6.
        if (nplanets - np.floor(nplanets) > 0):
            raise Exception('The length of x0 must be a multiple of 6.')
        
        if (mu.size != nplanets):
            raise Exception('The length of mu must be the length of x0 divided by 6')
        
        self.nplanets = int(nplanets)
        self.mu = np.squeeze(mu)
        if (self.mu.size == 1):
            self.mu = np.array(mu)
        
        self.epsmult = epsmult
        
        if prefVallado:
            self.algOrder = [self.calcSTM_vallado, self.calcSTM]
        else:
            self.algOrder = [self.calcSTM, self.calcSTM_vallado]

        #create position and velocity index matrices
        tmp = np.reshape(np.arange(self.nplanets*6),(self.nplanets,6)).T
        self.rinds = tmp[0:3]
        self.vinds = tmp[3:6]

        if not(noc) and ('EXOSIMS.util.KeplerSTM_C.CyKeplerSTM' in sys.modules):
            self.havec = True
        else:
            self.havec = False

        self.updateState(np.squeeze(x0)) 
Example 27
Project: EXOSIMS   Author: dsavransky   File: keplerSTM_indprop.py   License: BSD 3-Clause "New" or "Revised" License
def calcSTM(self,dt,j):
        #allocate
        u = 0
        deltaU = 0
        t = 0
        counter = 0
        
        #For elliptic orbits, calculate period effects
        if self.beta[j] >0:
            P = 2*np.pi*self.mu[j]*self.beta[j]**(-3./2.)
            n = np.floor((dt + P/2 - 2*self.nu0[j]/self.beta[j])/P)
            deltaU = 2*np.pi*n*self.beta[j]**(-5./2.)
        
        #loop until convergence of the time array to the time step
        while (np.max(np.abs(t-dt)) > self.epsmult*np.spacing(dt)) and (counter < 1000):
            q = self.beta[j]*u**2./(1+self.beta[j]*u**2.)
            U0w2 = 1. - 2.*q
            U1w2 = 2.*(1.-q)*u
            temp = self.contFrac(q)
            U = 16./15.*U1w2**5.*temp + deltaU
            U0 = 2.*U0w2**2.-1.
            U1 = 2.*U0w2*U1w2
            U2 = 2.*U1w2**2.
            U3 = self.beta[j]*U + U1*U2/3.
            r = self.r0norm[j]*U0 + self.nu0[j]*U1 + self.mu[j]*U2
            t = self.r0norm[j]*U1 + self.nu0[j]*U2 + self.mu[j]*U3
            u = u - (t-dt)/(4.*(1.-q)*r)
            counter += 1
        
        if (counter == 1000):
            raise ValueError('Failed to converge on t: %e/%e'%(np.max(np.abs(t-dt)), self.epsmult*np.spacing(dt)))
        
        #Kepler solution
        f = 1 - self.mu[j]/self.r0norm[j]*U2
        g = self.r0norm[j]*U1 + self.nu0[j]*U2
        F = -self.mu[j]*U1/r/self.r0norm[j]
        G = 1 - self.mu[j]/r*U2
        
        Phi = np.vstack((np.hstack((np.eye(3)*f, np.eye(3)*g)),np.hstack((np.eye(3)*F, np.eye(3)*G))))
       
        return Phi 
Example 28
Project: RingNet   Author: soubhiksanyal   File: image.py   License: MIT License
def resize_img(img, scale_factor):
    new_size = (np.floor(np.array(img.shape[0:2]) * scale_factor)).astype(int)
    new_img = cv2.resize(img, (new_size[1], new_size[0]))
    # This is scale factor of [height, width] i.e. [y, x]
    actual_factor = [
        new_size[0] / float(img.shape[0]), new_size[1] / float(img.shape[1])
    ]
    return new_img, actual_factor 
Example 29
Project: HandsOn-Unsupervised-Learning-with-Python   Author: PacktPublishing   File: som.py   License: MIT License
def winning_unit(xt):
    global W
    distances = np.linalg.norm(W - xt, ord=2, axis=2)
    max_activation_unit = np.argmax(distances)
    return int(np.floor(max_activation_unit / matrix_side)), max_activation_unit % matrix_side 
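
The floor/modulo pair above converts a flat argmax index into 2D grid coordinates; a standalone sketch with a hypothetical matrix_side of 5:

import numpy as np

matrix_side = 5
flat_index = 13
row = int(np.floor(flat_index / matrix_side))
col = flat_index % matrix_side
print(row, col)                          # 2 3
print(divmod(flat_index, matrix_side))   # equivalent: (2, 3)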
Example 30
Project: DeepLung   Author: uci-cbcl   File: transforms.py   License: GNU General Public License v3.0
def resample1d(inp,inp_space,out_space=1):
    #Output shape
    print(inp.size(), inp_space, out_space)
    out_shape = list(np.int64(inp.size()[:-1]))+[int(np.floor(inp.size()[-1]*inp_space/out_space))] #Optional for if we expect a float_tensor
    out_shape = [int(item) for item in out_shape]
    # Get output coordinates, deltas, and t (chord distances)
    # torch.cuda.set_device(inp.get_device())
    # Output coordinates in real space
    coords = torch.cuda.HalfTensor(range(out_shape[-1]))*out_space
    delta = coords.fmod(inp_space).div(inp_space).repeat(out_shape[0],out_shape[1],1)
    t = torch.cuda.HalfTensor(4,out_shape[0],out_shape[1],out_shape[2]).zero_()
    t[0] = 1
    t[1] = delta
    t[2] = delta**2
    t[3] = delta**3    
    # Nearest neighbours indices
    nn = coords.div(inp_space).floor().long()    
    # Stack the nearest neighbors into P, the Points Array
    P = torch.cuda.HalfTensor(4,out_shape[0],out_shape[1],out_shape[2]).zero_()
    for i in range(-1,3):
        P[i+1] = inp.index_select(2,torch.clamp(nn+i,0,inp.size()[-1]-1))    
    #Take catmull-rom  spline interpolation:
    return 0.5*t.mul(torch.cuda.HalfTensor([[ 0,  2,  0,  0],
                            [-1,  0,  1,  0],
                            [ 2, -5,  4, -1],
                            [ -1, 3, -3,  1]]).mm(P.view(4,-1))\
                                                              .view(4,
                                                                    out_shape[0],
                                                                    out_shape[1],
                                                                    out_shape[2]))\
                                                              .sum(0)\
                                                              .squeeze()