Python numpy.remainder() Examples

The following are 30 code examples of numpy.remainder(), drawn from open-source projects. The source file, project, and license for each example are noted in the header above it. You may also want to check out the other available functions and classes of the numpy module.
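As a quick refresher before the project code, here is a minimal, self-contained sketch of what numpy.remainder() computes: the element-wise remainder of division, with the result taking the sign of the divisor (it matches Python's % operator, not C's fmod).

import numpy as np

# Result has the same sign as the divisor.
print(np.remainder([5, -5, 5, -5], [3, 3, -3, -3]))  # [ 2  1 -1 -2]

# Works element-wise on scalars and arrays alike.
print(np.remainder(7.5, 2.0))                        # 1.5

# np.fmod follows the sign of the dividend instead (C-style fmod).
print(np.fmod([5, -5], [3, 3]))                      # [ 2 -2]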
Example #1
Source File: ecg_simulate.py    From NeuroKit with MIT License
def _ecg_simulate_derivsecgsyn(t, x, rr, ti, sfint, ai, bi):

    ta = math.atan2(x[1], x[0])
    r0 = 1
    a0 = 1.0 - np.sqrt(x[0] ** 2 + x[1] ** 2) / r0

    ip = np.floor(t * sfint).astype(int)
    w0 = 2 * np.pi / rr[min(ip, len(rr) - 1)]
    # w0 = 2*np.pi/rr[ip[ip <= np.max(rr)]]

    fresp = 0.25
    zbase = 0.005 * np.sin(2 * np.pi * fresp * t)

    dx1dt = a0 * x[0] - w0 * x[1]
    dx2dt = a0 * x[1] + w0 * x[0]

    # MATLAB's rem() and numpy.remainder() use different sign conventions
    # (see the note after this example), so the angle offset is wrapped explicitly:
    # dti = np.remainder(ta - ti, 2*np.pi)
    dti = (ta - ti) - np.round((ta - ti) / 2 / np.pi) * 2 * np.pi
    dx3dt = -np.sum(ai * dti * np.exp(-0.5 * (dti / bi) ** 2)) - 1 * (x[2] - zbase)

    dxdt = np.array([dx1dt, dx2dt, dx3dt])
    return dxdt 
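A short note on the in-code comment above about MATLAB and NumPy: np.remainder() follows the divisor's sign (like MATLAB's mod), while np.fmod() follows the dividend's sign (like MATLAB's rem). The explicit round-based expression used in the derivative function instead wraps the phase difference into [-pi, pi]. A minimal sketch comparing the three conventions:

import numpy as np

d = np.array([-4.0, -1.0, 1.0, 4.0])                     # example phase differences (radians)

wrap_mod  = np.remainder(d, 2 * np.pi)                   # in [0, 2*pi), like MATLAB mod()
wrap_fmod = np.fmod(d, 2 * np.pi)                        # keeps the sign of d, like MATLAB rem()
wrap_sym  = d - np.round(d / (2 * np.pi)) * 2 * np.pi    # symmetric wrap into [-pi, pi]

print(wrap_mod)    # approx [2.283 5.283 1.    4.   ]
print(wrap_fmod)   # [-4. -1.  1.  4.]
print(wrap_sym)    # approx [ 2.283 -1.     1.    -2.283]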
Example #2
Source File: encoder.py    From mhfp with MIT License
def from_sparse_array(self, array):
        """Creates the hash set for a sparse binary array and returns it without changing the hash
    values of this instance. This is useful when a molecular shingling is already hashed.
    
    Arguments:
      array {numpy.ndarray} -- A sparse binary array.
    
    Returns:
      numpy.ndarray -- An array containing the hash values.
    """

        hash_values = np.zeros([self.n_permutations, 1], dtype=np.uint32)
        hash_values.fill(MHFPEncoder.max_hash)

        for i in array:
            hashes = np.remainder(
                np.remainder(
                    self.permutations_a * i + self.permutations_b, MHFPEncoder.prime
                ),
                self.max_hash,
            )
            hash_values = np.minimum(hash_values, hashes)

        return hash_values.reshape((1, self.n_permutations))[0] 
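The nested np.remainder() call above is the core of the MinHash scheme: each of the n_permutations random pairs (a, b) defines a hash a*i + b mod prime, which is then reduced modulo max_hash, and the running element-wise minimum over all set bits becomes the signature. A rough standalone sketch of the same pattern; the constants, sizes, and seed here are illustrative stand-ins, not the values MHFPEncoder actually uses:

import numpy as np

rng = np.random.RandomState(42)
n_permutations = 8
prime = (1 << 31) - 1                 # a Mersenne prime (illustrative choice)
max_hash = (1 << 20) - 1

# One random (a, b) pair per permutation.
perms_a = rng.randint(1, prime, size=(n_permutations, 1), dtype=np.int64)
perms_b = rng.randint(0, prime, size=(n_permutations, 1), dtype=np.int64)

hash_values = np.full((n_permutations, 1), max_hash, dtype=np.int64)
for i in [3, 17, 42]:                 # indices of "on" bits in a toy sparse fingerprint
    hashes = np.remainder(np.remainder(perms_a * i + perms_b, prime), max_hash)
    hash_values = np.minimum(hash_values, hashes)

print(hash_values.ravel())            # the MinHash signature for this toy input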
Example #3
Source File: encoder.py    From mhfp with MIT License
def from_molecular_shingling(self, tokens):
        """Creates the hash set for a string array and returns it without changing the hash values of
    this instance.
    
    Arguments:
      tokens {numpy.ndarray} -- A string array.
    
    Returns:
      numpy.ndarray -- An array containing the hash values.
    """

        hash_values = np.zeros([self.n_permutations, 1], dtype=np.uint32)
        hash_values.fill(MHFPEncoder.max_hash)

        for t in tokens:
            t_h = struct.unpack("<I", sha1(t).digest()[:4])[0]
            hashes = np.remainder(
                np.remainder(
                    self.permutations_a * t_h + self.permutations_b, MHFPEncoder.prime
                ),
                self.max_hash,
            )
            hash_values = np.minimum(hash_values, hashes)

        return hash_values.reshape((1, self.n_permutations))[0] 
Example #4
Source File: transformers.py    From PADME with MIT License
def get_cdf_values(array, bins):
  # array = np.transpose(array)
  n_rows = array.shape[0]
  n_cols = array.shape[1]
  array_t = np.zeros((n_rows, n_cols))
  parts = n_rows / bins
  hist_values = np.zeros(n_rows)
  sorted_hist_values = np.zeros(n_rows)
  for row in range(n_rows):
    if np.remainder(bins, 2) == 1:
      hist_values[row] = np.floor(np.divide(row, parts)) / (bins - 1)
    else:
      hist_values[row] = np.floor(np.divide(row, parts)) / bins
  for col in range(n_cols):
    order = np.argsort(array[:, col], axis=0)
    sorted_hist_values = hist_values[order]
    array_t[:, col] = sorted_hist_values

  return array_t 
Example #5
Source File: transformers.py    From deepchem with MIT License
def get_cdf_values(array, bins):
  # array = np.transpose(array)
  n_rows = array.shape[0]
  n_cols = array.shape[1]
  array_t = np.zeros((n_rows, n_cols))
  parts = n_rows / bins
  hist_values = np.zeros(n_rows)
  sorted_hist_values = np.zeros(n_rows)
  for row in range(n_rows):
    if np.remainder(bins, 2) == 1:
      hist_values[row] = np.floor(np.divide(row, parts)) / (bins - 1)
    else:
      hist_values[row] = np.floor(np.divide(row, parts)) / bins
  for col in range(n_cols):
    order = np.argsort(array[:, col], axis=0)
    sorted_hist_values = hist_values[order]
    array_t[:, col] = sorted_hist_values

  return array_t 
Example #6
Source File: encoder.py    From mhfp with MIT License
def from_binary_array(self, array):
        """Creates the hash set for a binary array and returns it without changing the hash
    values of this instance. This is useful to minhash a folded fingerprint.
    
    Arguments:
      array {numpy.ndarray} -- A binary array.
    
    Returns:
      numpy.ndarray -- An array containing the hash values.
    """

        hash_values = np.zeros([self.n_permutations, 1], dtype=np.uint32)
        hash_values.fill(MHFPEncoder.max_hash)

        for i, v in enumerate(array):
            if v == 1:
                hashes = np.remainder(
                    np.remainder(
                        self.permutations_a * i + self.permutations_b, MHFPEncoder.prime
                    ),
                    self.max_hash,
                )
                hash_values = np.minimum(hash_values, hashes)

        return hash_values.reshape((1, self.n_permutations))[0] 
Example #7
Source File: video_sampler.py    From dmc-net with MIT License
def sampling(self, range_max, v_id=None, prev_failed=False):
        assert range_max > 0, \
            ValueError("range_max = {}".format(range_max))
        interval = self.rng.choice(self.interval)
        if self.num == 1:
            return [self.rng.choice(range(0, range_max))]
        # sampling
        speed_min = self.speed[0]
        speed_max = min(self.speed[1], (range_max-1)/((self.num-1)*interval))
        if speed_max < speed_min:
            return (np.remainder(np.arange(0, self.num * interval, interval), range_max)).tolist()
        random_interval = self.rng.uniform(speed_min, speed_max) * interval
        frame_range = (self.num-1) * random_interval
        clip_start = self.rng.uniform(0, (range_max-1) - frame_range)
        clip_end = clip_start + frame_range
        return np.linspace(clip_start, clip_end, self.num).astype(dtype=np.int).tolist() 
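In the fallback branch above (speed_max < speed_min), np.remainder() wraps an evenly spaced index sequence back into [0, range_max), so every returned frame index stays valid even when the requested span exceeds the video length. A standalone illustration with made-up sampler settings:

import numpy as np

num, interval, range_max = 8, 3, 10            # hypothetical values
idxs = np.arange(0, num * interval, interval)  # [ 0  3  6  9 12 15 18 21]
wrapped = np.remainder(idxs, range_max)        # [ 0  3  6  9  2  5  8  1]
print(wrapped.tolist())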
Example #8
Source File: test_umath.py    From auto-alt-text-lambda-api with MIT License
def test_remainder_basic(self):
        dt = np.typecodes['AllInteger'] + np.typecodes['Float']
        for dt1, dt2 in itertools.product(dt, dt):
            for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
                if sg1 == -1 and dt1 in np.typecodes['UnsignedInteger']:
                    continue
                if sg2 == -1 and dt2 in np.typecodes['UnsignedInteger']:
                    continue
                fmt = 'dt1: %s, dt2: %s, sg1: %s, sg2: %s'
                msg = fmt % (dt1, dt2, sg1, sg2)
                a = np.array(sg1*71, dtype=dt1)
                b = np.array(sg2*19, dtype=dt2)
                div = np.floor_divide(a, b)
                rem = np.remainder(a, b)
                assert_equal(div*b + rem, a, err_msg=msg)
                if sg2 == -1:
                    assert_(b < rem <= 0, msg)
                else:
                    assert_(b > rem >= 0, msg) 
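The test above exercises the defining property of np.remainder(): combined with np.floor_divide() it reconstructs the dividend exactly, and the remainder carries the divisor's sign. A small standalone check of the same invariant:

import numpy as np

a = np.array([ 71, -71,  71, -71])
b = np.array([ 19,  19, -19, -19])

div = np.floor_divide(a, b)
rem = np.remainder(a, b)

assert np.array_equal(div * b + rem, a)                     # a == b*floor(a/b) + rem
assert np.all((rem == 0) | (np.sign(rem) == np.sign(b)))    # remainder follows the divisor's sign
print(div, rem)                                             # [ 3 -4 -4  3] [ 14   5  -5 -14]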
Example #9
Source File: test_umath.py    From auto-alt-text-lambda-api with MIT License
def test_float_remainder_exact(self):
        # test that float results are exact for small integers. This also
        # holds for the same integers scaled by powers of two.
        nlst = list(range(-127, 0))
        plst = list(range(1, 128))
        dividend = nlst + [0] + plst
        divisor = nlst + plst
        arg = list(itertools.product(dividend, divisor))
        tgt = list(divmod(*t) for t in arg)

        a, b = np.array(arg, dtype=int).T
        # convert exact integer results from Python to float so that
        # signed zeros can be used and checked.
        tgtdiv, tgtrem = np.array(tgt, dtype=float).T
        tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
        tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)

        for dt in np.typecodes['Float']:
            msg = 'dtype: %s' % (dt,)
            fa = a.astype(dt)
            fb = b.astype(dt)
            div = np.floor_divide(fa, fb)
            rem = np.remainder(fa, fb)
            assert_equal(div, tgtdiv, err_msg=msg)
            assert_equal(rem, tgtrem, err_msg=msg) 
Example #10
Source File: test_umath.py    From auto-alt-text-lambda-api with MIT License
def test_float_remainder_roundoff(self):
        # gh-6127
        dt = np.typecodes['Float']
        for dt1, dt2 in itertools.product(dt, dt):
            for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
                fmt = 'dt1: %s, dt2: %s, sg1: %s, sg2: %s'
                msg = fmt % (dt1, dt2, sg1, sg2)
                a = np.array(sg1*78*6e-8, dtype=dt1)
                b = np.array(sg2*6e-8, dtype=dt2)
                div = np.floor_divide(a, b)
                rem = np.remainder(a, b)
                # Equal assertion should hold when fmod is used
                assert_equal(div*b + rem, a, err_msg=msg)
                if sg2 == -1:
                    assert_(b < rem <= 0, msg)
                else:
                    assert_(b > rem >= 0, msg) 
Example #11
Source File: video_sampler.py    From dmc-net with MIT License
def sampling(self, range_max, v_id, prev_failed=False):
        assert range_max > 0, \
            ValueError("range_max = {}".format(range_max))
        num = self.num
        interval = self.rng.choice(self.interval)
        frame_range = (num - 1) * interval + 1
        # sampling clips
        if v_id not in self.memory:
            clips = list(range(0, range_max-(frame_range-1), frame_range))
            if self.shuffle:
                self.rng.shuffle(clips)
            self.memory[v_id] = [-1, clips]
        # pickup a clip
        cursor, clips = self.memory[v_id]
        if not clips:
            return (np.remainder(np.arange(0, self.num * interval, interval), range_max)).tolist()
        cursor = (cursor + 1) % len(clips)
        if prev_failed or not self.fix_cursor:
            self.memory[v_id][0] = cursor
        # sampling within clip
        idxs = range(clips[cursor], clips[cursor]+frame_range, interval)
        return idxs 
Example #12
Source File: magphase.py    From magphase with Apache License 2.0
def frame_to_state_mapping(shift_file, lab_file, fs, states_per_phone=5):
    #Read files:
    v_shift = lu.read_binfile(shift_file, dim=1)
    v_pm = la.shift_to_pm(v_shift)
    m_state_times = np.loadtxt(lab_file, usecols=(0,1))    
    
    # to milliseconds:
    v_pm_ms = 1000 * v_pm / fs
    m_state_times_ms = m_state_times / 10000.0    
    
    # Compare:
    nfrms = len(v_pm_ms)
    v_st = np.zeros(nfrms) - 1 # init
    for f in xrange(nfrms):
        vb_greater = (v_pm_ms[f] >= m_state_times_ms[:,0])  # * (v_pm_ms[f] <  m_state_times_ms[:,1])
        state_nx   = np.where(vb_greater)[0][-1]
        v_st[f]    = np.remainder(state_nx, states_per_phone)
    return v_st
    
#============================================================================== 
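In the loop above, np.remainder(state_nx, states_per_phone) converts a global state index (counted across all phones in the label file) into a within-phone state index in 0..states_per_phone-1. A one-line numeric illustration:

import numpy as np

state_nx = np.array([0, 4, 5, 12])
print(np.remainder(state_nx, 5))   # [0 4 0 2]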
Example #13
Source File: generator.py    From 3D-CNNs-for-Liver-Classification with Apache License 2.0
def get_number_of_steps(n_samples, batch_size):
    if n_samples <= batch_size:
        return n_samples
    elif np.remainder(n_samples, batch_size) == 0:
        return n_samples//batch_size
    else:
        return n_samples//batch_size + 1 
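Here np.remainder(n_samples, batch_size) == 0 is simply an exact-divisibility test that decides whether a final partial batch is needed. The same logic in a self-contained snippet:

import numpy as np

n_samples, batch_size = 101, 10
if np.remainder(n_samples, batch_size) == 0:
    steps = n_samples // batch_size        # fits exactly into full batches
else:
    steps = n_samples // batch_size + 1    # one extra, partial batch
print(steps)   # 11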
Example #14
Source File: generator.py    From 3DUnetCNN with MIT License
def get_number_of_steps(n_samples, batch_size):
    if n_samples <= batch_size:
        return n_samples
    elif np.remainder(n_samples, batch_size) == 0:
        return n_samples//batch_size
    else:
        return n_samples//batch_size + 1 
Example #15
Source File: test_umath.py    From elasticintel with GNU General Public License v3.0
def test_float_remainder_corner_cases(self):
        # Check remainder magnitude.
        for dt in np.typecodes['Float']:
            b = np.array(1.0, dtype=dt)
            a = np.nextafter(np.array(0.0, dtype=dt), -b)
            rem = np.remainder(a, b)
            assert_(rem <= b, 'dt: %s' % dt)
            rem = np.remainder(-a, -b)
            assert_(rem >= -b, 'dt: %s' % dt)

        # Check nans, inf
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in remainder")
            for dt in np.typecodes['Float']:
                fone = np.array(1.0, dtype=dt)
                fzer = np.array(0.0, dtype=dt)
                finf = np.array(np.inf, dtype=dt)
                fnan = np.array(np.nan, dtype=dt)
                rem = np.remainder(fone, fzer)
                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
                # MSVC 2008 returns NaN here, so disable the check.
                #rem = np.remainder(fone, finf)
                #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem))
                rem = np.remainder(fone, fnan)
                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
                rem = np.remainder(finf, fone)
                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) 
Example #16
Source File: test_umath.py    From coffeegrindsize with MIT License
def test_float_remainder_corner_cases(self):
        # Check remainder magnitude.
        for dt in np.typecodes['Float']:
            b = np.array(1.0, dtype=dt)
            a = np.nextafter(np.array(0.0, dtype=dt), -b)
            rem = np.remainder(a, b)
            assert_(rem <= b, 'dt: %s' % dt)
            rem = np.remainder(-a, -b)
            assert_(rem >= -b, 'dt: %s' % dt)

        # Check nans, inf
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in remainder")
            for dt in np.typecodes['Float']:
                fone = np.array(1.0, dtype=dt)
                fzer = np.array(0.0, dtype=dt)
                finf = np.array(np.inf, dtype=dt)
                fnan = np.array(np.nan, dtype=dt)
                rem = np.remainder(fone, fzer)
                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
                # MSVC 2008 returns NaN here, so disable the check.
                #rem = np.remainder(fone, finf)
                #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem))
                rem = np.remainder(fone, fnan)
                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
                rem = np.remainder(finf, fone)
                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) 
Example #17
Source File: cartpole.py    From bsuite with Apache License 2.0
def step_cartpole(action: int,
                  timescale: float,
                  state: CartpoleState,
                  config: CartpoleConfig) -> CartpoleState:
  """Helper function to step cartpole state under given config."""
  # Unpack variables into "short" names for mathematical equation
  force = (action - 1) * config.force_mag
  cos = np.cos(state.theta)
  sin = np.sin(state.theta)
  pl = config.mass_pole * config.length
  l = config.length
  m_pole = config.mass_pole
  m_total = config.mass_cart + config.mass_pole
  g = config.gravity

  # Compute the physical evolution
  temp = (force + pl * state.theta_dot**2 * sin) / m_total
  theta_acc = (g * sin - cos * temp) / (l * (4/3 - m_pole * cos**2 / m_total))
  x_acc = temp - pl * theta_acc * cos / m_total

  # Update states according to discrete dynamics
  x = state.x + timescale * state.x_dot
  x_dot = state.x_dot + timescale * x_acc
  theta = np.remainder(
      state.theta + timescale * state.theta_dot, 2 * np.pi)
  theta_dot = state.theta_dot + timescale * theta_acc
  time_elapsed = state.time_elapsed + timescale

  return CartpoleState(x, x_dot, theta, theta_dot, time_elapsed) 
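The np.remainder(..., 2 * np.pi) call above keeps the pole angle in [0, 2*pi) regardless of how many full rotations accumulate over an episode. A minimal sketch of that normalization on its own:

import numpy as np

theta = np.array([-0.1, 3.5, 7.0, -9.0])    # raw angles in radians
wrapped = np.remainder(theta, 2 * np.pi)    # always in [0, 2*pi)
print(wrapped)   # approx [6.183 3.5   0.717 3.566]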
Example #18
Source File: transforms.py    From SpatioTemporalSegmentation with MIT License
def __call__(self, coords, feats, labels):
    # Assume feat[:, :3] is rgb
    hsv = HueSaturationTranslation.rgb_to_hsv(feats[:, :3])
    hue_val = (random.random() - 0.5) * 2 * self.hue_max
    sat_ratio = 1 + (random.random() - 0.5) * 2 * self.saturation_max
    hsv[..., 0] = np.remainder(hue_val + hsv[..., 0] + 1, 1)
    hsv[..., 1] = np.clip(sat_ratio * hsv[..., 1], 0, 1)
    feats[:, :3] = np.clip(HueSaturationTranslation.hsv_to_rgb(hsv), 0, 255)

    return coords, feats, labels


##############################
# Coordinate transformations
############################## 
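In the hue channel update above, np.remainder(hue_val + hsv[..., 0] + 1, 1) shifts every hue by a random amount and wraps the result back into [0, 1); the + 1 guards against negative intermediate values, although np.remainder would already return a non-negative result for a positive divisor. A tiny standalone check with a made-up shift:

import numpy as np

hue = np.array([0.05, 0.5, 0.95])
shift = -0.1                                  # hypothetical random hue shift
print(np.remainder(hue + shift + 1, 1))       # approx [0.95 0.4  0.85]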
Example #19
Source File: functions.py    From safe_learning with MIT License
def simplices(self, indices):
        """Return the simplices corresponding to the simplex index.

        Parameters
        ----------
        indices : ndarray
            The indices of the simpleces

        Returns
        -------
        simplices : ndarray
            Each row consists of the indices of the simplex corners.

        """
        # Get the indices inside the unit rectangle
        unit_indices = np.remainder(indices, self.triangulation.nsimplex)
        simplices = self.unit_simplices[unit_indices].copy()

        # Shift indices to corresponding rectangle
        rectangles = np.floor_divide(indices, self.triangulation.nsimplex)
        corner_index = self.discretization.rectangle_corner_index(rectangles)

        if simplices.ndim > 1:
            corner_index = corner_index[:, None]

        simplices += corner_index
        return simplices 
Example #20
Source File: test_umath.py    From elasticintel with GNU General Public License v3.0
def floor_divide_and_remainder(x, y):
    return (np.floor_divide(x, y), np.remainder(x, y)) 
Example #21
Source File: magphase.py    From magphase with Apache License 2.0
def frame_to_state_mapping2(shift_file, state_lab_file, fs, states_per_phone=5, b_refine=True):
    #Read files:
    v_shift = lu.read_binfile(shift_file, dim=1)
    v_pm = la.shift_to_pm(v_shift)
    m_state_times = np.loadtxt(state_lab_file, usecols=(0,1))    
    
    # to milliseconds:
    v_pm_ms = 1000 * v_pm / fs
    m_state_times_ms = m_state_times / 10000.0    
    
    # Compare:
    nfrms = len(v_pm_ms)
    v_st = np.zeros(nfrms) - 1 # init
    for f in xrange(nfrms):
        vb_greater = (v_pm_ms[f] >= m_state_times_ms[:,0])  # * (v_pm_ms[f] <  m_state_times_ms[:,1])
        state_nx   = np.where(vb_greater)[0][-1]
        v_st[f]    = np.remainder(state_nx, states_per_phone)

        # Refining:
        if b_refine:
            state_len_ms = m_state_times_ms[state_nx,1] - m_state_times_ms[state_nx,0]
            fine_pos = ( v_pm_ms[f] - m_state_times_ms[state_nx,0] ) / state_len_ms
            v_st[f] += fine_pos 
            
    # Protection against wrong ended label files:
    np.clip(v_st, 0, states_per_phone, out=v_st)      
            
    return v_st
    
#============================================================================== 
Example #22
Source File: test_umath.py    From mxnet-lambda with Apache License 2.0
def test_float_remainder_corner_cases(self):
        # Check remainder magnitude.
        for dt in np.typecodes['Float']:
            b = np.array(1.0, dtype=dt)
            a = np.nextafter(np.array(0.0, dtype=dt), -b)
            rem = np.remainder(a, b)
            assert_(rem <= b, 'dt: %s' % dt)
            rem = np.remainder(-a, -b)
            assert_(rem >= -b, 'dt: %s' % dt)

        # Check nans, inf
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in remainder")
            for dt in np.typecodes['Float']:
                fone = np.array(1.0, dtype=dt)
                fzer = np.array(0.0, dtype=dt)
                finf = np.array(np.inf, dtype=dt)
                fnan = np.array(np.nan, dtype=dt)
                rem = np.remainder(fone, fzer)
                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
                # MSVC 2008 returns NaN here, so disable the check.
                #rem = np.remainder(fone, finf)
                #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem))
                rem = np.remainder(fone, fnan)
                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
                rem = np.remainder(finf, fone)
                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) 
Example #23
Source File: test_umath.py    From mxnet-lambda with Apache License 2.0
def floor_divide_and_remainder(x, y):
    return (np.floor_divide(x, y), np.remainder(x, y)) 
Example #24
Source File: instance_catalog.py    From gcr-catalogs with BSD 3-Clause "New" or "Revised" License
def _total_shape(a_bulge, b_bulge, theta_bulge, mag_bulge,
                 a_disk, b_disk, theta_disk, mag_disk, result='all'):

    Q_bulge = np.zeros((2, 2, len(mag_bulge)))
    Q_disk = np.zeros_like(Q_bulge)

    m = np.isfinite(mag_bulge)
    Q_bulge[:,:,m] = sersic_second_moments(4,
                                           np.sqrt(a_bulge[m]*b_bulge[m]),
                                           b_bulge[m]/a_bulge[m],
                                           np.deg2rad(theta_bulge[m]))
    m = np.isfinite(mag_disk)
    Q_disk[:,:,m] = sersic_second_moments(1,
                                          np.sqrt(a_disk[m]*b_disk[m]),
                                          a_disk[m]/b_disk[m],
                                          np.deg2rad(theta_disk[m]))

    f_bulge = _get_bulge_fraction(mag_bulge, mag_disk)
    Q_total = Q_bulge * f_bulge + Q_disk * (1.0 - f_bulge)
    a, b, beta, e1, e2 = np.array([moments_size_and_shape(Q_total[:,:,i]) for i in range(Q_total.shape[-1])]).T  # pylint: disable=unpacking-non-sequence
    beta = np.remainder(np.rad2deg(beta), 180.0)
    if result == 'a':
        return a
    if result == 'b':
        return b
    if result == 'beta':
        return beta
    if result == 'e1':
        return e1
    if result == 'e2':
        return e2
    return a, b, beta, e1, e2 
Example #25
Source File: buzzard.py    From gcr-catalogs with BSD 3-Clause "New" or "Revised" License
def _ellip2pa(e1, e2):
    return np.remainder(np.rad2deg(np.arctan2(e2, e1)/2.0), 180.0) 
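This helper converts ellipticity components (e1, e2) into a position angle: np.arctan2(e2, e1) / 2 gives the angle in radians, and np.remainder(..., 180.0) folds the degree value into [0, 180). A quick numeric check with toy values:

import numpy as np

e1 = np.array([ 0.3, -0.3,  0.0])
e2 = np.array([ 0.0,  0.0, -0.3])
pa = np.remainder(np.rad2deg(np.arctan2(e2, e1) / 2.0), 180.0)
print(pa)   # [  0.  90. 135.]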
Example #26
Source File: pairwise.py    From airlab with Apache License 2.0
def _initialize(self):

        cp_grid = np.ceil(np.divide(self._image_size, self._stride)).astype(dtype=int)

        # new image size after convolution
        inner_image_size = np.multiply(self._stride, cp_grid) - (self._stride - 1)

        # add one control point at each side
        cp_grid = cp_grid + 2

        # image size with additional control points
        new_image_size = np.multiply(self._stride, cp_grid) - (self._stride - 1)

        # center image between control points
        image_size_diff = inner_image_size - self._image_size
        image_size_diff_floor = np.floor((np.abs(image_size_diff)/2))*np.sign(image_size_diff)

        self._crop_start = image_size_diff_floor + np.remainder(image_size_diff, 2)*np.sign(image_size_diff)
        self._crop_end = image_size_diff_floor

        cp_grid = [1, self._dim] + cp_grid.tolist()

        # create transformation parameters
        self.trans_parameters = Parameter(th.Tensor(*cp_grid))
        self.trans_parameters.data.fill_(0)

        # copy to gpu if needed
        self.to(dtype=self._dtype, device=self._device)

        # convert to integer
        self._padding = self._padding.astype(dtype=int).tolist()
        self._stride = self._stride.astype(dtype=int).tolist()

        self._crop_start = self._crop_start.astype(dtype=int)
        self._crop_end = self._crop_end.astype(dtype=int)

        size = [1, 1] + new_image_size.astype(dtype=int).tolist()
        self._displacement_tmp = th.empty(*size, dtype=self._dtype, device=self._device)

        size = [1, 1] + self._image_size.astype(dtype=int).tolist()
        self._displacement = th.empty(*size, dtype=self._dtype, device=self._device) 
Example #27
Source File: test_umath.py    From pySINDy with MIT License
def test_float_remainder_corner_cases(self):
        # Check remainder magnitude.
        for dt in np.typecodes['Float']:
            b = np.array(1.0, dtype=dt)
            a = np.nextafter(np.array(0.0, dtype=dt), -b)
            rem = np.remainder(a, b)
            assert_(rem <= b, 'dt: %s' % dt)
            rem = np.remainder(-a, -b)
            assert_(rem >= -b, 'dt: %s' % dt)

        # Check nans, inf
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in remainder")
            for dt in np.typecodes['Float']:
                fone = np.array(1.0, dtype=dt)
                fzer = np.array(0.0, dtype=dt)
                finf = np.array(np.inf, dtype=dt)
                fnan = np.array(np.nan, dtype=dt)
                rem = np.remainder(fone, fzer)
                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
                # MSVC 2008 returns NaN here, so disable the check.
                #rem = np.remainder(fone, finf)
                #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem))
                rem = np.remainder(fone, fnan)
                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
                rem = np.remainder(finf, fone)
                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) 
Example #28
Source File: test_umath.py    From pySINDy with MIT License
def floor_divide_and_remainder(x, y):
    return (np.floor_divide(x, y), np.remainder(x, y)) 
Example #29
Source File: layers.py    From cs231n-practice with MIT License
def max_pool_backward_naive(dout, cache):
  """
  A naive implementation of the backward pass for a max pooling layer.

  Inputs:
  - dout: Upstream derivatives
  - cache: A tuple of (x, pool_param) as in the forward pass.

  Returns:
  - dx: Gradient with respect to x
  """
  #############################################################################
  # TODO: Implement the max pooling backward pass                             #
  #############################################################################

  # unpack layer cache
  x, pool_param = cache

  N, C, H, W = x.shape
  pool_height, pool_width, stride = pool_param['pool_height'], pool_param['pool_width'], pool_param['stride']

  dx = np.zeros_like(x)
  # Pooling layer backward using iterative method
  for ii, i in enumerate(xrange(0, H, stride)):
    for jj, j in enumerate(xrange(0, W, stride)):
      max_idx = np.argmax( x[:, :, i:i+pool_height,j:j+pool_width].reshape(N, C, -1), axis=2)

      max_cols = np.remainder(max_idx, pool_width) + j
      max_rows = max_idx / pool_width + i

      for n in xrange(N):
        for c in xrange(C):
          dx[n, c, max_rows[n, c], max_cols[n, c]] += dout[n, c, ii, jj]


  dx = dx.reshape(N, C, H, W)

  return dx 
Example #30
Source File: erbfilter.py    From pycochleagram with BSD 3-Clause Clear License
def make_full_filter_set(filts, signal_length=None):
  """Create the full set of filters by extending the filterbank to negative FFT
  frequencies.

  Args:
    filts (array_like): Array containing the cochlear filterbank in frequency space,
      i.e., the output of make_erb_cos_filters_nx. Each row of ``filts`` is a
      single filter, with columns indexing frequency.
    signal_length (int, optional): Length of the signal to be filtered with this filterbank.
      This should be equal to filter length * 2 - 1, i.e., 2*filts.shape[1] - 1, and if
      signal_length is None, this value will be computed with the above formula.
      This parameter might be deprecated later.

  Returns:
    ndarray:
    **full_filter_set** -- Array containing the complete filterbank in
    frequency space. This output can be directly applied to the frequency
    representation of a signal.
  """
  if signal_length is None:
    signal_length = 2 * filts.shape[1] - 1

  # note that filters are currently such that each ROW is a filter and COLUMN idxs freq
  if np.remainder(signal_length, 2) == 0:  # even -- don't take the DC & don't double sample nyquist
    neg_filts = np.flipud(filts[1:filts.shape[0] - 1, :])
  else:  # odd -- don't take the DC
    neg_filts = np.flipud(filts[1:filts.shape[0], :])
  fft_filts = np.vstack((filts, neg_filts))
  # we need to switch representation to apply filters to fft of the signal, not sure why, but do it here
  return fft_filts.T
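The np.remainder(signal_length, 2) == 0 test above is a parity check: for even signal lengths the spectrum has a real Nyquist bin that must not be duplicated when the positive-frequency filters are mirrored to negative frequencies, while for odd lengths only the DC bin is skipped. A simplified, self-contained illustration of that even/odd logic using a real signal's FFT (this mirrors the parity reasoning, not pycochleagram's exact array layout):

import numpy as np

for signal_length in (8, 9):
    x = np.random.randn(signal_length)
    half = np.fft.rfft(x)                                   # bins 0 .. n//2 (non-negative freqs)
    if np.remainder(signal_length, 2) == 0:                 # even: skip DC and Nyquist when mirroring
        full = np.concatenate([half, np.conj(half[-2:0:-1])])
    else:                                                   # odd: skip DC only
        full = np.concatenate([half, np.conj(half[-1:0:-1])])
    assert np.allclose(full, np.fft.fft(x))                 # full spectrum recovered
    print(signal_length, len(half), len(full))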