Python numpy.roll() Examples

The following are 30 code examples of numpy.roll(), taken from open source projects. The source file, project, and license for each snippet are listed above the example.
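Before the project examples, here is a quick stand-alone sketch of the basic numpy.roll behaviour: elements are shifted circularly by the given number of positions and wrap around the end of the array (or of the chosen axis).

import numpy as np

a = np.arange(6)                     # [0 1 2 3 4 5]
print(np.roll(a, 2))                 # [4 5 0 1 2 3]  positive shift moves right, wrapping around
print(np.roll(a, -1))                # [1 2 3 4 5 0]  negative shift moves left

m = np.arange(9).reshape(3, 3)
print(np.roll(m, 1, axis=0))         # rows rotate down by one (the last row wraps to the top)
print(np.roll(m, 1, axis=1))         # columns rotate right by one
print(np.roll(m, 1))                 # no axis: the flattened array is rolled, then reshaped back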
Example #1
Source File: extraterrestrial_marauders.py    From pycolab with Apache License 2.0
def update(self, actions, board, layers, backdrop, things, the_plot):
    # Where are the laser bolts? Only bolts from the player kill a Marauder.
    bolts = np.logical_or.reduce([layers[c] for c in UPWARD_BOLT_CHARS], axis=0)
    hits = bolts & self.curtain                       # Any hits to Marauders?
    np.logical_xor(self.curtain, hits, self.curtain)  # If so, zap the marauder...
    the_plot.add_reward(np.sum(hits)*10)              # ...and supply a reward.
    # Save the identities of marauder-striking bolts in the Plot.
    the_plot['marauder_hitters'] = [chr(c) for c in board[hits]]

    # If no Marauders are left, or if any are sitting on row 10, end the game.
    if (not self.curtain.any()) or self.curtain[10, :].any():
      return the_plot.terminate_episode()  # i.e. return None.

    # We move faster if there are fewer Marauders. The odd divisor causes speed
    # jumps to align on the high sides of multiples of 8; so, speed increases as
    # the number of Marauders decreases to 32 (or 24 etc.), not 31 (or 23 etc.).
    if the_plot.frame % max(1, np.sum(self.curtain)//8.0000001): return
    # If any Marauder reaches either side of the screen, reverse horizontal
    # motion and advance vertically one row.
    if np.any(self.curtain[:, 0] | self.curtain[:, -1]):
      self._dx = -self._dx
      self.curtain[:] = np.roll(self.curtain, shift=1, axis=0)
    self.curtain[:] = np.roll(self.curtain, shift=self._dx, axis=1) 
Example #2
Source File: utils_sisr.py    From KAIR with MIT License
def p2o(psf, shape):
    '''
    Convert a point-spread function (PSF) to its optical transfer function (OTF).

    Args:
        psf: NxCxhxw
        shape: [H,W]

    Returns:
        otf: NxCxHxWx2
    '''
    # Zero-pad the PSF into an HxW array, keeping it in the top-left corner.
    otf = torch.zeros(psf.shape[:-2] + shape).type_as(psf)
    otf[..., :psf.shape[2], :psf.shape[3]].copy_(psf)
    # Circularly shift so the PSF center lands at index (0, 0) before the FFT.
    for axis, axis_size in enumerate(psf.shape[2:]):
        otf = torch.roll(otf, -int(axis_size / 2), dims=axis + 2)
    # 2D FFT; torch.rfft (pre-PyTorch-1.8 API) with onesided=False keeps the full
    # spectrum, with real and imaginary parts stacked in the last dimension.
    otf = torch.rfft(otf, 2, onesided=False)
    # Zero out imaginary parts that are merely numerical round-off.
    n_ops = torch.sum(torch.tensor(psf.shape).type_as(psf) * torch.log2(torch.tensor(psf.shape).type_as(psf)))
    otf[..., 1][torch.abs(otf[..., 1]) < n_ops * 2.22e-16] = torch.tensor(0).type_as(psf)
    return otf
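The roll-before-FFT step is the key use of roll here: the kernel is padded into a larger zero array with its top-left corner at (0, 0), then circularly shifted so the kernel center sits at index (0, 0) before the transform. A minimal NumPy analogue of that centering step (not the KAIR code itself, just an illustration with the same names):

import numpy as np

psf = np.ones((3, 3)) / 9.0                      # small blur kernel, center at (1, 1)
H, W = 8, 8

otf = np.zeros((H, W))
otf[:psf.shape[0], :psf.shape[1]] = psf          # zero-pad into the top-left corner
for axis, axis_size in enumerate(psf.shape):
    otf = np.roll(otf, -int(axis_size / 2), axis=axis)   # kernel center -> index (0, 0)

print(otf[0, 0])                                 # 0.111...: the kernel center now sits at the origin
otf = np.fft.fft2(otf)                           # frequency response of the circular blur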
Example #3
Source File: test_end2end.py    From derplearning with MIT License
def test_perturb_learnability(frame, source_config, target_config):
    bbox = derp.util.get_patch_bbox(target_config, source_config)
    train_table, test_table = [], []
    for shift in np.linspace(-0.4, 0.4, 51):
        for rotate in np.linspace(-4, 4, 51):
            p_frame = derp.util.perturb(frame.copy(), source_config, shift, rotate)
            p_patch = derp.util.crop(p_frame, bbox)
            table = test_table if shift == 0 or rotate == 0 else train_table
            table.append([p_patch, torch.FloatTensor(), torch.FloatTensor([shift * 2.5,
                                                                           rotate * 0.25])])
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 
    train_fetcher, test_fetcher = Fetcher(train_table), Fetcher(test_table)
    train_loader = torch.utils.data.DataLoader(train_fetcher, 32, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_fetcher, len(test_fetcher))
    model = derp.model.Tiny(np.roll(train_table[0][0].shape, 1), 0, 2).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), 1E-3)
    criterion = torch.nn.MSELoss().to(device)
    test_losses = []
    for epoch in range(5):
        train_loss = derp.model.train_epoch(device, model, optimizer, criterion, train_loader)
        test_loss = derp.model.test_epoch(device, model, criterion, test_loader)
        test_losses.append(test_loss)
    assert min(test_losses) < 2E-3 
Example #4
Source File: utils_deblur.py    From KAIR with MIT License
def p2o(psf, shape):
    '''
    # psf: NxCxhxw
    # shape: [H,W]
    # otf: NxCxHxWx2
    '''
    otf = torch.zeros(psf.shape[:-2] + shape).type_as(psf)
    otf[...,:psf.shape[2],:psf.shape[3]].copy_(psf)
    for axis, axis_size in enumerate(psf.shape[2:]):
        otf = torch.roll(otf, -int(axis_size / 2), dims=axis+2)
    otf = torch.rfft(otf, 2, onesided=False)
    n_ops = torch.sum(torch.tensor(psf.shape).type_as(psf) * torch.log2(torch.tensor(psf.shape).type_as(psf)))
    otf[...,1][torch.abs(otf[...,1])<n_ops*2.22e-16] = torch.tensor(0).type_as(psf)
    return otf



# otf2psf: not sure where I got this one from. Maybe translated from Octave source code or whatever. It's just math. 
Example #5
Source File: media.py    From subsync with Apache License 2.0
def logloss(self, pred, actual, margin=12):
        blocks = secondsToBlocks(margin)
        logloss = np.ones(blocks*2)
        indices = np.ones(blocks*2)
        nonzero = np.nonzero(actual)[0]
        begin = max(nonzero[0]-blocks, 0)
        end = min(nonzero[-1]+blocks, len(actual)-1)
        pred = pred[begin:end]
        actual = actual[begin:end]
        for i, offset in enumerate(range(-blocks, blocks)):
            snippet = np.roll(actual, offset)
            try:
                logloss[i] = sklearn.metrics.log_loss(snippet[blocks:-blocks], pred[blocks:-blocks])
            except (ValueError, RuntimeWarning):
                pass
            indices[i] = offset

        return indices, logloss 
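The pattern here is a brute-force alignment search: the reference signal is rolled over a window of candidate offsets and each shifted copy is scored against the prediction, so the best offset is the one with the best score. A stripped-down sketch of the same idea, using a plain agreement score instead of sklearn's log loss:

import numpy as np

pred = np.array([0, 0, 1, 1, 1, 0, 0, 0, 1, 1], dtype=float)
actual = np.roll(pred, 3)                       # same signal, delayed by 3 samples

offsets = range(-5, 6)
scores = [np.mean(np.roll(actual, o) == pred) for o in offsets]
best = list(offsets)[int(np.argmax(scores))]
print(best)                                     # -3: rolling actual back by 3 realigns it with pred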
Example #6
Source File: post_proc.py    From HorizonNet with MIT License
def gen_ww(init_coorx, coory, z=50, coorW=1024, coorH=512, floorW=1024, floorH=512, tol=3, force_cuboid=True):
    gpid = get_gpid(init_coorx, coorW)
    coor = np.hstack([np.arange(coorW)[:, None], coory[:, None]])
    xy = np_coor2xy(coor, z, coorW, coorH, floorW, floorH)

    # Generate wall-wall
    if force_cuboid:
        xy_cor = gen_ww_cuboid(xy, gpid, tol)
    else:
        xy_cor = gen_ww_general(init_coorx, xy, gpid, tol)

    # Ceiling view to normal view
    cor = []
    for j in range(len(xy_cor)):
        next_j = (j + 1) % len(xy_cor)
        if xy_cor[j]['type'] == 1:
            cor.append((xy_cor[next_j]['val'], xy_cor[j]['val']))
        else:
            cor.append((xy_cor[j]['val'], xy_cor[next_j]['val']))
    cor = np_xy2coor(np.array(cor), z, coorW, coorH, floorW, floorH)
    cor = np.roll(cor, -2 * cor[::2, 0].argmin(), axis=0)

    return cor, xy_cor 
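The final np.roll call along axis=0 just rotates the list of corner coordinates so that it starts from a canonical corner (the one with the smallest x among the ceiling points) without changing the cyclic order. The same trick on a plain array of 2D points, ignoring the ceiling/floor interleaving handled by the real code:

import numpy as np

corners = np.array([[4.0, 1.0],
                    [2.0, 3.0],
                    [0.5, 2.0],
                    [3.0, 0.5]])
start = corners[:, 0].argmin()                  # index of the corner with the smallest x
canonical = np.roll(corners, -start, axis=0)    # rotate rows so that corner comes first
print(canonical[0])                             # [0.5 2. ]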
Example #7
Source File: mps.py    From tenpy with GNU General Public License v3.0
def roll_mps_unit_cell(self, shift=1):
        """Shift the section we define as unit cellof an infinite MPS; in place.

        Suppose we have a unit cell with tensors ``[A, B, C, D]`` (repeated on both sides).
        With ``shift = 1``, the new unit cell will be ``[D, A, B, C]``,
        whereas ``shift = -1`` will give ``[B, C, D, A]``.

        Parameters
        ----------
        shift : int
            By how many sites to move the tensors to the right.
        """
        if self.finite:
            raise ValueError("makes only sense for infinite boundary conditions")
        inds = np.roll(np.arange(self.L), shift)
        self.sites = [self.sites[i] for i in inds]
        self.form = [self.form[i] for i in inds]
        self._B = [self._B[i] for i in inds]
        self._S = [self._S[i] for i in inds]
        self._S.append(self._S[0]) 
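The docstring's behaviour follows directly from rolling an index array and re-indexing the per-site lists. A small stand-alone check of that index gymnastics:

import numpy as np

tensors = ['A', 'B', 'C', 'D']
inds = np.roll(np.arange(len(tensors)), 1)       # [3 0 1 2]
print([tensors[i] for i in inds])                # ['D', 'A', 'B', 'C']  (shift = 1)

inds = np.roll(np.arange(len(tensors)), -1)      # [1 2 3 0]
print([tensors[i] for i in inds])                # ['B', 'C', 'D', 'A']  (shift = -1)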
Example #8
Source File: pano_lsd_align.py    From HorizonNet with MIT License
def computeUVN_vec(n, in_, planeID):
    '''
    vectorized version of computeUVN
    @n         N x 3
    @in_      MN x 1
    @planeID   N
    '''
    n = n.copy()
    if (planeID == 2).sum():
        n[planeID == 2] = np.roll(n[planeID == 2], 2, axis=1)
    if (planeID == 3).sum():
        n[planeID == 3] = np.roll(n[planeID == 3], 1, axis=1)
    n = np.repeat(n, in_.shape[0] // n.shape[0], axis=0)
    assert n.shape[0] == in_.shape[0]
    bc = n[:, [0]] * np.sin(in_) + n[:, [1]] * np.cos(in_)
    bs = n[:, [2]]
    out = np.arctan(-bc / (bs + 1e-9))
    return out 
Example #9
Source File: inference.py    From HorizonNet with MIT License
def augment_undo(x_imgs_augmented, aug_type):
    x_imgs_augmented = x_imgs_augmented.cpu().numpy()
    sz = x_imgs_augmented.shape[0] // len(aug_type)
    x_imgs = []
    for i, aug in enumerate(aug_type):
        x_img = x_imgs_augmented[i*sz : (i+1)*sz]
        if aug == 'flip':
            x_imgs.append(np.flip(x_img, axis=-1))
        elif aug.startswith('rotate'):
            shift = int(aug.split()[-1])
            x_imgs.append(np.roll(x_img, -shift, axis=-1))
        elif aug == '':
            x_imgs.append(x_img)
        else:
            raise NotImplementedError()

    return np.array(x_imgs) 
Example #10
Source File: utils.py    From AdaptiveWingLoss with Apache License 2.0
def fig2data(fig):
    """
    @brief Convert a Matplotlib figure to a 3D numpy array (H x W x 4) with RGBA channels and return it
    @param fig a matplotlib figure
    @return a numpy 3D array of RGBA values
    """
    # draw the renderer
    fig.canvas.draw()

    # Get the ARGB buffer from the figure
    w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8)
    buf = buf.reshape(h, w, 4)

    # canvas.tostring_argb gives the pixmap in ARGB order; roll the alpha channel
    # to the end to obtain RGBA.
    buf = np.roll(buf, 3, axis=2)
    return buf
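Rolling along the channel axis is a cheap way to turn ARGB pixel data into RGBA: every channel index moves forward by three positions, wrapping alpha around to the end. A tiny check on a single pixel:

import numpy as np

argb = np.array([[[255, 10, 20, 30]]], dtype=np.uint8)   # one pixel: A, R, G, B
rgba = np.roll(argb, 3, axis=2)
print(rgba)                                              # [[[10 20 30 255]]] -> R, G, B, A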
Example #11
Source File: utils.py    From py360convert with MIT License
def equirect_facetype(h, w):
    '''
    0F 1R 2B 3L 4U 5D
    '''
    tp = np.roll(np.arange(4).repeat(w // 4)[None, :].repeat(h, 0), 3 * w // 8, 1)

    # Prepare ceil mask
    mask = np.zeros((h, w // 4), bool)  # the np.bool alias was removed in NumPy 1.24; use the builtin bool
    idx = np.linspace(-np.pi, np.pi, w // 4) / 4
    idx = h // 2 - np.round(np.arctan(np.cos(idx)) * h / np.pi).astype(int)
    for i, j in enumerate(idx):
        mask[:j, i] = 1
    mask = np.roll(np.concatenate([mask] * 4, 1), 3 * w // 8, 1)

    tp[mask] = 4
    tp[np.flip(mask, 0)] = 5

    return tp.astype(np.int32) 
Example #12
Source File: surface.py    From pytim with GNU General Public License v3.0
def _compute_q_vectors(self, box):
        """ Compute the q-vectors compatible with the current box dimensions.

            Calculated quantities:
            q_vectors : two 2D arrays forming the grid of q-values, similar
                        to a meshgrid
            Qxy       : array of the different q-vectors
            Q         : modulus of Qxy with the first element missing
                        (no Q = 0.0)
        """
        self.box = np.roll(box, 2 - self.normal)
        nmax = list(map(int, np.ceil(self.box[0:2] / self.alpha)))
        self.q_vectors = np.mgrid[0:nmax[0], 0:nmax[1]] * 1.0
        self.q_vectors[0] *= 2. * np.pi / box[0]
        self.q_vectors[1] *= 2. * np.pi / box[1]
        self.modes_shape = self.q_vectors[0].shape
        qx = self.q_vectors[0][:, 0]
        qy = self.q_vectors[1][0]
        Qx = np.repeat(qx, len(qy))
        Qy = np.tile(qy, len(qx))
        self.Qxy = np.vstack((Qx, Qy)).T
        self.Q = np.sqrt(np.sum(self.Qxy * self.Qxy, axis=1)[1:]) 
Example #13
Source File: parsers.py    From recruit with Apache License 2.0
def detect_colspecs(self, infer_nrows=100, skiprows=None):
        # Regex escape the delimiters
        delimiters = ''.join(r'\%s' % x for x in self.delimiter)
        pattern = re.compile('([^%s]+)' % delimiters)
        rows = self.get_rows(infer_nrows, skiprows)
        if not rows:
            raise EmptyDataError("No rows from which to infer column width")
        max_len = max(map(len, rows))
        mask = np.zeros(max_len + 1, dtype=int)
        if self.comment is not None:
            rows = [row.partition(self.comment)[0] for row in rows]
        for row in rows:
            for m in pattern.finditer(row):
                mask[m.start():m.end()] = 1
        shifted = np.roll(mask, 1)
        shifted[0] = 0
        edges = np.where((mask ^ shifted) == 1)[0]
        edge_pairs = list(zip(edges[::2], edges[1::2]))
        return edge_pairs 
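The mask ^ np.roll(mask, 1) trick marks every position where a run of ones starts or ends, which is how the fixed-width parser infers column boundaries. A stand-alone version of that step:

import numpy as np

mask = np.array([0, 1, 1, 1, 0, 0, 1, 1, 0], dtype=int)
shifted = np.roll(mask, 1)
shifted[0] = 0                              # do not let the last element wrap into position 0
edges = np.where((mask ^ shifted) == 1)[0]
print(edges)                                # [1 4 6 8]
print(list(zip(edges[::2], edges[1::2])))   # [(1, 4), (6, 8)] -> the two runs of ones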
Example #14
Source File: gla_util.py    From Deep_VoiceChanger with MIT License
def inverse(self, spectrum, in_phase=None):
        spectrum = spectrum.astype(complex)
        if in_phase is None:
            in_phase = self.phase
        self.spectrum_buffer[-1] = spectrum * in_phase
        self.absolute_buffer[-1] = spectrum

        for _ in range(self.loop_num):
            self.overwrap_buf *= 0
            waves = np.fft.ifft(self.spectrum_buffer, axis=1).real
            last = self.spectrum_buffer

            for i in range(self.buffer_size):
                self.overwrap_buf[i*self.wave_dif:i*self.wave_dif+self.wave_len] += waves[i]
            waves = np.vstack([self.overwrap_buf[i*self.wave_dif:i*self.wave_dif+self.wave_len]*self.window for i in range(self.buffer_size)])

            spectrum = np.fft.fft(waves, axis=1)
            self.spectrum_buffer = self.absolute_buffer * spectrum / (np.abs(spectrum)+1e-10)
            self.spectrum_buffer += 0.5 * (self.spectrum_buffer - last)

        waves = np.fft.ifft(self.spectrum_buffer[0]).real
        self.absolute_buffer = np.roll(self.absolute_buffer, -1, axis=0)
        self.spectrum_buffer = np.roll(self.spectrum_buffer, -1, axis=0)

        self.wave_buf = np.roll(self.wave_buf, -self.wave_dif)
        self.wave_buf[-self.wave_dif:] = 0
        self.wave_buf[self.wave_dif:] += waves
        return self.wave_buf[:self.wave_dif]*0.5 
Example #15
Source File: signalprocessing.py    From pylops with GNU Lesser General Public License v3.0
def nonstationary_convmtx(H, n, hc=0, pad=(0, 0)):
    r"""Convolution matrix from a bank of filters

    Makes a dense convolution matrix :math:`\mathbf{C}`
    such that the dot product ``np.dot(C, x)`` is the nonstationary
    convolution of the bank of filters :math:`H=[h_1, h_2, \ldots, h_n]`
    and the input signal :math:`x`.

    Parameters
    ----------
    H : :obj:`np.ndarray`
        Convolution filters (2D array of shape
        :math:`[n_{filters} \times n_{h}]`)
    n : :obj:`int`
        Number of columns of convolution matrix
    hc : :obj:`int`, optional
        Index of center of first filter
    pad : :obj:`np.ndarray`, optional
        Zero-padding to apply to the bank of filters before and after the
        provided values (use it to avoid wrap-around or pass filters with
        enough padding)

    Returns
    -------
    C : :obj:`np.ndarray`
        Convolution matrix

    """
    H = np.pad(H, ((0, 0), pad), mode='constant')
    C = np.array([np.roll(h, ih) for ih, h in enumerate(H)])
    C = C[:, pad[0] + hc:pad[0] + hc + n].T  # take away edges
    return C 
Example #16
Source File: t_maze.py    From pycolab with Apache License 2.0
def update(self, actions, board, layers, backdrop, things, the_plot):
    # Is there a teleportation order for this frame?
    if the_plot.get('teleportation_order_frame', -1) != the_plot.frame: return
    row_shift, col_shift = the_plot['teleportation_order']
    self.whole_pattern[:] = np.roll(self.whole_pattern, -row_shift, axis=0)
    self.whole_pattern[:] = np.roll(self.whole_pattern, -col_shift, axis=1) 
Example #17
Source File: hello_world.py    From pycolab with Apache License 2.0
def update(self, actions, board, layers, backdrop, all_things, the_plot):
    del board, layers, backdrop, all_things  # unused

    if actions is None: return  # No work needed to make the first observation.
    if actions == 4: the_plot.terminate_episode()  # Action 4 means "quit".

    # If the player has chosen a motion action, use that action to index into
    # the set of four rolls.
    if actions < 4:
      rolled = np.roll(self.curtain,  # Makes a copy, alas.
                       self._ROLL_SHIFTS[actions], self._ROLL_AXES[actions])
      np.copyto(self.curtain, rolled)
      the_plot.add_reward(1)  # Give ourselves a point for moving. 
Example #18
Source File: algs.py    From mabalgs with Apache License 2.0
def select(self):
        """
            This method selects the best arm chosen by UCB1.

            :return: Return selected arm number.
                    Arm number returned is (n_arm - 1).

                    Returns a list of arms by importance.
                    The chosen arm is the index 0 of this list.
        """

        arm_dont_usage = np.where(self.number_of_selections == 0)[0]
        if len(arm_dont_usage) > 0:
            self.number_of_selections[arm_dont_usage[0]] += 1

            ranked_arms = list(range(len(self.number_of_selections)))

            if arm_dont_usage[0] != 0:
                ranked_arms = np.roll(ranked_arms, 1)
                first_element = ranked_arms[0]
                index_current = ranked_arms.tolist().index(arm_dont_usage[0])

                ranked_arms[0] = arm_dont_usage[0]
                ranked_arms[index_current] = first_element

            return arm_dont_usage[0], ranked_arms

        average_reward = self.rewards / self.number_of_selections
        total_counts = np.sum(self.number_of_selections)

        ucb_values = self._factor_importance_each_arm(
            total_counts,
            self.number_of_selections,
            average_reward
        )
        ranked_arms = np.flip(np.argsort(ucb_values), axis=0)
        chosen_arm = ranked_arms[0]

        self.number_of_selections[chosen_arm] += 1

        return chosen_arm, ranked_arms 
Example #19
Source File: Roll.py    From pylops with GNU Lesser General Public License v3.0
def _rmatvec(self, x):
        if self.reshape:
            x = np.reshape(x, self.dims)
        y = np.roll(x, shift=-self.shift, axis=self.dir)
        return y.ravel() 
Example #20
Source File: Roll.py    From pylops with GNU Lesser General Public License v3.0
def _matvec(self, x):
        if self.reshape:
            x = np.reshape(x, self.dims)
        y = np.roll(x, shift=self.shift, axis=self.dir)
        return y.ravel() 
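These two pylops methods encode the fact that rolling by shift and rolling by -shift are inverses, and that for this permutation operator the adjoint equals the inverse. A quick numerical check of both properties with plain NumPy:

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(8)
y = rng.standard_normal(8)
shift = 3

# Rolling forward and then backward recovers the original vector.
print(np.allclose(np.roll(np.roll(x, shift), -shift), x))      # True

# Dot-test: <roll(x), y> == <x, roll^T(y)>, with roll^T implemented as a roll by -shift.
print(np.allclose(np.dot(np.roll(x, shift), y),
                  np.dot(x, np.roll(y, -shift))))               # True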
Example #21
Source File: scrollingPlots.py    From tf-pose with Apache License 2.0
def update1():
    global data1, ptr1
    data1[:-1] = data1[1:]  # shift data in the array one sample left
                            # (see also: np.roll)
    data1[-1] = np.random.normal()
    curve1.setData(data1)
    
    ptr1 += 1
    curve2.setData(data1)
    curve2.setPos(ptr1, 0)
    

# 2) Allow data to accumulate. In these examples, the array doubles in length
#    whenever it is full. 
Example #22
Source File: test_reshape.py    From recruit with Apache License 2.0
def test_sparse_series_unstack(sparse_df, multi_index3):
    frame = pd.SparseSeries(np.ones(3), index=multi_index3).unstack()

    arr = np.array([1, np.nan, np.nan])
    arrays = {i: pd.SparseArray(np.roll(arr, i)) for i in range(3)}
    expected = pd.DataFrame(arrays)
    tm.assert_frame_equal(frame, expected) 
Example #23
Source File: utils_deblur.py    From KAIR with MIT License
def otf2psf(otf, outsize=None):
    insize = np.array(otf.shape)
    psf = np.fft.ifftn(otf, axes=(0, 1))
    for axis, axis_size in enumerate(insize):
        psf = np.roll(psf, np.floor(axis_size / 2).astype(int), axis=axis)
    if outsize is not None:
        insize = np.array(otf.shape)
        outsize = np.array(outsize)
        n = max(np.size(outsize), np.size(insize))
        # outsize = postpad(outsize(:), n, 1);
        # insize = postpad(insize(:) , n, 1);
        colvec_out = outsize.flatten().reshape((np.size(outsize), 1))
        colvec_in = insize.flatten().reshape((np.size(insize), 1))
        outsize = np.pad(colvec_out, ((0, max(0, n - np.size(colvec_out))), (0, 0)), mode="constant")
        insize = np.pad(colvec_in, ((0, max(0, n - np.size(colvec_in))), (0, 0)), mode="constant")

        pad = (insize - outsize) / 2
        if np.any(pad < 0):
            print("otf2psf error: OUTSIZE must be smaller than or equal than OTF size")
        prepad = np.floor(pad)
        postpad = np.ceil(pad)
        dims_start = prepad.astype(int)
        dims_end = (insize - postpad).astype(int)
        for i in range(len(dims_start.shape)):
            psf = np.take(psf, range(dims_start[i][0], dims_end[i][0]), axis=i)
    n_ops = np.sum(otf.size * np.log2(otf.shape))
    psf = np.real_if_close(psf, tol=n_ops)
    return psf


# psf2otf copied/modified from https://github.com/aboucaud/pypher/blob/master/pypher/pypher.py 
Example #24
Source File: utilities.py    From pytim with GNU General Public License v3.0
def get_pos(group, normal=2):
    return np.roll(group.positions, normal - 2, axis=-1) 
Example #25
Source File: categorical.py    From recruit with Apache License 2.0
def shift(self, periods, fill_value=None):
        """
        Shift Categorical by desired number of periods.

        Parameters
        ----------
        periods : int
            Number of periods to move, can be positive or negative
        fill_value : object, optional
            The scalar value to use for newly introduced missing values.

            .. versionadded:: 0.24.0

        Returns
        -------
        shifted : Categorical
        """
        # since categoricals always have ndim == 1, an axis parameter
        # doesn't make any sense here.
        codes = self.codes
        if codes.ndim > 1:
            raise NotImplementedError("Categorical with ndim > 1.")
        if np.prod(codes.shape) and (periods != 0):
            codes = np.roll(codes, ensure_platform_int(periods), axis=0)
            if isna(fill_value):
                fill_value = -1
            elif fill_value in self.categories:
                fill_value = self.categories.get_loc(fill_value)
            else:
                raise ValueError("'fill_value={}' is not present "
                                 "in this Categorical's "
                                 "categories".format(fill_value))
            if periods > 0:
                codes[:periods] = fill_value
            else:
                codes[periods:] = fill_value

        return self.from_codes(codes, dtype=self.dtype) 
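Under the hood the Categorical shift is the generic roll-then-overwrite pattern: roll the codes, then blank out the positions that wrapped around, so no value leaks from one end to the other. The same pattern on a plain integer array, with -1 standing in for the missing-value code as in the pandas snippet above:

import numpy as np

codes = np.array([0, 1, 2, 0, 1])
periods = 2

shifted = np.roll(codes, periods)
if periods > 0:
    shifted[:periods] = -1          # wrapped-around head becomes "missing"
else:
    shifted[periods:] = -1          # for negative periods, blank the tail instead
print(shifted)                      # [-1 -1  0  1  2]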
Example #26
Source File: surface.py    From pytim with GNU General Public License v3.0
def __init__(self, interface, options=None):
        self.interface = weakref.proxy(interface)
        self.normal = interface.normal
        self.alpha = interface.alpha
        self.options = options
        self.z = self.normal
        try:
            self.xyz = np.roll(np.array([0, 1, 2]), 2 - self.z)
            self.xy = self.xyz[0:2]
        except Exception:
            self.xyz, self.xy = None, None
        try:
            self._layer = options['layer']
        except (TypeError, KeyError):
            self._layer = 0 
Example #27
Source File: utilities_geometry.py    From pytim with GNU General Public License v3.0
def find_surface_triangulation(interface):
    """
        identifies all triangles which are part of the surface
        :param GITIM interface: a GITIM interface instance
        :returns ndarray: (N,3) indices of the triangles' vertices
    """
    intr = interface
    cond = intr.atoms.layers == 1
    layer_1 = intr.atoms[cond]
    tri = None
    for roll in [0, 1, 2, 3]:
        # simplices[i] goes from 0 -> len(cluster_group) + periodic copies
        # layer_1_ids links the atoms in the 1st layer to the indexing of
        # simplices's points
        layer_1_ids = np.argwhere(
            np.isin(intr.cluster_group.indices, layer_1.indices))
        rolled = np.roll(intr.triangulation[0].simplices, roll, axis=1)[:, :3]
        # requires that triplets of points in the simplices belong to the 1st
        # layer
        select = np.argwhere(np.all(np.isin(rolled, layer_1_ids),
                                    axis=1)).flatten()
        if tri is None:
            tri = rolled[select]
        else:
            tri = np.append(tri, rolled[select], axis=0)
    return tri 
Example #28
Source File: utilities.py    From pytim with GNU General Public License v3.0
def get_box(universe, normal=2):
    box = universe.coord.dimensions[0:3]
    return np.roll(box, 2 - normal) 
Example #29
Source File: eval_logloss.py    From subsync with Apache License 2.0
def logloss(pred, actual):
    begin = np.argmax(actual) * (-1)
    end = np.argmax(actual[::-1]) + 1
    print("Calculating {} logloss values".format(end-begin))
    logloss = np.zeros(end-begin)
    indices = np.zeros(end-begin)
    for i, offset in enumerate(range(begin, end)):
        logloss[i] = sklearn.metrics.log_loss(np.roll(actual, offset), pred)
        indices[i] = offset

    return indices, logloss 
Example #30
Source File: my_txtutils.py    From tensorflow-shakespeare-poem-generator with Apache License 2.0
def rnn_minibatch_sequencer(raw_data, batch_size, sequence_size, nb_epochs):
    """
    Divides the data into batches of sequences so that all the sequences in one batch
    continue in the next batch. This is a generator that will keep returning batches
    until the input data has been seen nb_epochs times. Sequences are continued even
    between epochs, apart from one, the one corresponding to the end of raw_data.
    The remainder at the end of raw_data that does not fit in a full batch is ignored.
    :param raw_data: the training text
    :param batch_size: the size of a training minibatch
    :param sequence_size: the unroll size of the RNN
    :param nb_epochs: number of epochs to train on
    :return:
        x: one batch of training sequences
        y: one batch of target sequences, i.e. training sequences shifted by 1
        epoch: the current epoch number (starting at 0)
    """
    data = np.array(raw_data)
    data_len = data.shape[0]
    # using (data_len-1) because we must provide for the sequence shifted by 1 too
    nb_batches = (data_len - 1) // (batch_size * sequence_size)
    assert nb_batches > 0, "Not enough data, even for a single batch. Try using a smaller batch_size."
    rounded_data_len = nb_batches * batch_size * sequence_size
    xdata = np.reshape(data[0:rounded_data_len], [batch_size, nb_batches * sequence_size])
    ydata = np.reshape(data[1:rounded_data_len + 1], [batch_size, nb_batches * sequence_size])

    for epoch in range(nb_epochs):
        for batch in range(nb_batches):
            x = xdata[:, batch * sequence_size:(batch + 1) * sequence_size]
            y = ydata[:, batch * sequence_size:(batch + 1) * sequence_size]
            x = np.roll(x, -epoch, axis=0)  # to continue the text from epoch to epoch (do not reset rnn state!)
            y = np.roll(y, -epoch, axis=0)
            yield x, y, epoch