Python numpy.copy() Examples

The following are 30 code examples of numpy.copy(), drawn from open-source projects. The source file, project, and license for each example are listed above it.
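As a quick refresher on what these examples rely on: np.copy() returns a new array with its own data buffer, whereas plain assignment only binds another name and basic slicing returns a view that shares memory. A minimal sketch:

import numpy as np

a = np.array([1, 2, 3])
b = a             # plain assignment: b is another name for the same array
c = a[:]          # basic slice: a view that shares a's data
d = np.copy(a)    # new array with its own copy of the data

a[0] = 99
print(b[0], c[0], d[0])   # 99 99 1 - only the copy keeps the old value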
Example #1
Source File: coroOnlyScheduler.py    From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def scheduleRevisit(self, sInd, smin, det, pInds):
        """A Helper Method for scheduling revisits after observation detection
        Args:
            sInd - sInd of the star just detected
            smin - minimum separation of the planet to star of planet just detected
            det - 
            pInds - Indices of planets around target star
        Return:
            updates self.starRevisit attribute
        """

        TK = self.TimeKeeping

        t_rev = TK.currentTimeNorm.copy() + self.revisit_wait[sInd]
        # finally, populate the revisit list (NOTE: sInd becomes a float)
        revisit = np.array([sInd, t_rev.to('day').value])
        if self.starRevisit.size == 0:  # starRevisit is empty
            self.starRevisit = np.array([revisit])  # initialize starRevisit
        else:
            revInd = np.where(self.starRevisit[:, 0] == sInd)[0]  # rows of starRevisit whose first column is sInd
            if revInd.size == 0:
                self.starRevisit = np.vstack((self.starRevisit, revisit))
            else:
                self.starRevisit[revInd, 1] = revisit[1]  # overwrite the stored revisit time
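The starRevisit bookkeeping above (append a new [star index, revisit time] row, or overwrite the stored time when the star is already listed) is easy to try in isolation. Below is a stripped-down sketch of that pattern with made-up indices and times; the EXOSIMS TimeKeeping and revisit_wait machinery is omitted.

import numpy as np

def schedule(star_revisit, s_ind, t_rev_days):
    revisit = np.array([s_ind, t_rev_days])
    if star_revisit.size == 0:                       # nothing scheduled yet
        return np.array([revisit])
    rev_ind = np.where(star_revisit[:, 0] == s_ind)[0]
    if rev_ind.size == 0:                            # star not in the list yet
        return np.vstack((star_revisit, revisit))
    star_revisit[rev_ind, 1] = revisit[1]            # overwrite the stored time
    return star_revisit

star_revisit = np.empty((0, 2))
star_revisit = schedule(star_revisit, 4, 12.5)
star_revisit = schedule(star_revisit, 7, 30.0)
star_revisit = schedule(star_revisit, 4, 45.0)       # updates the row for star 4
print(star_revisit)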
Example #2
Source File: bbs.py    From cat-bbs with MIT License
def add_border(self, val, img_shape=None):
        if val == 0:
            return self.copy()
        else:
            if isinstance(val, int):
                rect = Rectangle(x1=self.x1-val, x2=self.x2+val, y1=self.y1-val, y2=self.y2+val)
            elif isinstance(val, float):
                rect = Rectangle(x1=int(self.x1 - self.width*val), x2=int(self.x2 + self.width*val), y1=int(self.y1 - self.height*val), y2=int(self.y2 + self.height*val))
            elif isinstance(val, tuple):
                assert len(val) == 4, str(len(val))

                if all([isinstance(subval, int) for subval in val]):
                    rect = Rectangle(x1=self.x1-val[3], x2=self.x2+val[1], y1=self.y1-val[0], y2=self.y2+val[2])
                elif all([isinstance(subval, float) or subval == 0 for subval in val]):  # "or subval == 0" because otherwise e.g. (0.1, 0, 0.1, 0) would raise an error (0 is an int)
                    rect = Rectangle(x1=int(self.x1 - self.width*val[3]), x2=int(self.x2 + self.width*val[1]), y1=int(self.y1 - self.height*val[0]), y2=int(self.y2 + self.height*val[2]))
                else:
                    raise Exception("Tuple of all ints or tuple of all floats expected, got %s" % (str([type(v) for v in val]),))
            else:
                raise Exception("int or float or tuple of ints/floats expected, got %s" % (type(val),))

            if img_shape is not None:
                rect.fix_by_image_dimensions(height=img_shape[0], width=img_shape[1])

            return rect 
Example #3
Source File: bbs.py    From cat-bbs with MIT License
def draw_on_image(self, img, color=[0, 255, 0], alpha=1.0, copy=True, from_img=None):
        if copy:
            img = np.copy(img)

        orig_dtype = img.dtype
        if alpha != 1.0 and img.dtype != np.float32:
            img = img.astype(np.float32, copy=False)

        for rect in self:
            if from_img is not None:
                rect.resize(from_img, img).draw_on_image(img, color=color, alpha=alpha, copy=False)
            else:
                rect.draw_on_image(img, color=color, alpha=alpha, copy=False)

        if orig_dtype != img.dtype:
            img = img.astype(orig_dtype, copy=False)

        return img 
Example #4
Source File: predict_video.py    From cat-bbs with MIT License
def process_frame(frame_idx, img, model, write_to_dir, conf_threshold, input_size=224):
    """Finds bounding boxes in a video frame, draws these bounding boxes
    and saves the result to HDD.
    """
    # find BBs in frame
    bbs, time_model = find_bbs(img, model, conf_threshold, input_size=input_size)

    # draw BBs
    img_out = np.copy(img)
    for (bb, score) in bbs:
        if score > conf_threshold and bb.width > 2 and bb.height > 2:
            img_out = bb.draw_on_image(img_out, color=[0, 255, 0], thickness=3)

    # save to output directory
    save_to_fp = os.path.join(write_to_dir, "%05d.jpg" % (frame_idx,))
    misc.imsave(save_to_fp, img_out)

    return time_model 
Example #5
Source File: predict_video.py    From cat-bbs with MIT License
def _shrink_candidates(self, rect, depth):
        """Recursive function called by _shrink() to generate bounding box
        candidates that are smaller than the input bounding box."""
        result = [rect]

        if depth > 0:
            if rect.width > 1:
                rect_left = rect.copy(x1=rect.x1+1)
                rect_right = rect.copy(x2=rect.x2-1)
                result.extend(self._shrink_candidates(rect_left, depth=depth-1))
                result.extend(self._shrink_candidates(rect_right, depth=depth-1))

            if rect.height > 1:
                rect_top = rect.copy(y1=rect.y1+1)
                rect_bottom = rect.copy(y2=rect.y2-1)
                result.extend(self._shrink_candidates(rect_top, depth=depth-1))
                result.extend(self._shrink_candidates(rect_bottom, depth=depth-1))

        return result 
Example #6
Source File: predict_video.py    From cat-bbs with MIT License
def _rects_reverse_projection(self, rects, img_shape, img_pad_shape, pad_top, pad_right, pad_bottom, pad_left):
        """Input images into the model are padded to make them squared. They
        are also resized to a smaller size. This function is supposed to
        remove both effects, i.e. to project the found bounding boxes from
        the padded and resized image to the unpadded und unresized (original)
        input image.
        """
        result = []
        for (rect, score) in rects:
            # project from resized padded (squared) image to unresized one
            rect_large = rect.on(img_pad_shape)
            # move rectangles to remove paddings
            rect_large_unpadded = rect_large.shift(top=-pad_top, left=-pad_left)
            # positions of corners are now correct, so switch underlying shape
            rect_large_unpadded = rect_large_unpadded.copy(shape=img_shape)
            result.append((rect_large_unpadded, score))
        return result 
Example #7
Source File: test.py    From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License
def _get_rois_blob(im_rois, im_scale_factors):
    """Converts RoIs into network inputs.
    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale_factors (list): scale factors as returned by _get_image_blob
    Returns:
        rois_blob_real (list of ndarray): one R x 5 matrix of RoIs per image pyramid level
    """
    rois_blob_real = []

    for i in range(len(im_scale_factors)):
        rois, levels = _project_im_rois(im_rois, np.array([im_scale_factors[i]]))
        rois_blob = np.hstack((levels, rois))
        rois_blob_real.append(rois_blob.astype(np.float32, copy=False))

    return rois_blob_real 
Example #8
Source File: test.py    From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License
def _project_im_rois(im_rois, scales):
    """Project image RoIs into the image pyramid built by _get_image_blob.
    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        scales (list): scale factors as returned by _get_image_blob
    Returns:
        rois (ndarray): R x 4 matrix of projected RoI coordinates
        levels (list): image pyramid levels used by each projected RoI
    """
    im_rois = im_rois.astype(np.float64, copy=False)  # np.float alias removed in recent NumPy

    if len(scales) > 1:
        widths = im_rois[:, 2] - im_rois[:, 0] + 1
        heights = im_rois[:, 3] - im_rois[:, 1] + 1
        areas = widths * heights
        scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
        diff_areas = np.abs(scaled_areas - 224 * 224)
        levels = diff_areas.argmin(axis=1)[:, np.newaxis]
    else:
        levels = np.zeros((im_rois.shape[0], 1), dtype=np.int64)  # np.int alias removed in recent NumPy

    rois = im_rois * scales[levels]

    return rois, levels 
Example #9
Source File: test_train.py    From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License
def _project_im_rois(im_rois, scales):
    """Project image RoIs into the image pyramid built by _get_image_blob.
    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        scales (list): scale factors as returned by _get_image_blob
    Returns:
        rois (ndarray): R x 4 matrix of projected RoI coordinates
        levels (list): image pyramid levels used by each projected RoI
    """
    im_rois = im_rois.astype(np.float64, copy=False)  # np.float alias removed in recent NumPy

    if len(scales) > 1:
        widths = im_rois[:, 2] - im_rois[:, 0] + 1
        heights = im_rois[:, 3] - im_rois[:, 1] + 1
        areas = widths * heights
        scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
        diff_areas = np.abs(scaled_areas - 224 * 224)
        levels = diff_areas.argmin(axis=1)[:, np.newaxis]
    else:
        levels = np.zeros((im_rois.shape[0], 1), dtype=np.int64)  # np.int alias removed in recent NumPy

    rois = im_rois * scales[levels]

    return rois, levels 
Example #10
Source File: pgd_cw_whitebox.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def perturb(self, x_nat, y, sess):
    """Given a set of examples (x_nat, y), returns a set of adversarial
       examples within epsilon of x_nat in l_infinity norm."""
    if self.rand:
      x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
    else:
      x = np.copy(x_nat)

    for i in range(self.k):
      grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
                                            self.model.y_input: y})

      x += self.a * np.sign(grad)

      x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
      x = np.clip(x, 0, 1) # ensure valid pixel range

    return x 
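Note that np.copy(x_nat) (or the random start) keeps the natural examples untouched while x is updated in place. Below is a framework-free sketch of the same copy-and-clip loop, with a random array standing in for the gradient returned by sess.run, so only the projection onto the epsilon-ball is exercised.

import numpy as np

rng = np.random.default_rng(0)
x_nat = rng.random((4, 8))                    # stand-in batch of inputs in [0, 1]
epsilon, a, k = 0.1, 0.02, 5

x = np.copy(x_nat)                            # start from the natural examples
for _ in range(k):
    grad = rng.standard_normal(x.shape)       # dummy gradient (placeholder for sess.run)
    x += a * np.sign(grad)
    x = np.clip(x, x_nat - epsilon, x_nat + epsilon)  # stay inside the l_inf ball
    x = np.clip(x, 0, 1)                              # stay a valid pixel value

print(np.abs(x - x_nat).max())                # at most epsilon (up to float rounding)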
Example #11
Source File: pgd_whitebox.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def perturb(self, x_nat, y, sess):
    """Given a set of examples (x_nat, y), returns a set of adversarial
       examples within epsilon of x_nat in l_infinity norm."""
    if self.rand:
      x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
    else:
      x = np.copy(x_nat)

    for i in range(self.k):
      grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
                                            self.model.y_input: y})

      x += self.a * np.sign(grad)

      x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
      x = np.clip(x, 0, 1) # ensure valid pixel range

    return x 
Example #12
Source File: pgd_whitebox.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def perturb(self, x_nat, y, sess):
    """Given a set of examples (x_nat, y), returns a set of adversarial
       examples within epsilon of x_nat in l_infinity norm."""
    if self.rand:
      x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
    else:
      x = np.copy(x_nat)

    for i in range(self.k):
      grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
                                            self.model.y_input: y})

      x += self.a * np.sign(grad)

      x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
      x = np.clip(x, 0, 1) # ensure valid pixel range

    return x 
Example #13
Source File: pgd_cw_whitebox.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def perturb(self, x_nat, y, sess):
    """Given a set of examples (x_nat, y), returns a set of adversarial
       examples within epsilon of x_nat in l_infinity norm."""
    if self.rand:
      x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
    else:
      x = np.copy(x_nat)

    for i in range(self.k):
      grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
                                            self.model.y_input: y})

      x += self.a * np.sign(grad)

      x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
      x = np.clip(x, 0, 1) # ensure valid pixel range

    return x 
Example #14
Source File: gla_gpu.py    From Deep_VoiceChanger with MIT License
def auto_inverse(self, whole_spectrum):
        whole_spectrum = np.copy(whole_spectrum).astype(complex)
        whole_spectrum[whole_spectrum < 1] = 1
        overwrap = self.buffer_size * 2
        height = whole_spectrum.shape[0]
        parallel_dif = (height-overwrap) // self.parallel
        if height < self.parallel*overwrap:
            raise Exception('voice length is too small to use gpu, or parallel number is too big')

        spec = [self.inverse(whole_spectrum[range(i, i+parallel_dif*self.parallel, parallel_dif), :]) for i in tqdm.tqdm(range(parallel_dif+overwrap))]
        spec = spec[overwrap:]
        spec = np.concatenate(spec, axis=1)
        spec = spec.reshape(-1, self.wave_len)

        # NOTE: the code below does not yet account for wave_len and wave_dif; to be fixed.
        wave = np.fft.ifft(spec, axis=1).real
        pad = np.zeros((wave.shape[0], 2), dtype=float)
        wave = np.concatenate([wave, pad], axis=1)

        dst = np.zeros((wave.shape[0]+3)*self.wave_dif, dtype=float)
        for i in range(4):
            w = wave[range(i, wave.shape[0], 4),:]
            w = w.reshape(-1)
            dst[i*self.wave_dif:i*self.wave_dif+len(w)] += w
        return dst*0.5 
Example #15
Source File: Constraint.py    From fullrmc with GNU Affero General Public License v3.0
def _set_used_data_weights(self, limitsIndexStart=None, limitsIndexEnd=None):
        # set used dataWeights
        if self.__dataWeights is None:
            self._usedDataWeights = None
        else:
            if limitsIndexStart is None:
                limitsIndexStart = 0
            if limitsIndexEnd is None:
                limitsIndexEnd = self.__experimentalData.shape[0]
            self._usedDataWeights  = np.copy(self.dataWeights[limitsIndexStart:limitsIndexEnd+1])
            assert np.sum(self._usedDataWeights), LOGGER.error("used points dataWeights are all zero.")
            self._usedDataWeights /= FLOAT_TYPE( np.sum(self._usedDataWeights) )
            self._usedDataWeights *= FLOAT_TYPE( len(self._usedDataWeights) )
        # dump to repository
        if self.engine is not None:
            isNormalFrame, isMultiframe, isSubframe = self.engine.get_frame_category(frame=self.engine.usedFrame)
            if isSubframe:
                LOGGER.usage("Setting experimental data weight for multiframe '%s' subframe. This is not going to automatically propagate to all other subframes."%(self.engine.usedFrame,))
        self._dump_to_repository({'_usedDataWeights': self._usedDataWeights}) 
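Worth noting: self.dataWeights[limitsIndexStart:limitsIndexEnd+1] is a basic slice, and basic slicing in NumPy returns a view, so it is the np.copy that keeps the in-place /= and *= normalisation from writing back into the original weights. A minimal illustration, independent of fullrmc:

import numpy as np

weights = np.array([1.0, 2.0, 3.0, 4.0])

used = np.copy(weights[1:4])   # copy of the slice; the bare slice would be a view
used /= np.sum(used)           # in-place normalisation to unit sum
used *= len(used)              # rescale so the mean weight is 1

print(used)      # [0.666... 1. 1.333...]
print(weights)   # [1. 2. 3. 4.] - untouched thanks to the copy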
Example #16
Source File: pgd_cw_whitebox.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def perturb(self, x_nat, y, sess):
    """Given a set of examples (x_nat, y), returns a set of adversarial
       examples within epsilon of x_nat in l_infinity norm."""
    if self.rand:
      x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
    else:
      x = np.copy(x_nat)

    for i in range(self.k):
      grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
                                            self.model.y_input: y})

      x += self.a * np.sign(grad)

      x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
      x = np.clip(x, 0, 1) # ensure valid pixel range

    return x 
Example #17
Source File: test_utils.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def almost_equal_ignore_nan(a, b, rtol=None, atol=None):
    """Test that two NumPy arrays are almost equal (ignoring NaN in either array).
    Combines a relative and absolute measure of approximate equality.
    If either the relative or absolute check passes, the arrays are considered equal.
    Including an absolute check resolves issues with the relative check where all
    array values are close to zero.

    Parameters
    ----------
    a : np.ndarray
    b : np.ndarray
    rtol : None or float
        The relative threshold. Default threshold will be used if set to ``None``.
    atol : None or float
        The absolute threshold. Default threshold will be used if set to ``None``.
    """
    a = np.copy(a)
    b = np.copy(b)
    nan_mask = np.logical_or(np.isnan(a), np.isnan(b))
    a[nan_mask] = 0
    b[nan_mask] = 0

    return almost_equal(a, b, rtol, atol) 
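The two np.copy calls are what make this utility non-destructive: without them, zeroing the NaN positions would modify the arrays the caller passed in. The masking step in isolation:

import numpy as np

a = np.array([1.0, np.nan, 3.0])
b = np.array([1.0, 2.0, np.nan])

a_work, b_work = np.copy(a), np.copy(b)
nan_mask = np.logical_or(np.isnan(a_work), np.isnan(b_work))
a_work[nan_mask] = 0
b_work[nan_mask] = 0

print(a_work, b_work)   # [1. 0. 0.] [1. 0. 0.] - NaN positions zeroed in the copies
print(a, b)             # the caller's arrays still contain their NaNs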
Example #18
Source File: Constraint.py    From fullrmc with GNU Affero General Public License v3.0
def _get_constraints_data(self, frame):
        """Get constraint and data for given frame. This is meant to be used
        internally. If used wrong, engine values can be altered unvoluntarely.
        It's generally meant to be used for plot and export purposes.

        :Parameters:
            #. frame (string): can be a traditional frame, a subframe, or
               a multiframe

        :Returns:
            #. dataLUT (dict): a dictionary where keys are the given frame and
               all subframes if a multiframe is given. Values are dictionaries
               of the constraint and data copy
        """
        dataLUT = self._get_constraints_copy(frame)
        for frm in dataLUT:
            _constraint = dataLUT[frm]
            _data       = _constraint.data
            if _data is None or _constraint.engine.state != _constraint.state:
                LOGGER.usage("Computing constraint '{name}' data @{frame} without updating nor altering constraint properties and stochastic engine repository files".format(name=self.constraintName, frame=frm))
                _data, _ = _constraint.compute_data(update=False)
            dataLUT[frm] = {'constraint':_constraint, 'data':_data}
        # return
        return dataLUT 
Example #19
Source File: resizeScans.py    From pytorch-mri-segmentation-3D with MIT License
def convertSize2(from_path, to_path, new_size, interpolation = 'interpolate'):
	if interpolation == 'interpolate':
		spline_order = [2]
	elif interpolation == 'nearest':
		spline_order = [0]

	img_np, affine = PP.numpyFromScan(from_path, get_affine = True)
	shape = img_np.shape
	new_affine = np.copy(affine)
	r1 = float(new_size[0]) / shape[0]
	r2 = float(new_size[1]) / shape[1] 
	r3 = float(new_size[2]) / shape[2] 
	new_affine[:,0] /= r1
	new_affine[:,1] /= r2
	new_affine[:,2] /= r3

	img_np = AUGM.applyScale([img_np], [r1,r2,r3], spline_order)[0].squeeze()

	PP.saveScan(img_np, new_affine, to_path)
	return new_affine 
Example #20
Source File: test_utils.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def assert_almost_equal_ignore_nan(a, b, rtol=None, atol=None, names=('a', 'b')):
    """Test that two NumPy arrays are almost equal (ignoring NaN in either array).
    Combines a relative and absolute measure of approximate equality.
    If either the relative or absolute check passes, the arrays are considered equal.
    Including an absolute check resolves issues with the relative check where all
    array values are close to zero.

    Parameters
    ----------
    a : np.ndarray
    b : np.ndarray
    rtol : None or float
        The relative threshold. Default threshold will be used if set to ``None``.
    atol : None or float
        The absolute threshold. Default threshold will be used if set to ``None``.
    """
    a = np.copy(a)
    b = np.copy(b)
    nan_mask = np.logical_or(np.isnan(a), np.isnan(b))
    a[nan_mask] = 0
    b[nan_mask] = 0

    assert_almost_equal(a, b, rtol, atol, names) 
Example #21
Source File: _mxnet_converter.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def check_error(model, path, shapes, output = 'softmax_output', verbose = True):
    """
    Check the difference between predictions from MXNet and CoreML.
    """
    coreml_model = _coremltools.models.MLModel(path)
    input_data = {}
    input_data_copy = {}
    for ip in shapes:
        input_data[ip] = _np.random.rand(*shapes[ip]).astype('f')
        input_data_copy[ip] = _np.copy(input_data[ip])

    dataIter = _mxnet.io.NDArrayIter(input_data_copy)
    mx_out = model.predict(dataIter).flatten()

    e_out_dict = coreml_model.predict(_mxnet_remove_batch(input_data))
    e_out = e_out_dict[output].flatten()
    error = _np.linalg.norm(e_out - mx_out)

    if verbose:
        print("First few predictions from CoreML : %s" % e_out[0:10])
        print("First few predictions from MXNet  : %s" % e_out[0:10])
        print("L2 Error on random data %s" % error)
    return error 
Example #22
Source File: align.py    From QCElemental with BSD 3-Clause "New" or "Revised" License
def align_coordinates(self, geom, *, reverse=False) -> Array:
        """suitable for geometry or displaced geometry"""

        algeom = np.copy(geom)
        if reverse:
            algeom = algeom.dot(self.rotation)
            algeom = algeom + self.shift
            if self.mirror:
                algeom[:, 1] *= -1.0
        else:
            if self.mirror:
                algeom[:, 1] *= -1.0
            algeom = algeom - self.shift
            algeom = algeom.dot(self.rotation)
        algeom = algeom[self.atommap, :]
        # mirror-wise, rsm/msr == rms/msr

        return algeom 
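For intuition, here is a hand-rolled round trip with a made-up rotation, shift, and atom permutation (mirroring omitted, and not necessarily QCElemental's exact sign/ordering convention): aligning a geometry and then undoing the permutation, rotation, and shift recovers the original coordinates.

import numpy as np

theta = np.pi / 3
rotation = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                     [np.sin(theta),  np.cos(theta), 0.0],
                     [0.0,            0.0,           1.0]])
shift = np.array([1.0, -2.0, 0.5])
atommap = np.array([2, 0, 1])                 # reorder atoms

geom = np.array([[0.0, 0.0, 0.0],
                 [1.0, 0.0, 0.0],
                 [0.0, 1.0, 0.0]])

aligned = np.copy(geom)                       # leave the input untouched
aligned = (aligned - shift).dot(rotation)
aligned = aligned[atommap, :]

restored = aligned[np.argsort(atommap), :]    # undo the permutation
restored = restored.dot(rotation.T) + shift   # undo rotation, then shift

print(np.allclose(restored, geom))            # True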
Example #23
Source File: filter.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def copy(self):
        """Creates a new object with same state as self.

        Returns:
            copy (Filter): Copy of self"""
        raise NotImplementedError 
Example #24
Source File: filter.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def update(self, other, copy_buffer=False):
        """Takes another filter and only applies the information from the
        buffer.

        Using the notation `F(state, buffer)`:
        given `Filter1(x1, y1)` and `Filter2(x2, yt)`,
        `update` modifies `Filter1` to `Filter1(x1 + yt, y1)`.
        If `copy_buffer`, then `Filter1` is modified to
        `Filter1(x1 + yt, yt)`.
        """
        self.rs.update(other.buffer)
        if copy_buffer:
            self.buffer = other.buffer.copy()
        return 
Example #25
Source File: align.py    From QCElemental with BSD 3-Clause "New" or "Revised" License
def align_vector(self, vec):
        """suitable for vector attached to molecule"""

        # sensible? TODO
        # alvec = np.copy(vec)
        # if self.mirror:
        #    alvec[:, 1] *= -1
        return vec.dot(self.rotation) 
Example #26
Source File: align.py    From QCElemental with BSD 3-Clause "New" or "Revised" License
def align_gradient(self, grad) -> Array:
        """suitable for vector system attached to atoms"""

        # sensible? TODO
        # algrad = np.copy(grad)
        # if self.mirror:
        #    algrad[:, 1] *= -1
        algrad = grad.dot(self.rotation)
        algrad = algrad[self.atommap]

        return algrad 
Example #27
Source File: demo.py    From cascade-rcnn_Pytorch with MIT License
def _get_image_blob(im):
    """Converts an image into a network input.
    Arguments:
      im (ndarray): a color image in BGR order
    Returns:
      blob (ndarray): a data blob holding an image pyramid
      im_scale_factors (list): list of image scales (relative to im) used
        in the image pyramid
    """
    im_orig = im.astype(np.float32, copy=True)  # RGB
    im_orig /= 255.0
    im_orig -= cfg.PIXEL_MEANS
    im_orig /= cfg.PIXEL_STDS

    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])

    processed_ims = []
    im_scale_factors = []

    for target_size in cfg.TEST.SCALES:
        im_scale = float(target_size) / float(im_size_min)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
            im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
        im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                        interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(im)

    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)

    return blob, np.array(im_scale_factors) 
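The astype(np.float32, copy=True) at the top is what makes the in-place /= and -= normalisation safe: the caller's image is never modified. With copy=False, astype is allowed to return the input array itself when the dtype (and layout) already match, which some of the other examples above rely on to avoid a copy. A quick illustration of the difference:

import numpy as np

im = np.zeros((2, 2), dtype=np.float32)

a = im.astype(np.float32, copy=True)    # always a newly allocated array
b = im.astype(np.float32, copy=False)   # dtype already matches, so no copy

a -= 1.0
print(im.max())          # 0.0 - original untouched
b -= 1.0
print(im.max())          # -1.0 - b aliased the original
print(a is im, b is im)  # False True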
Example #28
Source File: c3d.py    From Recipes with MIT License
def rgb2caffe(im, out_size=(128, 171)):
    '''
    Converts an RGB image to caffe format and downscales it as needed by C3D

    Parameters
    ----------
    im : numpy array
        an RGB image
    out_size : tuple of int
        target (height, width) to downscale to

    Returns
    -------
    a caffe image (channel, height, width) in BGR format

    '''
    im=np.copy(im)
    if len(im.shape)==2: # Make sure the image has 3 channels
        im = color.gray2rgb(im)

    h, w, _ = im.shape
    im = skimage.transform.resize(im, out_size, preserve_range=True)
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)

    # Convert to BGR
    im = im[::-1, :, :]

    return np.array(im,theano.config.floatX) 
Example #29
Source File: dummy_vec_env.py    From lirpg with MIT License
def step_wait(self):
        for i in range(self.num_envs):
            obs_tuple, self.buf_rews[i], self.buf_dones[i], self.buf_infos[i] = self.envs[i].step(self.actions[i])
            if self.buf_dones[i]:
                obs_tuple = self.envs[i].reset()
            if isinstance(obs_tuple, (tuple, list)):
                for t,x in enumerate(obs_tuple):
                    self.buf_obs[t][i] = x
            else:
                self.buf_obs[0][i] = obs_tuple
        return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones),
                self.buf_infos.copy()) 
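Returning np.copy(self.buf_rews) and np.copy(self.buf_dones) hands the caller a snapshot, so the values it holds are not overwritten when the vectorised env reuses the same buffers on the next step. A tiny illustration with a hypothetical buffer (not the baselines API):

import numpy as np

buf = np.zeros(3)

def step_returning_alias():
    buf[:] = np.random.rand(3)
    return buf                # caller sees every later overwrite

def step_returning_copy():
    buf[:] = np.random.rand(3)
    return np.copy(buf)       # snapshot, safe to keep around

r1, r2 = step_returning_copy(), step_returning_copy()
print(np.array_equal(r1, r2))            # False: each call kept its own values

r1, r2 = step_returning_alias(), step_returning_alias()
print(r1 is r2, np.array_equal(r1, r2))  # True True: both names alias the buffer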
Example #30
Source File: c3d.py    From Recipes with MIT License
def convert_back(raw_im, image_mean=None,idx=0):
    '''
    Converts a Caffe-format image back to the standard format so that it can be plotted.

    Parameters
    ----------
    raw_im : numpy array
        a BGR Caffe image; format (channel, height, width)
    image_mean : numpy array or None
        the C3D mean to add back (skipped if None)
    idx : integer (default: 0)
        position in the snippet (used for mean addition, but the differences are very small)

    Returns
    -------
    an RGB image; format (height, width, channel)
    '''

    raw_im=np.copy(raw_im)
    if image_mean is not None:
        raw_im += image_mean[idx,:,8:120,29:141].squeeze()

    # Convert to RGB
    raw_im = raw_im[::-1, :, :]

    # Back in (y,w,channel) order
    im = np.array(np.swapaxes(np.swapaxes(raw_im, 1, 0), 2, 1),np.uint8)
    return im