Python cv2.multiply() Examples

The following are 16 code examples of cv2.multiply(), collected from open-source projects. The source file and project are noted above each example.
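As a refresher on the function itself: cv2.multiply(src1, src2[, dst[, scale[, dtype]]]) multiplies two arrays (or an array and a scalar) element-wise and saturates the result to the output dtype. A minimal sketch, independent of the projects below:

import cv2
import numpy as np

a = np.array([[10, 200]], dtype=np.uint8)
b = np.array([[3, 3]], dtype=np.uint8)
print(cv2.multiply(a, b))    # [[ 30 255]] -- 200*3 = 600 saturates to 255 for uint8
print(cv2.multiply(a, 2.0))  # [[ 20 255]] -- scalar factors work too, as several examples below rely on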
Example #1
Source File: photometric.py    From mmcv with Apache License 2.0
def imnormalize_(img, mean, std, to_rgb=True):
    """Inplace normalize an image with mean and std.

    Args:
        img (ndarray): Image to be normalized.
        mean (ndarray): The mean to be used for normalization.
        std (ndarray): The std to be used for normalization.
        to_rgb (bool): Whether to convert to RGB.

    Returns:
        ndarray: The normalized image.
    """
    # cv2 inplace normalization does not accept uint8
    assert img.dtype != np.uint8
    mean = np.float64(mean.reshape(1, -1))
    stdinv = 1 / np.float64(std.reshape(1, -1))
    if to_rgb:
        cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)  # inplace
    cv2.subtract(img, mean, img)  # inplace
    cv2.multiply(img, stdinv, img)  # inplace
    return img 
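Since the assertion rejects uint8 input, the image must be cast to float first. A hypothetical call (the mean/std values are the common ImageNet statistics, used here only for illustration):

img = cv2.imread('sample.jpg').astype(np.float32)  # uint8 would trip the assert
mean = np.array([123.675, 116.28, 103.53])
std = np.array([58.395, 57.12, 57.375])
img = imnormalize_(img, mean, std, to_rgb=True)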
Example #2
Source File: cv.py    From deepstar with BSD 3-Clause Clear License
def alpha_blend(background_, foreground_, mask_):
    background = background_.copy()
    foreground = foreground_.copy()
    mask = mask_.copy()

    background = background.astype(float)
    foreground = foreground.astype(float)
    mask = mask.astype(float) / 255
    foreground = cv2.multiply(mask, foreground)
    background = cv2.multiply(1.0 - mask, background)
    image = cv2.add(foreground, background)

    return image 
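A hedged usage sketch (the paths are placeholders; all three images must have the same shape, with the mask white where the foreground should show through):

bg = cv2.imread('background.jpg')
fg = cv2.imread('foreground.jpg')
m = cv2.imread('mask.png')  # 3-channel, 255 = foreground
out = alpha_blend(bg, fg, m).astype(np.uint8)  # the function returns float, so cast before display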
Example #3
Source File: photometric.py    From mmcv with Apache License 2.0
def imdenormalize(img, mean, std, to_bgr=True):
    assert img.dtype != np.uint8
    mean = mean.reshape(1, -1).astype(np.float64)
    std = std.reshape(1, -1).astype(np.float64)
    img = cv2.multiply(img, std)  # make a copy
    cv2.add(img, mean, img)  # inplace
    if to_bgr:
        cv2.cvtColor(img, cv2.COLOR_RGB2BGR, img)  # inplace
    return img 
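This is the inverse of imnormalize_() from Example #1. An illustrative round trip, assuming img, mean and std as in the illustrative call under Example #1:

normalized = imnormalize_(img.copy(), mean, std, to_rgb=True)
restored = imdenormalize(normalized, mean, std, to_bgr=True)
# restored should match the original float image up to floating-point error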
Example #4
Source File: annotators.py    From videoflow with MIT License
def _annotate(self, im : np.array, annotations : list) -> np.array:
        '''
        - Arguments:
            - im: np.array of shape (h, w, 3)
            - annotations: a list with 3 entries:
                - masks: np.array of shape (nb_masks, h, w)
                - classes: np.array of shape (nb_masks, )
                - scores: np.array of shape (nb_masks, )
        
        - Returns:
            - annotated_im: image with the visual annotations embedded in it.
        '''
        masks = annotations[0]
        classes = annotations[1]
        
        to_return = im.copy().astype(float)

        # TODO: Add border to masks
        # TODO: Think about how to solve issue of overlapping masks
        # TODO: Add class names to masks

        for idx, mask in enumerate(masks):
            alpha = cv2.merge((mask, mask, mask))
            alpha = alpha.astype(float)
            alpha[alpha == 1.0] = self._transparency

            #1. Mask foreground
            foreground = np.zeros_like(to_return, dtype = float)
            foreground[:] = self.colors[int(classes[idx]) % len(self.colors)]
            foreground = cv2.multiply(alpha, foreground)
            
            #2. Image background
            background = cv2.multiply(1.0 - alpha, to_return)
            to_return = cv2.add(foreground, background)
        
        return to_return.astype(np.uint8) 
Example #5
Source File: transformers.py    From videoflow with MIT License
def _mask(self, im : np.array, mask : np.array) -> np.array:
        if mask.shape[:2] != im.shape[:2]:
            raise ValueError("`mask` does not have same dimensions as `im`")
        im = im.astype(float)
        alpha = cv2.merge((mask, mask, mask))
        masked = cv2.multiply(im, alpha)
        return masked.astype(np.uint8) 
Example #6
Source File: target_roi_builder.py    From ethoscope with GNU General Public License v3.0
def _find_blobs(self, im, scoring_fun):
        grey= cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
        rad = int(self._adaptive_med_rad * im.shape[1])
        if rad % 2 == 0:
            rad += 1

        med = np.median(grey)
        scale = 255 / med
        cv2.multiply(grey,scale,dst=grey)
        bin = np.copy(grey)
        score_map = np.zeros_like(bin)
        for t in range(0, 255,5):
            cv2.threshold(grey, t, 255,cv2.THRESH_BINARY_INV,bin)
            if np.count_nonzero(bin) > 0.7 * im.shape[0] * im.shape[1]:
                continue
            if CV_VERSION == 3:
                _, contours, h = cv2.findContours(bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            else:
                contours, h = cv2.findContours(bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

            bin.fill(0)
            for c in contours:
                score = scoring_fun(c, im)
                if score >0:
                    cv2.drawContours(bin,[c],0,score,-1)
            cv2.add(bin, score_map,score_map)
        return score_map 
Example #7
Source File: adaptive_bg_tracker.py    From ethoscope with GNU General Public License v3.0
def _pre_process_input_minimal(self, img, mask, t, darker_fg=True):
        blur_rad = int(self._object_expected_size * np.max(img.shape) / 2.0)

        if blur_rad % 2 == 0:
            blur_rad += 1

        if self._buff_grey is None:
            self._buff_grey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            if mask is None:
                mask = np.ones_like(self._buff_grey) * 255

        cv2.cvtColor(img,cv2.COLOR_BGR2GRAY, self._buff_grey)
        # cv2.imshow("dbg",self._buff_grey)
        cv2.GaussianBlur(self._buff_grey,(blur_rad,blur_rad),1.2, self._buff_grey)
        if darker_fg:
            cv2.subtract(255, self._buff_grey, self._buff_grey)

        #
        mean = cv2.mean(self._buff_grey, mask)

        scale = 128. / mean[0]

        cv2.multiply(self._buff_grey, scale, dst = self._buff_grey)


        if mask is not None:
            cv2.bitwise_and(self._buff_grey, mask, self._buff_grey)
            return self._buff_grey 
Example #8
Source File: multi_fly_tracker.py    From ethoscope with GNU General Public License v3.0
def _pre_process_input_minimal(self, img, mask, t, darker_fg=True):
        blur_rad = int(self._object_expected_size * np.max(img.shape) / 2.0)

        if blur_rad % 2 == 0:
            blur_rad += 1

        if self._buff_grey is None:
            self._buff_grey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            if mask is None:
                mask = np.ones_like(self._buff_grey) * 255

        cv2.cvtColor(img,cv2.COLOR_BGR2GRAY, self._buff_grey)
        # cv2.imshow("dbg",self._buff_grey)
        cv2.GaussianBlur(self._buff_grey,(blur_rad,blur_rad),1.2, self._buff_grey)
        if darker_fg:
            cv2.subtract(255, self._buff_grey, self._buff_grey)

        #
        mean = cv2.mean(self._buff_grey, mask)

        scale = 128. / mean[0]

        cv2.multiply(self._buff_grey, scale, dst = self._buff_grey)


        if mask is not None:
            cv2.bitwise_and(self._buff_grey, mask, self._buff_grey)
            return self._buff_grey 
Example #9
Source File: ColorAugmenters.py    From impy with Apache License 2.0
def changeBrightness(self, frame = None, coefficient = None):
		"""
		Change the brightness of a frame.
		Args:
			frame: A tensor that contains an image.
			coefficient: A float that changes the brightness of the image.
									Defaults to a random number in [0, 2).
		Returns:
			A tensor with its brightness property changed.
		"""
		# Assertions
		if (self.assertion.assertNumpyType(frame) == False):
			raise ValueError("Frame has to be a numpy array.")
		if (len(frame.shape) == 3):
			channels = 3
		elif (len(frame.shape) == 2):
			channels = 1
		else:
			raise Exception("ERROR: Frame has to be either 1 or 3 channels.")
		if (coefficient is None):
			# Cast to a builtin float so the type check below accepts the default.
			coefficient = float(np.random.rand() * 2)
		if (type(coefficient) != float):
			raise TypeError("ERROR: Coefficient parameter has to be of type float.")
		# Change brightness
		if (channels == 3):
			for i in range(channels):
				frame[:, :, i] = cv2.multiply(frame[:, :, i], coefficient)
		elif (channels == 1):
			frame[:, :] = cv2.multiply(frame[:, :], coefficient)
		# Force cast in case of overflow
		if (not (frame.dtype == np.uint8)):
			print("WARNING: Image is not dtype uint8. Forcing type.")
			frame = frame.astype(np.uint8)
		return frame 
Example #10
Source File: iou_util.py    From SCCvSD with BSD 2-Clause "Simplified" License
def ut_generate_grassland_mask():
    # An example of generating a soft mask for grassland segmentation
    import scipy.io as sio

    index = 16 - 1   # image index from 1
    data = sio.loadmat('../../data/UoT_soccer/train_val.mat')
    annotation = data['annotation']
    homo = annotation[0][index][1]  # ground truth homography

    # step 1: generate a 'hard' grass mask
    template_h = 74
    template_w = 115
    template_im = np.ones((template_h, template_w, 1), dtype=np.uint8) * 255

    grass_mask = IouUtil.homography_warp(homo, template_im, (1280, 720), (0))
    cv.imshow('grass mask', grass_mask)
    cv.waitKey(0)

    # step 2: generate a 'soft' grass mask
    dist_threshold = 30  # change this value to change mask boundary
    _, binary_im = cv.threshold(grass_mask, 10, 255, cv.THRESH_BINARY_INV)

    dist_im = cv.distanceTransform(binary_im, cv.DIST_L2, cv.DIST_MASK_PRECISE)

    dist_im[dist_im > dist_threshold] = dist_threshold
    soft_mask = 1.0 - dist_im / dist_threshold  # normalize to [0, 1]

    cv.imshow('soft mask', soft_mask)
    cv.waitKey(0)

    # step 3: soft mask on the original image
    stacked_mask = np.stack((soft_mask,) * 3, axis=-1)
    im = cv.imread('../../data/16.jpg')
    soft_im = cv.multiply(stacked_mask, im.astype(np.float32)).astype(np.uint8)
    cv.imshow('soft masked image', soft_im)
    cv.waitKey(0) 
Example #11
Source File: exampleTrainer.py    From df with Mozilla Public License 2.0
def get_training_data( images,landmarks,batch_size):
  while 1:
    indices = numpy.random.choice(range(0,images.shape[0]),size=batch_size,replace=True)
    for i,index in enumerate(indices):
      image = images[index]
      seed  = int(time.time())
      image = random_transform( image, seed, **random_transform_args )
      closest = ( numpy.mean(numpy.square(landmarks[index]-landmarks),axis=(1,2)) ).argsort()[1:20]
      closest = numpy.random.choice(closest, 6, replace=False)
      closestMerged = numpy.dstack([  
                                      cv2.resize( random_transform( images[closest[0]][:,:,:3] ,seed, **random_transform_args) , (64,64)), 
                                      cv2.resize( random_transform( images[closest[1]][:,:,:3] ,seed, **random_transform_args) , (64,64)), 
                                      cv2.resize( random_transform( images[closest[2]][:,:,:3] ,seed, **random_transform_args) , (64,64)),
                                      cv2.resize( random_transform( images[closest[3]][:,:,:3] ,seed, **random_transform_args) , (64,64)),
                                      cv2.resize( random_transform( images[closest[4]][:,:,:3] ,seed, **random_transform_args) , (64,64)),
                                      cv2.resize( random_transform( images[closest[5]][:,:,:3] ,seed, **random_transform_args) , (64,64)),
                                    ])

      if i == 0:
          warped_images  = numpy.empty( (batch_size,)  + (64,64,3),   image.dtype )
          example_images = numpy.empty( (batch_size,)  + (64,64,18),  image.dtype )
          target_images  = numpy.empty( (batch_size,)  + (128,128,3), image.dtype )
          mask_images    = numpy.empty( (batch_size,)  + (128,128,1), image.dtype )

      warped_image =  random_warp( image[:,:,:3] )

      warped_image =  cv2.GaussianBlur( warped_image,(91,91),0 )

      image_mask = image[:,:,3].reshape((image.shape[0],image.shape[1],1)) * numpy.ones((image.shape[0],image.shape[1],3)).astype(float)


      foreground = cv2.multiply(image_mask, warped_image.astype(float))
      background = cv2.multiply(1.0 - image_mask, image[:,:,:3].astype(float))

      warped_image = numpy.add(background,foreground)

      warped_image = cv2.resize(warped_image,(64,64))

      warped_images[i]  = warped_image
      example_images[i] = closestMerged
      target_images[i]  = cv2.resize( image[:,:,:3], (128,128) )
      mask_images[i]    = cv2.resize( image[:,:,3], (128,128) ).reshape((128,128,1))
    yield warped_images,example_images,target_images,mask_images 
Example #12
Source File: blend.py    From imgaug with MIT License
def _blend_alpha_uint8_elementwise_(image_fg, image_bg, alphas):
    betas = 1.0 - alphas

    is_2d = (alphas.ndim == 2 or alphas.shape[2] == 1)
    area = image_fg.shape[0] * image_fg.shape[1]
    if is_2d and area >= 64*64:
        if alphas.ndim == 3:
            alphas = alphas[:, :, 0]
            betas = betas[:, :, 0]

        result = []
        for c in range(image_fg.shape[2]):
            image_fg_mul = image_fg[:, :, c]
            image_bg_mul = image_bg[:, :, c]
            image_fg_mul = cv2.multiply(image_fg_mul, alphas, dtype=cv2.CV_8U)
            image_bg_mul = cv2.multiply(image_bg_mul, betas, dtype=cv2.CV_8U)
            image_fg_mul = cv2.add(image_fg_mul, image_bg_mul, dst=image_fg_mul)
            result.append(image_fg_mul)

        image_blend = _merge_channels(result, image_fg.ndim == 3)
        return image_blend
    else:
        if alphas.ndim == 2:
            alphas = alphas[..., np.newaxis]
            betas = betas[..., np.newaxis]
        if alphas.shape[2] != image_fg.shape[2]:
            alphas = np.tile(alphas, (1, 1, image_fg.shape[2]))
            betas = np.tile(betas, (1, 1, image_fg.shape[2]))

        alphas = alphas.ravel()
        betas = betas.ravel()
        input_shape = image_fg.shape

        image_fg_mul = image_fg.ravel()
        image_bg_mul = image_bg.ravel()
        image_fg_mul = cv2.multiply(
            image_fg_mul, alphas, dtype=cv2.CV_8U, dst=image_fg_mul
        )
        image_bg_mul = cv2.multiply(
            image_bg_mul, betas, dtype=cv2.CV_8U, dst=image_bg_mul
        )

        image_fg_mul = cv2.add(image_fg_mul, image_bg_mul, dst=image_fg_mul)

        return image_fg_mul.reshape(input_shape)


# Added in 0.5.0.
# (Extracted from blend_alpha().) 
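The dtype=cv2.CV_8U argument is what makes this work without an intermediate float image: cv2.multiply() accepts the uint8 pixels and float alphas, then rounds and saturates back to uint8 in one call. An illustrative one-liner mirroring that pattern:

px = np.array([[100, 200]], dtype=np.uint8)
al = np.array([[0.25, 0.9]], dtype=np.float64)
print(cv2.multiply(px, al, dtype=cv2.CV_8U))  # [[ 25 180]]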
Example #13
Source File: adaptive_bg_tracker.py    From ethoscope with GNU General Public License v3.0
def update(self, img_t, t, fg_mask=None):
        dt = float(t - self.last_t)
        if dt < 0:
            # raise EthoscopeException("Negative time interval between two consecutive frames")
            raise NoPositionError("Negative time interval between two consecutive frames")

        # clip the half life to the allowed range:
        self._current_half_life = np.clip(self._current_half_life, self._min_half_life, self._max_half_life)

        # ensure preallocated buffers exist. otherwise, initialise them
        if self._bg_mean is None:
            self._bg_mean = img_t.astype(np.float32)
            # self._bg_sd = np.zeros_like(img_t)
            # self._bg_sd.fill(128)

        if self._buff_alpha_matrix is None:
            self._buff_alpha_matrix = np.ones_like(img_t,dtype = np.float32)

        # the learning rate, alpha, is an exponential function of half life
        # it corresponds to how much the present frame should contribute to the background

        lam =  np.log(2)/self._current_half_life
        # how much the current frame should be accounted for
        alpha = 1 - np.exp(-lam * dt)

        # set up a matrix of learning rates; it is 0 where the foreground map is true
        self._buff_alpha_matrix.fill(alpha)
        if fg_mask is not None:
            cv2.dilate(fg_mask,None,fg_mask)
            cv2.subtract(self._buff_alpha_matrix, self._buff_alpha_matrix, self._buff_alpha_matrix, mask=fg_mask)


        if self._buff_invert_alpha_mat is None:
            self._buff_invert_alpha_mat = 1 - self._buff_alpha_matrix
        else:
            np.subtract(1, self._buff_alpha_matrix, self._buff_invert_alpha_mat)

        np.multiply(self._buff_alpha_matrix, img_t, self._buff_alpha_matrix)
        np.multiply(self._buff_invert_alpha_mat, self._bg_mean, self._buff_invert_alpha_mat)
        np.add(self._buff_alpha_matrix, self._buff_invert_alpha_mat, self._bg_mean)

        self.last_t = t 
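For reference, the update implements an exponential moving average: alpha = 1 - exp(-ln(2) * dt / half_life), so a frame arriving exactly one half-life after the previous one is blended in with weight 0.5, and the background mean becomes bg = alpha * frame + (1 - alpha) * bg.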
Example #14
Source File: adaptive_bg_tracker.py    From ethoscope with GNU General Public License v3.0
def _pre_process_input(self, img, mask, t):

        blur_rad = int(self._object_expected_size * np.max(img.shape) * 2.0)
        if blur_rad % 2 == 0:
            blur_rad += 1


        if self._buff_grey is None:
            self._buff_grey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            self._buff_grey_blurred = np.empty_like(self._buff_grey)
            # self._buff_grey_blurred = np.empty_like(self._buff_grey)
            if mask is None:
                mask = np.ones_like(self._buff_grey) * 255

            mask_conv = cv2.blur(mask,(blur_rad, blur_rad))

            self._buff_convolved_mask  = (1/255.0 *  mask_conv.astype(np.float32))


        cv2.cvtColor(img,cv2.COLOR_BGR2GRAY, self._buff_grey)

        hist = cv2.calcHist([self._buff_grey], [0], None, [256], [0,255]).ravel()
        hist = np.convolve(hist, [1] * 3)
        mode =  np.argmax(hist)

        self._smooth_mode.append(mode)
        self._smooth_mode_tstamp.append(t)

        if len(self._smooth_mode_tstamp) >2 and self._smooth_mode_tstamp[-1] - self._smooth_mode_tstamp[0] > self._smooth_mode_window_dt:
            self._smooth_mode.popleft()
            self._smooth_mode_tstamp.popleft()


        mode = np.mean(list(self._smooth_mode))
        scale = 128. / mode

        # cv2.GaussianBlur(self._buff_grey,(5,5), 1.5,self._buff_grey)

        cv2.multiply(self._buff_grey, scale, dst = self._buff_grey)


        cv2.bitwise_and(self._buff_grey, mask, self._buff_grey)

        cv2.blur(self._buff_grey,(blur_rad, blur_rad), self._buff_grey_blurred)
        #fixme could be optimised
        self._buff_grey_blurred = (self._buff_grey_blurred / self._buff_convolved_mask).astype(np.uint8)


        cv2.absdiff(self._buff_grey, self._buff_grey_blurred, self._buff_grey)

        if mask is not None:
            cv2.bitwise_and(self._buff_grey, mask, self._buff_grey)
            return self._buff_grey 
Example #15
Source File: separate_background.py    From detectron2-pipeline with MIT License
def separate_background(self, data):
        if "predictions" not in data:
            return

        predictions = data["predictions"]
        if "instances" not in predictions:
            return

        instances = predictions["instances"]
        if not instances.has("pred_masks"):
            return

        # Sum up all the instance masks
        mask = instances.pred_masks.cpu().sum(0) >= 1
        mask = mask.numpy().astype("uint8")*255
        # Create 3-channels mask
        mask = np.stack([mask, mask, mask], axis=2)

        # Apply a slight blur to the mask to soften edges
        mask = cv2.GaussianBlur(mask, self.me_kernel, 0)

        # Take the foreground input image
        foreground = data["image"]

        # Create a Gaussian blur for the background image
        background = cv2.GaussianBlur(foreground, self.bg_kernel, 0)

        if self.desaturate:
            # Convert background into grayscale
            background = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)

            # convert single channel grayscale image to 3-channel grayscale image
            background = cv2.cvtColor(background, cv2.COLOR_GRAY2RGB)

        # Convert uint8 to float
        foreground = foreground.astype(float)
        background = background.astype(float)

        # Normalize the alpha mask to keep intensity between 0 and 1
        mask = mask.astype(float)/255.0

        # Multiply the foreground with the mask
        foreground = cv2.multiply(foreground, mask)

        # Multiply the background with ( 1 - mask )
        background = cv2.multiply(background, 1.0 - mask)

        # Add the masked foreground and background
        dst_image = cv2.add(foreground, background)

        # Return a normalized output image for display
        data[self.dst] = dst_image.astype("uint8") 
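The last few steps implement the standard alpha blend dst = mask * foreground + (1 - mask) * background, computed in float so the blurred mask edges produce smooth transitions, then cast back to uint8.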
Example #16
Source File: ColorAugmenters.py    From impy with Apache License 2.0
def sharpening(self, frame = None, weight = None):
		"""
		Sharpens an image using the following system:
		frame = I(x, y, d)
		gray_frame(xi, yi) = sum(I(xi, yi, d) * [0.6, 0.3, 0.1])
		hff_kernel = [[-1,-1,-1],[-1,8,-1],[-1,-1,-1]]
		edges(x, y) = hff_kernel * gray_frame
		weight = 2.0
		sharpened(x, y, di) = (edges x weight) + frame(x, y, di)
		Args:
			frame: A tensor that contains an image.
			weight: A float that contains the weight coefficient.
		Returns:
			A sharpened tensor.
		"""
		# Assertions
		if (self.assertion.assertNumpyType(frame) == False):
			raise ValueError("Frame has to be a numpy array.")
		if (len(frame.shape) == 3):
			channels = 3
		elif (len(frame.shape) == 2):
			channels = 1
		else:
			raise ValueError("Frame not understood.")
		if (weight is None):
			weight = 2.0
		if (type(weight) != float):
			raise TypeError("ERROR: Weight has to be a float.")
		# Local variables
		hff_kernel = np.array([[-1,-1,-1],[-1,8,-1],[-1,-1,-1]])
		# Logic
		if (channels == 3):
			gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
			edges = cv2.filter2D(gray_frame, -1, hff_kernel)
			edges = cv2.multiply(edges, weight)
			sharpened = np.zeros(frame.shape, np.uint8)
			for i in range(channels):
				sharpened[:, :, i] = cv2.add(frame[:, :, i], edges)
		else:
			edges = cv2.filter2D(frame, -1, hff_kernel)
			edges = cv2.multiply(edges, weight)
			sharpened = cv2.add(frame, edges)
		if (not (sharpened.dtype == np.uint8)):
			print("WARNING: Image is not dtype uint8. Forcing type.")
			sharpened = sharpened.astype(np.uint8)
		return sharpened
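A hedged usage sketch (the augmenter instantiation is hypothetical; construct it however ColorAugmenters.py expects):

sharp = augmenter.sharpening(frame=img, weight=2.0)  # img: uint8 BGR array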