Python cv2.imwrite() Examples

The following code examples show how to use cv2.imwrite(). They are extracted from open source Python projects.
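
Before the project examples, here is a minimal, self-contained sketch of the basic call (the file names and the dummy image below are illustrative only):

import cv2
import numpy as np

# Build a dummy 100x100 solid-green BGR image purely for demonstration.
img = np.zeros((100, 100, 3), dtype=np.uint8)
img[:, :] = (0, 255, 0)

# The output format is chosen from the file extension.
# imwrite() returns a bool; check it, because a failed write (for example a
# missing output directory) does not raise an exception.
ok = cv2.imwrite('block.png', img)

# Optional encoder parameters, e.g. JPEG quality (0-100):
ok = cv2.imwrite('block.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), 90])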

Example 1
Project: SelfDrivingCar   Author: aguijarro   File: calibration_camera.py    (license)
def get_points():

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(7,5,0)
    objp = np.zeros((6*8,3), np.float32)
    objp[:,:2] = np.mgrid[0:8, 0:6].T.reshape(-1 , 2)

    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d points in real world space
    imgpoints = [] # 2d points in image plane.

    # Make a list of calibration images
    images = glob.glob('calibration_wide/GO*.jpg')

    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (8,6), None)

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            cv2.drawChessboardCorners(img, (8,6), corners, ret)
            #write_name = 'corners_found'+str(idx)+'.jpg'
            #cv2.imwrite(write_name, img)
            cv2.imshow('img', img)
            cv2.waitKey(500)

    cv2.destroyAllWindows()
    return objpoints, imgpoints 
Example 2
Project: AVSR-Deep-Speech   Author: pandeydivesh15   File: data_preprocessing_autoencoder.py    (GNU General Public License v2.0)
def crop_and_store(frame, mouth_coordinates, name):
	"""
	Args:
		1. frame:				The frame which has to be cropped.
		2. mouth_coordinates:	The coordinates which help in deciding which region is to be cropped.
		3. name:				The path name to be used for storing the cropped image.
	"""

	# Find bounding rectangle for mouth coordinates
	x, y, w, h = cv2.boundingRect(mouth_coordinates)

	mouth_roi = frame[y:y + h, x:x + w]

	h, w, channels = mouth_roi.shape
	# If the cropped region is very small, ignore this case.
	if h < 10 or w < 10:
		return
	
	resized = resize(mouth_roi, 32, 32)
	cv2.imwrite(name, resized) 
Example 3
Project: facial_emotion_recognition   Author: adamaulia   File: image_test.py    (license)
def test_image(addr):
    target = ['angry','disgust','fear','happy','sad','surprise','neutral']
    font = cv2.FONT_HERSHEY_SIMPLEX
    
    im = cv2.imread(addr)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray,scaleFactor=1.1)
    
    for (x, y, w, h) in faces:
            cv2.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 2,5)
            face_crop = im[y:y+h,x:x+w]
            face_crop = cv2.resize(face_crop,(48,48))
            face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2GRAY)
            face_crop = face_crop.astype('float32')/255
            face_crop = np.asarray(face_crop)
            face_crop = face_crop.reshape(1, 1,face_crop.shape[0],face_crop.shape[1])
            result = target[np.argmax(model.predict(face_crop))]
            cv2.putText(im,result,(x,y), font, 1, (200,0,0), 3, cv2.LINE_AA)
            
    cv2.imshow('result', im)
    cv2.imwrite('result.jpg',im)
    cv2.waitKey(0) 
Example 4
Project: Deep360Pilot-optical-flow   Author: yenchenlin   File: flo2img.py    (license)
def convert_wrapper(path, outpath, Debug=False):
    for filename in sorted(os.listdir(path)):
        if filename.endswith('.flo'):
            filename = filename.replace('.flo','')

            flow = read_flow(path, filename)
            flow_img = convert_flow(flow, 2.0)

            # NOTE: Change from BGR (OpenCV format) to RGB (Matlab format) to fit Matlab output
            flow_img = cv2.cvtColor(flow_img, cv2.COLOR_BGR2RGB)

            #print "Saving {}.png with shape: {}".format(filename, flow_img.shape)
            cv2.imwrite(outpath + filename + '.png', flow_img)

            if Debug:
                ret = imchecker(outpath + filename)



# Sanity check and comparison against the Matlab-version image, if available
Example 5
Project: Mini-Projects   Author: gaborvecsei   File: Capture_Img_To_Drive.py    (license)
def CaptureImage():
	imageName = 'DontCare.jpg' #Just a random string
	cap = cv2.VideoCapture(0)
	while(True):
	    # Capture frame-by-frame
	    ret, frame = cap.read()

	    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #For capture image in monochrome
	    rgbImage = frame #For capture the image in RGB color space

	    # Display the resulting frame
	    cv2.imshow('Webcam',rgbImage)
	    #Wait to press 'q' key for capturing
	    if cv2.waitKey(1) & 0xFF == ord('q'):
	        #Set the image name to the date it was captured
	        imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
	        #Save the image
	        cv2.imwrite(imageName, rgbImage)
	        break
	# When everything done, release the capture
	cap.release()
	cv2.destroyAllWindows()
	#Returns the captured image's name
	return imageName 
Example 6
Project: blind-watermark   Author: linyacool   File: encode.py    (license)
def encode(img_path, wm_path, res_path, alpha):
    img = cv2.imread(img_path)
    img_f = np.fft.fft2(img)
    height, width, channel = np.shape(img)
    watermark = cv2.imread(wm_path)
    wm_height, wm_width = watermark.shape[0], watermark.shape[1]
    x, y = range(height / 2), range(width)
    random.seed(height + width)
    random.shuffle(x)
    random.shuffle(y)
    tmp = np.zeros(img.shape)
    for i in range(height / 2):
        for j in range(width):
            if x[i] < wm_height and y[j] < wm_width:
                tmp[i][j] = watermark[x[i]][y[j]]
                tmp[height - 1 - i][width - 1 - j] = tmp[i][j]
    res_f = img_f + alpha * tmp
    res = np.fft.ifft2(res_f)
    res = np.real(res)
    cv2.imwrite(res_path, res, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) 
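
Note that this example (and the matching decoder in Example 28) is written for Python 2: height / 2 there is integer division, and random.shuffle() needs a mutable sequence, which range() no longer returns in Python 3. A minimal sketch of the equivalent index set-up under Python 3, reusing the variable names from the example above:

# Python 3 version of the index shuffling used above; height and width are the
# ints taken from the image shape, as in the example.
x = list(range(height // 2))   # floor division keeps the bound an integer
y = list(range(width))
random.seed(height + width)
random.shuffle(x)              # shuffle() requires a list, not a range object
random.shuffle(y)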
Example 7
Project: MusicGenerator   Author: Conchylicultor   File: imgconnector.py    (license)
def write_song(piano_roll, filename):
        """ Save the song on disk
        Args:
            piano_roll (np.array): a song object containing the tracks and melody
            filename (str): the path where to save the song (don't add the file extension)
        """
        note_played = piano_roll > 0.5
        piano_roll_int = np.uint8(piano_roll*255)

        b = piano_roll_int * (~note_played).astype(np.uint8)  # Note silenced
        g = np.zeros(piano_roll_int.shape, dtype=np.uint8)    # Empty channel
        r = piano_roll_int * note_played.astype(np.uint8)     # Notes played

        img = cv.merge((b, g, r))

        # TODO: We could insert a first column indicating the piano keys (black/white key)

        cv.imwrite(filename + '.png', img) 
Example 8
Project: lsun_2017   Author: ternaus   File: downscale_images.py    (MIT License)
def downscale(old_file_name):
    img = cv2.imread(os.path.join(old_file_name))

    new_file_name = (old_file_name
                     .replace('training', 'training_' + str(min_size))
                     .replace('validation', 'validation_' + str(min_size))
                     .replace('testing', 'testing_' + str(min_size))
                     )

    height, width, _ = img.shape

    if width > height:
        new_width = int(1.0 * width / height * min_size)
        new_height = min_size

    else:
        new_height = int(1.0 * height / width * min_size)
        new_width = min_size

    img_new = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_LINEAR)
    cv2.imwrite(new_file_name, img_new) 
Example 9
Project: Video-Classification-Action-Recognition   Author: qijiezhao   File: build_of.py    (license)
def dump_frames(vid_path):
    import cv2
    video = cv2.VideoCapture(vid_path)
    vid_name = vid_path.split('/')[-1].split('.')[0]
    out_full_path = os.path.join(out_path, vid_name)

    fcount = int(video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    try:
        os.mkdir(out_full_path)
    except OSError:
        pass
    file_list = []
    for i in xrange(fcount):
        ret, frame = video.read()
        assert ret
        cv2.imwrite('{}/{:06d}.jpg'.format(out_full_path, i), frame)
        access_path = '{}/{:06d}.jpg'.format(vid_name, i)
        file_list.append(access_path)
    print '{} done'.format(vid_name)
    sys.stdout.flush()
    return file_list 
Example 10
Project: piwall-cvtools   Author: infinnovation   File: interactive_demo_crop.py    (license)
def on_mouse(event, x, y, flags, params):
    # global img
    t = time()
    
    if event == cv2.EVENT_LBUTTONDOWN:
        print 'Start Mouse Position: '+str(x)+', '+str(y)
        sbox = [x, y]
        boxes.append(sbox)
             # print count
             # print sbox
             
    elif event == cv2.EVENT_LBUTTONUP:
        print 'End Mouse Position: '+str(x)+', '+str(y)
        ebox = [x, y]
        boxes.append(ebox)
        print boxes
        crop = img[boxes[-2][1]:boxes[-1][1],boxes[-2][0]:boxes[-1][0]]

        cv2.imshow('crop',crop)
        k =  cv2.waitKey(0)
        if ord('r')== k:
            cv2.imwrite('Crop'+str(t)+'.jpg',crop)
            print "Written to file" 
Example 11
Project: bib-tagger   Author: KateRita   File: find_bibs.py    (license)
def find_bibs(image):
  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY);
  binary = cv2.GaussianBlur(gray,(5,5),0)
  ret,binary = cv2.threshold(binary, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU);
  #binary = cv2.adaptiveThreshold(binary, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
  #ret,binary = cv2.threshold(binary, 190, 255, cv2.THRESH_BINARY);

  #lapl = cv2.Laplacian(image,cv2.CV_64F)
  #gray = cv2.cvtColor(lapl, cv2.COLOR_BGR2GRAY);
  #blurred = cv2.GaussianBlur(lapl,(5,5),0)
  #ret,binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU);
  #cv2.imwrite("lapl.jpg", lapl)

  edges = cv2.Canny(image,175,200)
  cv2.imwrite("edges.jpg", edges)
  binary = edges

  cv2.imwrite("binary.jpg", binary)
  contours,hierarchy = find_contours(binary)

  return get_rectangles(contours) 
Example 12
Project: bib-tagger   Author: KateRita   File: find_bibs.py    (license)
def find_lines(img):
  edges = cv2.Canny(img,100,200)
  threshold = 60
  minLineLength = 10
  lines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold, 0, minLineLength, 20);
  if (lines is None or len(lines) == 0):
      return

  #print lines
  for line in lines[0]:
    #print line
    cv2.line(img, (line[0],line[1]), (line[2],line[3]), (0,255,0), 2)
  cv2.imwrite("line_edges.jpg", edges)
  cv2.imwrite("lines.jpg", img) 
Example 13
Project: bib-tagger   Author: KateRita   File: test_featuredetector.py    (license)
def test_featuredetector(self):

        print self.photodir

        #read in images
        images = []
        for i in np.arange(1,7):
            images.append(cv2.imread(os.path.join(self.photodir,"Frosty5k","{}.jpg".format(i))))

        #read in bib
        bib = cv2.imread(os.path.join(self.photodir,"Frosty5k","bib.jpg"))

        for i in np.arange(1,7):
            image = images[i-1]
            bib_kp, image_kp, matches = fd.findMatchesBetweenImages(bib, image)
            output = fd.drawMatches(bib, bib_kp, image, image_kp, matches)

            ftoutdir = os.path.join(self.photooutdir,"features")
            print "Writing images to folder {}".format(ftoutdir)

            if not os.path.exists(ftoutdir):
                os.makedirs(ftoutdir)

            cv2.imwrite(os.path.join(ftoutdir,"{}matches.jpg".format(i)), output) 
Example 14
Project: pybot   Author: spillai   File: imshow_utils.py    (license)
def imshow_cv(label, im, block=False, text=None, wait=2): 
    vis = im.copy()
    print_status(vis, text=text)
    window_manager.imshow(label, vis)
    ch = cv2.waitKey(0 if block else wait) & 0xFF
    if ch == ord(' '):
        cv2.waitKey(0)
    if ch == ord('v'):
        print('Entering debug mode, image callbacks active')
        while True: 
            ch = cv2.waitKey(10) & 0xFF
            if ch == ord('q'): 
                print('Exiting debug mode!')
                break
    if ch == ord('s'):
        fn = 'img-%s.png' % time.strftime("%Y-%m-%d-%H-%M-%S")
        print 'Saving %s' % fn
        cv2.imwrite(fn, vis)
    elif ch == 27 or ch == ord('q'):
        sys.exit(1) 
Example 15
Project: chainer-gan-experiments   Author: Aixile   File: save_images.py    (MIT License)
def save_images_grid(imgs, path, grid_w=4, grid_h=4, post_processing=postprocessing_tanh, transposed=False):
    imgs = copy_to_cpu(imgs)
    if post_processing is not None:
        imgs = post_processing(imgs)
    b, ch, w, h = imgs.shape
    assert b == grid_w*grid_h

    imgs = imgs.reshape((grid_w, grid_h, ch, w, h))
    imgs = imgs.transpose(0, 1, 3, 4, 2)
    if transposed:
        imgs = imgs.reshape((grid_w, grid_h, w, h, ch)).transpose(1, 2, 0, 3, 4).reshape((grid_h*w, grid_w*h, ch))
    else:
        imgs = imgs.reshape((grid_w, grid_h, w, h, ch)).transpose(0, 2, 1, 3, 4).reshape((grid_w*w, grid_h*h, ch))
    if ch==1:
        imgs = imgs.reshape((grid_w*w, grid_h*h))
    cv2.imwrite(path, imgs) 
Example 16
Project: DmsMsgRcg   Author: bshao001   File: convertmodel.py    (Apache License 2.0)
def s1_predict(config_file, model_dir, model_file, predict_file_list, out_dir):
    """
    This function serves as a test/validation tool during model development. It is not part of
    the final pipeline.
    """
    with open(config_file) as config_buffer:
        config = json.loads(config_buffer.read())

    with tf.Graph().as_default() as graph:
        converted_model = ConvertedModel(config, graph, 's1_keras', model_dir, model_file)

    with tf.Session(graph=graph) as sess:
        for img_file in predict_file_list:
            image = cv2.imread(img_file)
            boxes = converted_model.predict(sess, image)
            image = draw_boxes(image, boxes)

            _, filename = os.path.split(img_file)
            cv2.imwrite(os.path.join(out_dir, filename), image) 
Example 17
Project: SudokuVisionSolver   Author: tusharsircar95   File: main.py    (license)
def getTestImage(img,size):	
	size = int(size)
	(x,y) = np.shape(img)
	left,right,bottom,top = x,0,y,0
	count = 0
	for i in range(x):
		for j in range(y):
			if img[i][j] == 255:
				left = min(left,i)
				right = max(right,i)
				top = max(top,j)
				bottom = min(bottom,j)

				count = count + 1
	if count == 0:
		return img
	
	img = img[left:right,bottom:top]
	cv2.imwrite('template.jpg',img)
	return img
	
# Divides the grid into 9x9 = 81 cells and does OCR on each after processing it 
Example 18
Project: specularity-removal   Author: gmichaeljaison   File: main.py    (GNU General Public License v3.0)
def remove_specularity(img_files):
    """
    Removes highlights/specularity in images taken from multiple viewpoints

    :param img_files: File names of input images in horizontal order (important)
    """
    # read images from file names
    imgs = read_images(img_files)

    # solve each pair of images.
    # assumption: Input images are in order
    for i in range(len(imgs) - 1):
        logging.debug('processing images {} and {}'.format(i+1, i+2))
        imgs[i], imgs[i+1] = _solve(imgs[i], imgs[i + 1])

    for i, path in enumerate(img_files):
        fname = os.path.basename(path)
        res_file = os.path.join(RESULTS_DIR, fname)

        logging.info('saving the results in {}'.format(res_file))
        cv.imwrite(res_file, imgs[i]) 
Example 19
Project: sail   Author: GemHunt   File: image_set.py    (MIT License)
def create_composite_image_coin_id(coin_id, crop_dir, data_dir):
    images = []
    images_gif = []

    for id in range(0,56):
        image_id = coin_id * 100 + id
        crop = ci.get_rotated_crop(crop_dir, image_id, 56, 0)
        images.append(crop)
        filename =  ci.get_filename_from(image_id,crop_dir)
        images_gif.append(imageio.imread(filename))

    composite_image = ci.get_composite_image(images, 8, 8)
    cv2.imwrite(data_dir + str(coin_id) + '.png', composite_image)
    imageio.mimsave(data_dir + str(coin_id) + '.gif', images_gif)

    return 
Example 20
Project: onionstack   Author: ntddk   File: onionstack.py    (license)
def repaint_skin(filename):
    import cv2
    shutil.copy(filename, filename + '.bak')
    frame = cv2.imread(filename)
    HSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    l = np.array([0, 50, 80], dtype = "uint8")
    u = np.array([23, 255, 255], dtype = "uint8")
    skin_area = cv2.inRange(HSV, l, u)
    not_skin_area = cv2.bitwise_not(frame, frame, mask = skin_area)
    cv2.imwrite(filename, not_skin_area) 
Example 21
Project: yonkoma2data   Author: esuji5   File: cut.py    (license)
def cutout(self, img, cut_point, img_path='trim', padding=False, extra_cut=False):
        '''Cut out the panels defined by cut_point and save them, using img_path as the filename prefix.'''
        px = self.padding_x if padding else 0
        py = self.padding_y if padding else 0
        cp_x = cut_point['x']
        cp_y = cut_point['y']
        for i in range(0, len(cp_y)):
            if i % 2 == 0:
                img_cut_1_4 = img[cp_y[i] - py:cp_y[i + 1] + py, cp_x[2] - px:cp_x[3] + px]
                img_cut_5_8 = img[cp_y[i] - py:cp_y[i + 1] + py, cp_x[0] - px:cp_x[1] + px]
                if extra_cut:
                    # Split each strip further into single panels with hybrid_cut
                    img_cut_1_4 = hybrid_cut(img=img_cut_1_4, img_path='dum-{}'.format(i // 2 + 1))
                    img_cut_5_8 = hybrid_cut(img=img_cut_5_8, img_path='dum-{}'.format(i // 2 + 5))
                cv2.imwrite('{}-{}.png'.format(img_path, str(i // 2 + 1)), img_cut_1_4)
                cv2.imwrite('{}-{}.png'.format(img_path, str(i // 2 + 5)), img_cut_5_8) 
Example 22
Project: garden.facelock   Author: kivy-garden   File: download_images.py    (license)
def store_raw_images():
    '''Download negative images from image-net
        (change the URL to suit the cascade being trained)
    '''
    neg_images_link = 'http://image-net.org/api/text/imagenet.synset.geturls?wnid=n07942152'
    neg_image_urls = urllib2.urlopen(neg_images_link).read().decode()

    pic_num = 1

    for i in neg_image_urls.split('\n'):
        try:

            print i
            urllib.urlretrieve(i, "neg/" + str(pic_num) + '.jpg')
            img = cv2.imread("neg/" + str(pic_num) +'.jpg',
                                cv2.IMREAD_GRAYSCALE)
            resized_image = cv2.resize(img, (100, 100))
            cv2.imwrite("neg/" + str(pic_num) + '.jpg', resized_image)
            pic_num = pic_num + 1

        except:
            print "error" 
Example 23
Project: PixivAvatarBot   Author: kophy   File: avatar.py    (license)
def generate_avatar(dir, filename):
    """
    ????????????dir/avatar_filename
    :return: ?????????bool?
    """
    pil_image = numpy.array(Image.open(os.path.join(dir, filename)));
    image = None;
    try:
        image = cv2.cvtColor(numpy.array(pil_image), cv2.COLOR_RGB2BGR);
    except:
        image = numpy.array(pil_image);
    avatar = crop_avatar(image);
    if avatar is None:
        return False;
    else:
        cv2.imwrite(os.path.join(dir, "avatar_" + filename), avatar);
        return True; 
Example 24
Project: DeblurGAN   Author: KupynOrest   File: blur_image.py    (license)
def __plot_canvas(self, show, save):
        if len(self.result) == 0:
            raise Exception('Please run blur_image() method first.')
        else:
            plt.close()
            plt.axis('off')
            fig, axes = plt.subplots(1, len(self.result), figsize=(10, 10))
            if len(self.result) > 1:
                for i in range(len(self.result)):
                        axes[i].imshow(self.result[i])
            else:
                plt.axis('off')

                plt.imshow(self.result[0])
            if show and save:
                if self.path_to_save is None:
                    raise Exception('Please create Trajectory instance with path_to_save')
                cv2.imwrite(os.path.join(self.path_to_save, self.image_path.split('/')[-1]), self.result[0] * 255)
                plt.show()
            elif save:
                if self.path_to_save is None:
                    raise Exception('Please create Trajectory instance with path_to_save')
                cv2.imwrite(os.path.join(self.path_to_save, self.image_path.split('/')[-1]), self.result[0] * 255)
            elif show:
                plt.show() 
Example 25
Project: temporal-segment-networks   Author: yjxiong   File: build_of.py    (license)
def dump_frames(vid_path):
    import cv2
    video = cv2.VideoCapture(vid_path)
    vid_name = vid_path.split('/')[-1].split('.')[0]
    out_full_path = os.path.join(out_path, vid_name)

    fcount = int(video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    try:
        os.mkdir(out_full_path)
    except OSError:
        pass
    file_list = []
    for i in xrange(fcount):
        ret, frame = video.read()
        assert ret
        cv2.imwrite('{}/{:06d}.jpg'.format(out_full_path, i), frame)
        access_path = '{}/{:06d}.jpg'.format(vid_name, i)
        file_list.append(access_path)
    print '{} done'.format(vid_name)
    sys.stdout.flush()
    return file_list 
Example 26
Project: Machine-Learning   Author: Jegathis   File: color_quantization.py    (license)
def color_quant(input,K,output):
    img = cv2.imread(input)
    Z = img.reshape((-1,3))
    # convert to np.float32
    Z = np.float32(Z)
    # define criteria, number of clusters(K) and apply kmeans()
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 15, 1.0)

    ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)

    # Now convert back into uint8, and make original image
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))

    cv2.imshow('res2',res2)
    cv2.waitKey(0)
    cv2.imwrite(output, res2)
    cv2.destroyAllWindows() 
Example 27
Project: kaggle-dstl-satellite-imagery-feature-detection   Author: u1234x1234   File: view_pred_poly.py    (license)
def f(image_id):
#    if os.path.exists('test_poly_{}_{}/{}.png'.format(version, epoch, image_id)):
#        print(image_id)
#        return
    print('begin: {}'.format(image_id))

    p = d[image_id]
    p = [wkt.loads(x) for x in p]
    y_sf, x_sf = get_scale_factor(image_id, size, size)
    p = [affinity.scale(x, xfact=x_sf, yfact=y_sf, origin=(0, 0, 0)) for x in p]
    rst = rasterize_polgygon(p, size, size)
    color_rst = colorize_raster(rst)
    im = get_rgb_image(image_id, size, size)

    rr = np.hstack([color_rst, im])
    cv2.imwrite('test_poly_{}_{}-cv/{}.png'.format(version, epoch, image_id), rr)
    print('end: {}'.format(image_id)) 
Example 28
Project: blind-watermark   Author: linyacool   File: decode.py    (license)
def decode(ori_path, img_path, res_path, alpha):
    ori = cv2.imread(ori_path)
    img = cv2.imread(img_path)
    ori_f = np.fft.fft2(ori)
    img_f = np.fft.fft2(img)
    height, width = ori.shape[0], ori.shape[1]
    watermark = (ori_f - img_f) / alpha
    watermark = np.real(watermark)
    res = np.zeros(watermark.shape)
    random.seed(height + width)
    x = range(height / 2)
    y = range(width)
    random.shuffle(x)
    random.shuffle(y)
    for i in range(height / 2):
        for j in range(width):
            res[x[i]][y[j]] = watermark[i][j]
    cv2.imwrite(res_path, res, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) 
Example 29
Project: soja_box   Author: iTaa   File: soja_resize_image.py    (license)
def resize_image(img_path, mini_size=480, jpeg_quality=80):
    """
    ??image
    :param img_path: image???
    :param mini_size: ??????
    :param jpeg_quality: jpeg?????
    """
    org_img = cv2.imread(img_path)
    img_w = org_img.shape[0]
    img_h = org_img.shape[1]
    if max(img_w, img_h) > mini_size:
        if img_w > img_h:
            img_w = mini_size * img_w // img_h
            img_h = mini_size
        else:
            img_h = mini_size * img_h // img_w
            img_w = mini_size
    dist_size = (img_h, img_w)
    r_image = cv2.resize(org_img, dist_size, interpolation=cv2.INTER_AREA)
    params = [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality]
    img_name = img_path + '_New.jpg'
    cv2.imwrite(img_name, r_image, params)  # params already holds [IMWRITE_JPEG_QUALITY, jpeg_quality]
Example 30
Project: piwall-cvtools   Author: infinnovation   File: piwall.py    (license)
def hdSolidBlock(fn = "redHDSolidBlock.jpg", bgr = None):
    '''Generate test images as solid blocks of colour of known size, save to filename fn.'''
    # Create a zero (black) image of HD size with 3 colour dimensions.  Colour space assumed BGR by default.
    h = 1080
    w = 1920
    img = np.zeros((h,w,3),dtype="uint8")
    # Want to set all of the pixels to bgr tuple, default red, 8 bit colour
    if not bgr:
        bgr = [0,0,255]
    img[:,:] = bgr
    vw = ImageViewer(img)
    vw.windowShow()
    #cv2.imshow("zeroes", frame)
    #ch = 0xff & cv2.waitKey(10000)
    #cv2.destroyAllWindows()
    cv2.imwrite(fn, img) 
Example 31
Project: piwall-cvtools   Author: infinnovation   File: piwall.py    (license)
def do_warp(M, warp):
    warp = cv2.warpPerspective(orig, M, (maxWidth, maxHeight))
    # convert the warped image to grayscale and then adjust
    # the intensity of the pixels to have minimum and maximum
    # values of 0 and 255, respectively
    warp = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
    warp = exposure.rescale_intensity(warp, out_range = (0, 255))
    
    # the pokemon we want to identify will be in the top-right
    # corner of the warped image -- let's crop this region out
    (h, w) = warp.shape
    (dX, dY) = (int(w * 0.4), int(h * 0.45))
    crop = warp[10:dY, w - dX:w - 10]
    
    # save the cropped image to file
    cv2.imwrite("cropped.png", crop)
    
    # show our images
    cv2.imshow("image", image)
    cv2.imshow("edge", edged)
    cv2.imshow("warp", imutils.resize(warp, height = 300))
    cv2.imshow("crop", imutils.resize(crop, height = 300))
    cv2.waitKey(0) 
Example 32
Project: piwall-cvtools   Author: infinnovation   File: piwall.py    (license)
def locate(self, all = False, show = False, outimg = None):
        for (transition, mask) in self.transitions:
            if transition == 1:
                sfv3 = SquareFinderV3(mask, cos_limit = 0.5)
                squares = sfv3.find(self.mode)
                if show:
                    SquaresOverlayV4(mask, squares, all = all)
                    SquaresOverlayV4(mask, squares, all = False)
                else:
                    square_contours = [square.contour for square in squares]
                    best_contours_tuples = classify_multi_monitors_contour_set(square_contours)
                    found = mask.copy()
                    self.best_contours = [contour.astype('int32') for (contour, index) in best_contours_tuples]
                    cv2.drawContours( found, self.best_contours, -1, (0,0,255),3)
                    if outimg:
                        cv2.imwrite(outimg, found)
                return self.best_contours 
Example 33
Project: piwall-cvtools   Author: infinnovation   File: model.py    (license)
def idealised_model_B():
    '''As idealised_model_A but scale the images down for rapid prototyping.'''
    rw = RegularWall(1920, 1080, 2, 2, 100, 100, 150, 150)
    blackBackground = 'data/blackHDSolidBlock.jpg'
    whiteBackground = 'data/whiteHDSolidBlock.jpg'
    yellowBackground = 'data/yellowHDSolidBlock.jpg'
    rw.wall.add_bg(yellowBackground)
    rw.wall.render(pixelColor=BLACK, pixelThickness=-1)
    pdb.set_trace()
    black = resize_to_width(rw.wall.img, 1024)
    cv2.imwrite('data/ideal_black_w1024.png', black)
    vw = ImageViewer(rw.wall.img)
    vw.windowShow()
    rw.wall.render(pixelColor=WHITE, pixelThickness=-1)
    white = resize_to_width(rw.wall.img, 1024)
    cv2.imwrite('data/ideal_white_w1024.png', white)
    vw = ImageViewer(rw.wall.img)
    vw.windowShow() 
Example 34
Project: image_recognition   Author: tue-robotics   File: image_writer.py    (license)
def write_raw(dir_path, image):
    """
    Write an image to a file (path) with the label as subfolder
    :param dir_path: The base directory we are going to write to
    :param image: The OpenCV image
    """

    if dir_path is None:
        return False

    # Check if path exists, otherwise create it
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    # Check if the raw subdirectory exists, otherwise create it
    raw_dir = dir_path + "/raw"
    if not os.path.exists(raw_dir):
        os.makedirs(raw_dir)

    filename = "%s/%s.jpg" % (raw_dir, datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S_%f"))
    cv2.imwrite(filename, image)

    return True 
Example 35
Project: imgpedia   Author: scferrada   File: DeCAF7Computer.py    (license)
def compute(self, image):
		directory = ''.join(random.choice(string.lowercase) for _ in range(8))
		if not os.path.exists(directory):
			os.makedirs(directory)
		image = cv2.resize(image, self.size)
		patches = []
		patches.append(image)
		patches.append(image[:self.patch_size, :self.patch_size])
		patches.append(image[32:,32:])
		patches.append(image[32:, :self.patch_size])
		patches.append(image[:self.patch_size, 32:])
		patches.append(image[16:-16, 16:-16])
		patches.append(image[16:-16, 32:])
		patches.append(image[16:-16, :self.patch_size])
		patches.append(image[32:, 16:-16])
		patches.append(image[:self.patch_size, 16:-16])

		descriptor = np.zeros((1,4096))
		for i in range(len(patches)):
			filepath = os.path.join(directory, ("%d.jpg" % i))
			cv2.imwrite(filepath, patches[i])
			descriptor = descriptor + self.compute_oversample(filepath)
		shutil.rmtree(directory)
		return descriptor/len(patches) 
Example 36
Project: ATX   Author: NetEaseGame   File: simple-ide.py    (license)
def interactive_save(image):
    img_str = cv2.imencode('.png', image)[1].tostring()
    imgpil = Image.open(StringIO(img_str))

    root = Tkinter.Tk()
    root.geometry('{}x{}'.format(400, 400))
    imgtk = ImageTk.PhotoImage(image=imgpil)
    panel = Tkinter.Label(root, image=imgtk) #.pack()
    panel.pack(side="bottom", fill="both", expand="yes")
    Tkinter.Button(root, text="Hello!").pack()
    save_to = tkSimpleDialog.askstring("Save cropped image", "Enter filename")
    if save_to:
        if save_to.find('.') == -1:
            save_to += '.png'
        print 'Save to:', save_to
        cv2.imwrite(save_to, image)
    root.destroy() 
Example 37
Project: ATX   Author: NetEaseGame   File: test_android.py    (license)
def test_minicap():
    from atx.drivers.android_minicap import AndroidDeviceMinicap

    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    while True:
        try:
            h, w = d._screen.shape[:2]
            img = cv2.resize(d._screen, (w/2, h/2))
            cv2.imshow('preview', img)
            key = cv2.waitKey(1)
            if key == 100: # d for dump
                filename = time.strftime('%Y%m%d%H%M%S.png')
                cv2.imwrite(filename, d._screen)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview') 
Example 38
Project: rekognition-video-utils   Author: awslabs   File: opencv_utils.py    (license)
def get_frames_every_x_sec(video, secs=1, fmt='opencv'):
    vidcap = cv2.VideoCapture(video)
    fps = get_frame_rate(vidcap)
    inc = int(fps * secs)
    length = int(vidcap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    count = 0
    while vidcap.isOpened() and count <= length:
        if count % inc == 0:
            success, image = vidcap.read()
            if success:
                cv2_im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                if fmt == 'PIL':
                    im = Image.fromarray(cv2_im)
                #elif fmt == 'DISK':
                    #cv2.imwrite(os.path.join(path_output_dir, '%d.png') % count, image)
                else:
                    im = cv2_im
                yield count, im 
            else:
                break
        count += 1
    cv2.destroyAllWindows()
    vidcap.release()

# image region: img = img[c1:c1+25,r1:r1+25] # roi = gray[y1:y2, x1:x2] 
Example 39
Project: cervix-roi-segmentation-by-unet   Author: scottykwok   File: prepare_data.py    (license)
def resize_addset(source_folder, target_folder, dsize, pattern=FILE_PATTERN):
    print('Resizing additional set...')
    if not os.path.exists(target_folder): os.makedirs(target_folder)
    for clazz in ClassNames:
        if clazz not in os.listdir(target_folder):
            os.makedirs(os.path.join(target_folder, clazz))

        total_images = glob.glob(os.path.join(source_folder, clazz, pattern))
        total = len(total_images)
        for i, source in enumerate(total_images):
            filename = ntpath.basename(source)
            target = os.path.join(target_folder, clazz, filename.replace('.jpg', '.png'))

            try:
                img = cv2.imread(source)
                img_resized = cv2.resize(img, dsize, interpolation=cv2.INTER_CUBIC)
                cv2.imwrite(target, img_resized)
            except:
                print('-------------------> error in: {}'.format(source))

            if i % 20 == 0:
                print("Resized {}/{} images".format(i, total)) 
Example 40
Project: Stereo-Pose-Machines   Author: ppwwyyxx   File: main.py    (license)
def dump_2dcoor():
    camera = libcpm.Camera()
    camera.setup()
    runner = get_parallel_runner('../data/cpm.npy')
    cv2.namedWindow('color')
    cv2.startWindowThread()
    cnt = 0
    while True:
        cnt += 1
        m1 = camera.get_for_py(0)
        m1 = np.array(m1, copy=False)
        m2 = camera.get_for_py(1)
        m2 = np.array(m2, copy=False)

        o1, o2 = runner(m1, m2)
        pts = []
        for k in range(14):
            pts.append((argmax_2d(o1[:,:,k]),
                argmax_2d(o2[:,:,k])))
        pts = np.asarray(pts)
        np.save('pts{}.npy'.format(cnt), pts)
        cv2.imwrite("frame{}.png".format(cnt), m1);
        if cnt == 10:
            break 
Example 41
Project: chainer-faster-rcnn   Author: mitmul   File: test_anchor_target_layer.py    (license)
def test_generate_proposals(self):
        self.assertEqual(self.total_anchors, len(self.shifts) *
                         self.anchor_target_layer.anchors.shape[0])

        min_x = self.all_anchors[:, 0].min()
        min_y = self.all_anchors[:, 1].min()
        max_x = self.all_anchors[:, 2].max()
        max_y = self.all_anchors[:, 3].max()
        canvas = np.zeros(
            (int(abs(min_y) + max_y) + 1,
             int(abs(min_x) + max_x) + 1), dtype=np.uint8)
        self.all_anchors[:, 0] -= min_x
        self.all_anchors[:, 1] -= min_y
        self.all_anchors[:, 2] -= min_x
        self.all_anchors[:, 3] -= min_y
        for anchor in self.all_anchors:
            anchor = list(six.moves.map(int, anchor))
            cv.rectangle(
                canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
        cv.imwrite('tests/all_anchors.png', canvas) 
Example 42
Project: chainer-faster-rcnn   Author: mitmul   File: test_anchor_target_layer.py    (license)
def test_keep_inside(self):
        inds_inside, anchors = self.inds_inside, self.anchors

        min_x = anchors[:, 0].min()
        min_y = anchors[:, 1].min()
        max_x = anchors[:, 2].max()
        max_y = anchors[:, 3].max()
        canvas = np.zeros(
            (int(max_y - min_y) + 1,
             int(max_x - min_x) + 1), dtype=np.uint8)
        anchors[:, 0] -= min_x
        anchors[:, 1] -= min_y
        anchors[:, 2] -= min_x
        anchors[:, 3] -= min_y
        for i, anchor in enumerate(anchors):
            anchor = list(six.moves.map(int, anchor))
            _canvas = np.zeros(
                (int(max_y - min_y) + 1,
                 int(max_x - min_x) + 1), dtype=np.uint8)
            cv.rectangle(
                _canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
            cv.rectangle(
                canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
            cv.imwrite('tests/anchors_inside_{}.png'.format(i), _canvas)
        cv.imwrite('tests/anchors_inside.png', canvas)
Example 43
Project: Yugioh-bot   Author: will7200   File: provider.py    (license)
def img_to_string(img, char_set=None):
        cv2.imwrite("tmp\\ocr.png", img)
        command = "bin\\tess\\tesseract.exe --tessdata-dir bin\\tess\\tessdata tmp\\ocr.png tmp\\ocr "
        if char_set is not None:
            command += "-c tessedit_char_whitelist=" + char_set + " "
        command += "-psm 7 "
        command += "> nul 2>&1"
        CREATE_NO_WINDOW = 0x08000000
        subprocess.call(command, shell=True, creationflags=CREATE_NO_WINDOW)
        # Get the largest line in txt
        with open("tmp\\ocr.txt") as f:
            content = f.read().splitlines()
        output_line = ""
        for line in content:
            line = line.strip()
            if len(line) > len(output_line):
                output_line = line
        return output_line 
Example 44
Project: AutomatorX   Author: xiaoyaojjian   File: simple-ide.py    (license)
def interactive_save(image):
    img_str = cv2.imencode('.png', image)[1].tostring()
    imgpil = Image.open(StringIO(img_str))

    root = Tkinter.Tk()
    root.geometry('{}x{}'.format(400, 400))
    imgtk = ImageTk.PhotoImage(image=imgpil)
    panel = Tkinter.Label(root, image=imgtk) #.pack()
    panel.pack(side="bottom", fill="both", expand="yes")
    Tkinter.Button(root, text="Hello!").pack()
    save_to = tkSimpleDialog.askstring("Save cropped image", "Enter filename")
    if save_to:
        if save_to.find('.') == -1:
            save_to += '.png'
        print 'Save to:', save_to
        cv2.imwrite(save_to, image)
    root.destroy() 
Example 45
Project: AutomatorX   Author: xiaoyaojjian   File: pixelmatch.py    (license)
def locate_img(image, template):
    img = image.copy()
    res = cv2.matchTemplate(img, template, method)
    print res
    print res.shape
    cv2.imwrite('image/shape.png', res)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    print cv2.minMaxLoc(res)
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    h, w = template.shape
    bottom_right = (top_left[0] + w, top_left[1]+h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    cv2.imwrite('image/tt.jpg', img) 
Example 46
Project: AVSR-Deep-Speech   Author: pandeydivesh15   File: image_handler.py    (GNU General Public License v2.0)
def visualize_image(image, name="Image", resize=False, save_image=False, path=None):
	"""Helper function to visualize and save any image"""
	image = image.reshape([IMAGE_WIDTH, IMAGE_HEIGHT])
	image = image.astype(np.uint8)

	if resize: 
		image = cv2.resize(image, (IMAGE_WIDTH * 10, IMAGE_HEIGHT * 10))

	cv2.imshow(name, image)
	if cv2.waitKey(0) & 0xFF == ord('q'):
		cv2.destroyAllWindows()

	if save_image:
		assert path is not None
		cv2.imwrite(path, image) 
Example 47
Project: vehicle_brand_classification_CNN   Author: nanoc812   File: logoSet.py    (MIT License)
def imgPreprocess(img_dir, foldername):
    myfiles = glob.glob(img_dir+'*.jpg')
    
    temp = img_dir.split('/')
    newDir = '/'.join(temp[:(len(temp)-2)])
    if not os.path.exists(newDir+'/'+foldername+'/'):
        os.mkdir(newDir+'/'+foldername+'/')
    
    for filepath in myfiles:
        img = cv2.imread(filepath)
        logo = imgSeg(img)
        sd = filepath.rfind('/'); ed = filepath.find('.'); filename = filepath[int(sd+1):int(ed)]
        cv2.imwrite(newDir+'/'+foldername+'/'+filename+'.jpg',logo)
        print("car logo segmentation success,%s"%filename) 
Example 48
Project: pybot   Author: spillai   File: io_utils.py    (license)
def _write_images(self, im): 
        cv2.imwrite(os.path.join(self.directory, 'imgs-%06i.png' % self.idx), im)
        self.idx += 1 
Example 49
Project: pybot   Author: spillai   File: image_utils.py    (license)
def _save(self): 
        if not len(self.ims_): 
            return

        fn = self.filename_template_ % self.idx_
        cv2.imwrite(fn, self.mosaic_cb_(self.ims_))
        print('Saving mosaic: %s' % fn)
        self.idx_ += 1 
Example 50
Project: shenlan   Author: vector-1127   File: cgan.py    (license)
def generate(BATCH_SIZE, nice=False):
    (X_train, Y_train) = get_data('test')
    #print(np.shape(X_train))
    X_train = (X_train.astype(np.float32) - 127.5)/127.5
    Y_train = (Y_train.astype(np.float32) - 127.5)/127.5
    
    generator = generator_model()
    generator.compile(loss='binary_crossentropy', optimizer="SGD")
    generator.load_weights('generator')
    if nice:
        discriminator = discriminator_model()
        discriminator.compile(loss='binary_crossentropy', optimizer="SGD")
        discriminator.load_weights('discriminator')

        generated_images = generator.predict(X_train, verbose=1)
        d_pret = discriminator.predict(generated_images, verbose=1)
        index = np.arange(0, BATCH_SIZE*20)
        index.resize((BATCH_SIZE*20, 1))
        pre_with_index = list(np.append(d_pret, index, axis=1))
        pre_with_index.sort(key=lambda x: x[0], reverse=True)
        nice_images = np.zeros((BATCH_SIZE, 1) + (generated_images.shape[2:]), dtype=np.float32)
        for i in range(int(BATCH_SIZE)):
            idx = int(pre_with_index[i][1])
            nice_images[i, 0, :, :] = generated_images[idx, 0, :, :]
        image = combine_images(nice_images)
    else:
        generated_images = generator.predict(X_train)
        image = combine_images(generated_images)
    image = image*127.5+127.5
    image = np.swapaxes(image,0,2)
    cv2.imwrite('generated.png',image)