Python cv2.imwrite() Examples

The following are 30 code examples showing how to use cv2.imwrite(). The examples are extracted from open source projects; the originating project, author, source file, and license are listed above each example.

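Before the project examples, here is a minimal sketch of the basic pattern most of them follow. The file names "input.jpg" and "output.png" are placeholders, not taken from any example: read an image, transform it, and write the result with cv2.imwrite(), checking the boolean return value, since a failed write (for example, a missing output directory) typically just returns False instead of raising.

import cv2

# Minimal usage sketch; "input.jpg" and "output.png" are placeholder paths.
img = cv2.imread("input.jpg")  # imread() returns None if the file cannot be read
if img is None:
    raise FileNotFoundError("could not read input.jpg")

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# imwrite() infers the output format from the file extension and
# returns False when the image cannot be written (e.g. missing directory).
if not cv2.imwrite("output.png", gray):
    raise IOError("could not write output.png")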

Example 1
Project: pedestrian-haar-based-detector   Author: felipecorrea   File: histcomparison.py    License: GNU General Public License v2.0    10 votes
def main():
	imagePath = "img.jpg"
	
	img = cv2.imread(imagePath)
	gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
	
	generate_histogram(gray)
	
	cv2.imwrite("before.jpg", gray)

	gray = cv2.equalizeHist(gray)
	
	generate_histogram(gray)
	
	cv2.imwrite("after.jpg",gray)
	
	return 0 
Example 2
Project: python--   Author: Leezhen2014   File: BlurDetection.py    License: GNU General Public License v3.0    10 votes
def _lapulaseDetection(self, imgName):
        """
        :param strdir: 文件所在的目录
        :param name: 文件名称
        :return: 检测模糊后的分数
        """
        # step1: 预处理
        img2gray, reImg = self.preImgOps(imgName)
        # step2: laplacian算子 获取评分
        resLap = cv2.Laplacian(img2gray, cv2.CV_64F)
        score = resLap.var()
        print("Laplacian %s score of given image is %s", str(score))
        # strp3: 绘制图片并保存  不应该写在这里  抽象出来   这是共有的部分
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_lapulaseDetection_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        # display
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)

        # step 4: return the score
        return score 
Example 3
Project: DOTA_models   Author: ringringyi   File: utils.py    License: Apache License 2.0    9 votes
def saveimageWithMask(img, outname, mask_poly):

    dstimg = copy.deepcopy(img)
    for mask in mask_poly:
        bound = mask.bounds
        if (len(bound) < 4):
            continue
        xmin, ymin, xmax, ymax = bound[0], bound[1], bound[2], bound[3]
        for x in range(int(xmin), int(xmax)):
            for y in range(int(ymin), int(ymax)):
                point = shgeo.Point(x, y)
                if point.within(mask):
                    #print('withing')

                    dstimg[int(y)][int(x)] = 0

    cv2.imwrite(outname, dstimg) 
Example 4
Project: pedestrian-haar-based-detector   Author: felipecorrea   File: detect.py    License: GNU General Public License v2.0    7 votes
def main():
	#IMG PATHS
	imagePath = "test3.jpg"
	cascPath = "cascades/haarcascade_pedestrian.xml"

	pplCascade = cv2.CascadeClassifier(cascPath)
	image = cv2.imread(imagePath)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	
	gray = normalize_grayimage(gray)
	 
	pedestrians = pplCascade.detectMultiScale(
		gray,
		scaleFactor=1.2,
		minNeighbors=10,
		minSize=(32,96),
		flags=cv2.CASCADE_SCALE_IMAGE  # was cv2.cv.CV_HAAR_SCALE_IMAGE (OpenCV 2.x API)
	)

	print "Found {0} ppl!".format(len(pedestrians))

	#Draw a rectangle around the detected objects
	for (x, y, w, h) in pedestrians:
		cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)

	cv2.imwrite("saida.jpg", image)
	cv2.imshow("Ppl found", image)
	cv2.waitKey(0)
	
	return 0 
Example 5
Project: pruning_yolov3   Author: zbyuan   File: datasets.py    License: GNU General Public License v3.0    7 votes
def convert_images2bmp():
    # cv2.imread() jpg at 230 img/s, *.bmp at 400 img/s
    for path in ['../coco/images/val2014/', '../coco/images/train2014/']:
        folder = os.sep + Path(path).name
        output = path.replace(folder, folder + 'bmp')
        if os.path.exists(output):
            shutil.rmtree(output)  # delete output folder
        os.makedirs(output)  # make new output folder

        for f in tqdm(glob.glob('%s*.jpg' % path)):
            save_name = f.replace('.jpg', '.bmp').replace(folder, folder + 'bmp')
            cv2.imwrite(save_name, cv2.imread(f))

    for label_path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
        with open(label_path, 'r') as file:
            lines = file.read()
        lines = lines.replace('2014/', '2014bmp/').replace('.jpg', '.bmp').replace(
            '/Users/glennjocher/PycharmProjects/', '../')
        with open(label_path.replace('5k', '5k_bmp'), 'w') as file:
            file.write(lines) 
Example 6
def visual(title, X, activation):
    '''create a grid of images and save it as a final image
    title : grid image name
    X : array of images
    '''
    assert len(X.shape) == 4

    X = X.transpose((0, 2, 3, 1))
    if activation == 'sigmoid':
        X = np.clip((X)*(255.0), 0, 255).astype(np.uint8)
    elif activation == 'tanh':
        X = np.clip((X+1.0)*(255.0/2.0), 0, 255).astype(np.uint8)
    n = np.ceil(np.sqrt(X.shape[0]))
    buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
    for i, img in enumerate(X):
        fill_buf(buff, i, img, X.shape[1:3])
    cv2.imwrite('%s.jpg' % (title), buff) 
Example 7
Project: pruning_yolov3   Author: zbyuan   File: utils.py    License: GNU General Public License v3.0    6 votes
def crop_images_random(path='../images/', scale=0.50):  # from utils.utils import *; crop_images_random()
    # crops images into random squares up to scale fraction
    # WARNING: overwrites images!
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        img = cv2.imread(file)  # BGR
        if img is not None:
            h, w = img.shape[:2]

            # create random mask
            a = 30  # minimum size (pixels)
            mask_h = random.randint(a, int(max(a, h * scale)))  # mask height
            mask_w = mask_h  # mask width

            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)

            # save the random crop back over the original image
            cv2.imwrite(file, img[ymin:ymax, xmin:xmax]) 
Example 8
Project: Pytorch-Networks   Author: HaiyangLiu1997   File: test.py    License: MIT License    6 votes
def test(test_loader, model, logger=None, Writer=None):
    
    model.eval()
    with torch.no_grad():
        for its, (img_line, img_noise) in enumerate(test_loader):
            img_line = img_line.cuda() if torch.cuda.is_available() else img_line
            img_noise = img_noise.cuda() if torch.cuda.is_available() else img_noise
            g_results = model(torch.cat((img_line, img_noise), 1))
            for i in range(img_line.shape[0]):
                img_line_test = img_line[i].cpu().numpy().transpose((1,2,0)) * 255
                img_line_test = img_line_test.squeeze()
                cv2.imwrite((cfg.PATH.RES_TEST+"line_{}.jpg".format(i+its)), img_line_test)

                img_res_test = g_results[i].cpu().numpy().transpose((1,2,0)) * 255
                cv2.imwrite((cfg.PATH.RES_TEST+"res_{}.jpg".format(i+its)), img_res_test)
                print("{}/{}".format(i+its,its_num)) 
Example 9
Project: ICDAR-2019-SROIE   Author: zzzDavid   File: boxing.py    License: MIT License    6 votes
def draw():
    f = open(box_path + 'jpglist.txt')

    # read each image and its label
    line = f.readline()
    line_num =0
    while line:
        line_num=line_num+1
        print('Image:', line_num)
        name = line.strip('\n')
        img = cv2.imread(image_path + name)
        img_size = img.shape
        img_size = img_size[0]*img_size[1]

        # read each coordinate and draw box
        f_txt = open(image_path + name.strip('.jpg') + '.txt')
        #line_txt = f_txt.readline()  # pass the first ROI information
        line_txt = f_txt.readline()
        while line_txt:
            coor = line_txt.split(',')
            x1 = int(coor[0].strip('\''))
            y1 = int(coor[1].strip('\''))
            x3 = int(coor[4].strip('\''))
            y3 = int(coor[5].strip('\''))
            text = coor[8].strip('\n').strip('\'')
            text_show = text + '(' + str(x1) + ',' + str(y1) +')'

            cv2.rectangle(img, (x1, y1), (x3, y3), (255, 0, 0), 1)
            #cv2.putText(img, text_show, (x1, y1 - 1),
              #          cv2.FONT_HERSHEY_TRIPLEX, 0.35, (0, 0, 255), 1)
            line_txt = f_txt.readline()
        cv2.imwrite(box_path + name, img)
        line = f.readline()
        # img = cv2.imshow('image', img)
        # cv2.waitKey(0) 
Example 10
Project: ICDAR-2019-SROIE   Author: zzzDavid   File: main.py    License: MIT License    6 votes
def draw():
    filenames = [os.path.splitext(f)[0] for f in glob.glob("for_task3/*.txt")]
    txt_files = [s + ".txt" for s in filenames]
    for txt in txt_files:
        image = cv2.imread('test_original/'+ txt.split('/')[1].split('.')[0]+'.jpg', cv2.IMREAD_COLOR)
        with open(txt, 'r') as txt_file:
            for line in csv.reader(txt_file):
                box = [int(string, 10) for string in line[0:8]]
                if len(line) < 9:
                    print(txt)
                cv2.rectangle(image, (box[0], box[1]), (box[4], box[5]), (0,255,0), 2)
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(image, line[8].upper(), (box[0],box[1]), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
        cv2.imwrite('task2_result_draw/'+ txt.split('/')[1].split('.')[0]+'.jpg', image) 
Example 11
Project: tf2-yolo3   Author: akkaze   File: det_visualizer.py    License: Apache License 2.0    6 votes
def on_epoch_end(self, epoch, logs=None):
        if self.tiny:
            anchors = yolo_tiny_anchors
            masks = yolo_tiny_anchor_masks
        else:
            anchors = yolo_anchors
            masks = yolo_anchor_masks
        model = make_eval_model_from_trained_model(self.model, anchors, masks)

        epoch_dir = os.path.join(self.result_dir, str(epoch))

        os.makedirs(epoch_dir)
        for batch, (images, labels) in enumerate(self.dataset):
            images = images.numpy()
            for i in range(images.shape[0]):
                boxes, scores, classes = model.predict(images[i:i + 1, ...])
                img_for_this = (images[i, ...] * 255).astype(np.uint8)

                boxes_for_this, scores_for_this, classes_for_this = boxes[0, ...], scores[0, ...], classes[0, ...]

                img_for_this = draw_outputs(img_for_this, (boxes_for_this, scores_for_this, classes_for_this))
                cv2.imwrite(os.path.join(epoch_dir, '{0}.jpg'.format(uuid.uuid4())), img_for_this)
            if batch == self.num_batches:
                break 
Example 12
Project: python--   Author: Leezhen2014   File: BlurDetection.py    License: GNU General Public License v3.0    6 votes
def _blurDetection(self, imgName):

        # step 1: image preprocessing
        img2gray, reImg = self.preImgOps(imgName)
        imgMat=self._imageToMatrix(img2gray)/255.0
        x, y = imgMat.shape
        score = 0
        for i in range(x - 2):
            for j in range(y - 2):
                score += (imgMat[i + 2, j] - imgMat[i, j]) ** 2
        # step 3: draw the score on the image and save it (should not live here; abstract it out, this part is shared)
        score=score/10
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_blurDetection_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)
        return score 
Example 13
Project: python--   Author: Leezhen2014   File: BlurDetection.py    License: GNU General Public License v3.0    6 votes
def _SMD2Detection(self, imgName):
        """
        Gray-level variance product (SMD2)
        :param imgName:
        :return:
        """
        # step 1: image preprocessing
        img2gray, reImg = self.preImgOps(imgName)
        f=self._imageToMatrix(img2gray)/255.0
        x, y = f.shape
        score = 0
        for i in range(x - 1):
            for j in range(y - 1):
                score += np.abs(f[i+1,j]-f[i,j])*np.abs(f[i,j]-f[i,j+1])
        # step 3: draw the score on the image and save it (should not live here; abstract it out, this part is shared)
        score=score
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_SMD2Detection_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)
        return score 
Example 14
Project: python--   Author: Leezhen2014   File: BlurDetection.py    License: GNU General Public License v3.0    6 votes
def _Variance(self, imgName):
        """
               Gray-level variance
               :param imgName:
               :return:
               """
        # step 1: image preprocessing
        img2gray, reImg = self.preImgOps(imgName)
        f = self._imageToMatrix(img2gray)

        # step 3: draw the score on the image and save it (should not live here; abstract it out, this part is shared)
        score = np.var(f)
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_Variance_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)
        return score 
Example 15
Project: CartoonGAN-Tensorflow   Author: taki0112   File: edge_smooth.py    License: MIT License    6 votes
def make_edge_smooth(dataset_name, img_size) :
    check_folder('./dataset/{}/{}'.format(dataset_name, 'trainB_smooth'))

    file_list = glob('./dataset/{}/{}/*.*'.format(dataset_name, 'trainB'))
    save_dir = './dataset/{}/trainB_smooth'.format(dataset_name)

    kernel_size = 5
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    gauss = cv2.getGaussianKernel(kernel_size, 0)
    gauss = gauss * gauss.transpose(1, 0)

    for f in tqdm(file_list) :
        file_name = os.path.basename(f)

        bgr_img = cv2.imread(f)
        gray_img = cv2.imread(f, 0)

        bgr_img = cv2.resize(bgr_img, (img_size, img_size))
        pad_img = np.pad(bgr_img, ((2, 2), (2, 2), (0, 0)), mode='reflect')
        gray_img = cv2.resize(gray_img, (img_size, img_size))

        edges = cv2.Canny(gray_img, 100, 200)
        dilation = cv2.dilate(edges, kernel)

        gauss_img = np.copy(bgr_img)
        idx = np.where(dilation != 0)
        for i in range(np.sum(dilation != 0)):
            gauss_img[idx[0][i], idx[1][i], 0] = np.sum(
                np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 0], gauss))
            gauss_img[idx[0][i], idx[1][i], 1] = np.sum(
                np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 1], gauss))
            gauss_img[idx[0][i], idx[1][i], 2] = np.sum(
                np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 2], gauss))

        cv2.imwrite(os.path.join(save_dir, file_name), gauss_img) 
Example 16
Project: bjtu_BinocularCameraRecord   Author: anonymouslycn   File: Main.py    License: MIT License    6 votes
def loop2(self,text,w=1280,h=720):
        cap = cv2.VideoCapture(int(text))
        cap.set(6, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))  # 6 = cv2.CAP_PROP_FOURCC
        global capnum2
        capnum2 = int(text)
        cap.set(3, w)  # 3 = cv2.CAP_PROP_FRAME_WIDTH
        cap.set(4, h)  # 4 = cv2.CAP_PROP_FRAME_HEIGHT
        global update2
        update2 = 1
        global shotmark2

        while (update2 == 1):
            ret, frame = cap.read() 
            if shotmark2 == 1:
                fn = self.lineEdit.text()
                name = "photo/2_"+fn + "video.jpg"
                if os.path.exists(name):
                    name = "photo/2_" + fn + "video"+str(int(time.time()))+".jpg"
                cv2.imwrite(name, frame)
                shotmark2 = 0
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.original2_image.updateImage(frame)
        # cap.release()
        cv_img_rgb = np.zeros((700,700,3))
        self.original2_image.updateImage(cv_img_rgb) 
Example 17
Project: exposure   Author: yuanming-hu   File: util.py    License: MIT License    6 votes
def degrade_images_in_folder(
    folder,
    dst_folder_suffix,
    LIGHTDOWN=True,
    UNBALANCECOLOR=True,):
  import os
  js = os.listdir(folder)
  dst_folder = folder + '-' + dst_folder_suffix
  try:
    os.mkdir(dst_folder)
  except:
    print('dir exist!')
  print('in ' + dst_folder)
  num = 3
  for j in js:
    img = cv2.imread(folder + '/' + j) / 255.
    if LIGHTDOWN:
      for _ in range(num - 1):
        out = pow(img, np.random.uniform(0.4, 0.6)) * np.random.uniform(
            0.25, 0.5)
        cv2.imwrite(dst_folder + '/' + ('L%d-' % _) + j, out * 255.)
      out = img * img
      out = out * (1.0 / out.max())
      cv2.imwrite(dst_folder + '/' + ('L%d-' % num) + j, out * 255.)
    if UNBALANCECOLOR:
      filter = WB2()
      outs = np.array([img] * num)
      features = np.abs(np.random.rand(num, 3))
      for _, out in enumerate(
          filter.process(outs, filter.filter_param_regressor(features))):
        # print out.max()
        out /= out.max()
        out *= np.random.uniform(0.7, 1)
        cv2.imwrite(dst_folder + '/' + ('C%d-' % _) + j, out * 255.) 
Example 18
Project: exposure   Author: yuanming-hu   File: util.py    License: MIT License    6 votes
def vis_images_and_indexs(images, features, dir, name):
  # indexs = np.reshape(indexs, (len(indexs),))
  # print('visualizing images and indexs: ', images.shape, indexs.shape)
  id_imgs = []
  for feature in features:
    img = np.ones((64, 64, 3))
    cv2.putText(img,
                str(feature), (4, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.25,
                (1.0, 0.0, 0.0))
    id_imgs.append(img)
  id_imgs = np.stack(id_imgs, axis=0)
  # print('id imgs: ', id_imgs.shape)

  vis_imgs = np.vstack([images, id_imgs])
  image = make_image_grid(vis_imgs, per_row=images.shape[0])
  vis_dir = dir
  try:
    os.mkdir(vis_dir)
  except:
    pass
  cv2.imwrite(os.path.join(vis_dir, name + '.png'), image[:, :, ::-1] * 255.0) 
Example 19
Project: object-detection   Author: cristianpb   File: test_detection.py    License: MIT License    6 votes
def test_motion():
    image = cv2.imread("./imgs/image.jpeg")
    print(image.shape)

    detector = Detector_Motion()

    image2 = cv2.imread("./imgs/image_box.jpg")
    print(image2.shape)
    assert image.shape == image2.shape
    image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    image2 = cv2.GaussianBlur(image2, (21, 21), 0)
    detector.avg = image2.astype(float)

    output = detector.prediction(image)
    df = detector.filter_prediction(output, image)
    image = detector.draw_boxes(image, df)
    print(df)
    assert df.shape[0] == 1

    cv2.imwrite("./imgs/outputcv.jpg", image) 
Example 20
Project: object-detection   Author: cristianpb   File: camera_opencv.py    License: MIT License    6 votes
def CaptureContinous(self, detector):
    cap = cv2.VideoCapture(0)
    _, image = cap.read()
    cap.release()
    output = detector.prediction(image)
    df = detector.filter_prediction(output, image)
    if len(df) > 0:
        if (df['class_name']
                .str
                .contains('person|bird|cat|wine glass|cup|sandwich')
                .any()):
            day = datetime.now().strftime("%Y%m%d")
            directory = os.path.join(IMAGE_FOLDER, 'webcam', day)
            if not os.path.exists(directory):
                os.makedirs(directory)
            image = detector.draw_boxes(image, df)
            classes = df['class_name'].unique().tolist()
            hour = datetime.now().strftime("%H%M%S")
            filename_output = os.path.join(
                    directory, "{}_{}_.jpg".format(hour, "-".join(classes))
                    )
            cv2.imwrite(filename_output, image) 
Example 21
Project: facemoji   Author: PiotrDabrowskey   File: process_dataset.py    License: MIT License    6 votes
def extract_faces(emotions):
    """
    Crops faces in emotions images.
    :param emotions: List of emotions names.
    """
    print("Extracting faces")
    for emotion in emotions:
        photos = glob.glob('data/sorted_set/%s/*' % emotion)

        for file_number, photo in enumerate(photos):
            frame = cv2.imread(photo)
            normalized_faces = find_faces(frame)
            os.remove(photo)

            for face in normalized_faces:
                try:
                    cv2.imwrite("data/sorted_set/%s/%s.png" % (emotion, file_number + 1), face[0])  # write image
                except:
                    print("error in processing %s" % photo) 
Example 22
Project: 3D-HourGlass-Network   Author: Naman-ntc   File: my.py    License: MIT License    6 votes
def func1(k=None):
    if not k:
        k=randint(0, 20)
    print('image is',k)
    for i, (img, heatmap,vecmap,depthmap,kpt_3d) in enumerate(train_loader):
        if i==k:
#            test_heatmaps(heatmap,img,i)
#            test_vecmaps(vecmap,img,i)
#            edges = [[0, 1], [1, 2], [2, 6], [6, 3], [3, 4], [4, 5], [10, 11], [11, 12], [12, 8], [8, 13], [13, 14], [14, 15], [6, 8], [8, 9]]
#            ppl=kpt_3d.shape[0]
#            for i in range(ppl):
#                for edge in edges:
#                    cv2.line(img,(int(kpt_3d[i][edge[0]][0]),int(kpt_3d[i][edge[0]][1])),(int(kpt_3d[i][edge[1]][0]),int(kpt_3d[i][edge[1]][1])),(0,255,0))
#            cv2.imwrite('outside3dfinal.png',img)
       
            return img,heatmap,vecmap,depthmap,kpt_3d 
Example 23
Project: 3D-HourGlass-Network   Author: Naman-ntc   File: my.py    License: MIT License    6 votes
def func1(k=None):
    if not k:
        k=randint(0, 20)
    print('image is',k)
    for i, (img, heatmap,vecmap,depthmap,kpt_3d) in enumerate(train_loader):
        if i==k:
#            test_heatmaps(heatmap,img,i)
#            test_vecmaps(vecmap,img,i)
#            edges = [[0, 1], [1, 2], [2, 6], [6, 3], [3, 4], [4, 5], [10, 11], [11, 12], [12, 8], [8, 13], [13, 14], [14, 15], [6, 8], [8, 9]]
#            ppl=kpt_3d.shape[0]
#            for i in range(ppl):
#                for edge in edges:
#                    cv2.line(img,(int(kpt_3d[i][edge[0]][0]),int(kpt_3d[i][edge[0]][1])),(int(kpt_3d[i][edge[1]][0]),int(kpt_3d[i][edge[1]][1])),(0,255,0))
#            cv2.imwrite('outside3dfinal.png',img)
       
            return img,heatmap,vecmap,depthmap,kpt_3d 
Example 24
Project: AerialDetection   Author: dingjiansw101   File: utils.py    License: Apache License 2.0    6 votes
def saveimageWithMask(img, outname, mask_poly):

    dstimg = copy.deepcopy(img)
    for mask in mask_poly:
        bound = mask.bounds
        if (len(bound) < 4):
            continue
        xmin, ymin, xmax, ymax = bound[0], bound[1], bound[2], bound[3]
        for x in range(int(xmin), int(xmax)):
            for y in range(int(ymin), int(ymax)):
                point = shgeo.Point(x, y)
                if point.within(mask):
                    #print('withing')

                    dstimg[int(y)][int(x)] = 0

    cv2.imwrite(outname, dstimg) 
Example 25
Project: Advanced_Lane_Lines   Author: ChengZhongShen   File: helpers.py    License: MIT License    6 votes
def undistort_images(src, dst):
	"""
	undistort the images in src folder to dst folder
	"""
	# load mtx, dist from the camera calibration pickle
	pickle_file = open("../camera_cal/camera_cal.p", "rb")
	dist_pickle = pickle.load(pickle_file)
	mtx = dist_pickle["mtx"]  
	dist = dist_pickle["dist"]
	pickle_file.close()
	
	# loop the image folder
	image_files = glob.glob(src+"*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		image_dist = cv2.undistort(img, mtx, dist, None, mtx)
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = dst+file_name
		print(out_image)
		image_dist = cv2.cvtColor(image_dist, cv2.COLOR_RGB2BGR)
		cv2.imwrite(out_image, image_dist) 
Example 26
Project: Advanced_Lane_Lines   Author: ChengZhongShen   File: helpers.py    License: MIT License    6 votes
def wrap_images(src, dst):
	"""
	apply the perspective warp to the images
	"""
	# load M, Minv
	img_size = (1280, 720)
	pickle_file = open("../helper/trans_pickle.p", "rb")
	trans_pickle = pickle.load(pickle_file)
	M = trans_pickle["M"]
	Minv = trans_pickle["Minv"]
	# loop the file folder
	image_files = glob.glob(src+"*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		image_wraped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = dst+file_name
		print(out_image)
		# convert RGB (from mpimg.imread) back to BGR so cv2.imwrite preserves the colors
		image_wraped = cv2.cvtColor(image_wraped, cv2.COLOR_RGB2BGR)
		cv2.imwrite(out_image, image_wraped) 
Example 27
Project: Advanced_Lane_Lines   Author: ChengZhongShen   File: image_process.py    License: MIT License    6 votes
def test_thresh_images(src, dst, s_thresh, sx_thresh):
	"""
	apply the threshold to the images in the src folder and write the results to the dst folder
	"""
	image_files = glob.glob(src+"*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		image_threshed = color_grid_thresh(img, s_thresh=s_thresh, sx_thresh=sx_thresh)
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = dst+file_name
		print(out_image)
		# convert the binary image to RGB and scale by 255 for visualization; 1-valued pixels would not be visible after writing to file
		image_threshed = cv2.cvtColor(image_threshed*255, cv2.COLOR_GRAY2RGB)
		cv2.imwrite(out_image, image_threshed) 
Example 28
Project: Advanced_Lane_Lines   Author: ChengZhongShen   File: image_process.py    License: MIT License    6 votes
def test_yellow_grid_thresh_images(src, dst, y_low=(10,50,0), y_high=(30,255,255), sx_thresh=(20, 100)):
	"""
	apply the threshold to the images in the src folder and write the results to the dst folder
	"""
	image_files = glob.glob(src+"*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		image_threshed = yellow_grid_thresh(img, y_low, y_high, sx_thresh)
		
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = dst+file_name
		print(out_image)
		# convert the binary image to RGB and scale by 255 for visualization; 1-valued pixels would not be visible after writing to file
		image_threshed = cv2.cvtColor(image_threshed*255, cv2.COLOR_GRAY2RGB)
		cv2.imwrite(out_image, image_threshed) 
Example 29
Project: Advanced_Lane_Lines   Author: ChengZhongShen   File: image_process.py    License: MIT License    6 votes
def test_yellow_white_thresh_images(src, dst, y_low=(10,50,0), y_high=(30,255,255), w_low=(180,180,180), w_high=(255,255,255)):
	"""
	apply the threshold to the images in the src folder and write the results to the dst folder
	"""
	image_files = glob.glob(src+"*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		image_threshed = yellow_white_thresh(img, y_low, y_high, w_low, w_high)
		
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = dst+file_name
		print(out_image)
		# convert the binary image to RGB and scale by 255 for visualization; 1-valued pixels would not be visible after writing to file
		image_threshed = cv2.cvtColor(image_threshed*255, cv2.COLOR_GRAY2RGB)
		
		# HSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
		# V = HSV[:,:,2]
		# brightness = np.mean(V)
		# info_str = "brightness is: {}".format(int(brightness))
		# cv2.putText(image_threshed, info_str, (50,700), cv2.FONT_HERSHEY_SIMPLEX,2,(0,255,255),2)
		
		cv2.imwrite(out_image, image_threshed) 
Example 30
def main():
        parser = argparse.ArgumentParser()
        parser.add_argument("font_path", help="Path to ttf font file")
        parser.add_argument("output", help="Output filename including extension (e.g. 'sample.jpg')")
        parser.add_argument("--num", help="Up to 4 digit number [Default: random]")
        args = parser.parse_args()

        captcha = ImageCaptcha(fonts=[args.font_path])
        captcha_str = args.num if args.num else DigitCaptcha.get_rand(3, 4)
        img = captcha.generate(captcha_str)
        img = np.frombuffer(img.getvalue(), dtype='uint8')  # np.fromstring is deprecated
        img = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE)
        cv2.imwrite(args.output, img)
        print("Captcha image with digits {} written to {}".format([int(c) for c in captcha_str], args.output))