Python cv2.flip() Examples
The following are 30 code examples of cv2.flip().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module cv2, or try the search function.
Example #1
Source File: camera_test.py From crop_row_detection with GNU General Public License v3.0 | 7 votes |
def main():
    """Show a live thresholded frame-difference stream from the default camera.

    Runs until any key is pressed, then releases the capture device.
    """
    capture = cv2.VideoCapture(0)
    _, frame = capture.read()
    prev_frame = frame.copy()
    # waitKey returns -1 while no key is pressed; any keypress exits the loop.
    while cv2.waitKey(1) < 0:
        _, frame = capture.read()
        delta = cv2.absdiff(frame, prev_frame)
        # Zero out small pixel differences, then binarise whatever remains.
        _, delta = cv2.threshold(delta, 32, 0, cv2.THRESH_TOZERO)
        _, delta = cv2.threshold(delta, 0, 255, cv2.THRESH_BINARY)
        # Median blur removes salt-and-pepper speckle from the binary mask.
        delta = cv2.medianBlur(delta, 5)
        cv2.imshow('video', delta)
        prev_frame = frame.copy()
    capture.release()
    cv2.destroyAllWindows()
Example #2
Source File: SmartBinApp.py From SmartBin with MIT License | 7 votes |
def __init__(self, **kwargs):
    """Capture the first camera frame, upload it as a GL texture, and set up
    the view's state (Trashy coordinates, labels, users, tick schedule).

    Relies on the module-level globals `cap` (capture device), `frame`,
    and `frame_size` being initialised elsewhere in the file.
    """
    global cap, frame, frame_size
    # capture and render the first frame
    self.frame_size = frame_size
    # NOTE(review): if `cap` is a cv2.VideoCapture, read() returns a
    # (ret, frame) tuple and cv2.flip below would fail — verify cap's API.
    frame = cap.read()
    image = cv2.flip(frame, 0)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, frame_size)
    # tobytes() replaces ndarray.tostring(), which is deprecated and
    # removed in NumPy 2.0.
    buf = image.tobytes()
    self.image_texture = Texture.create(size=(image.shape[1], image.shape[0]), colorfmt='rgb')
    self.image_texture.blit_buffer(buf, colorfmt='rgb', bufferfmt='ubyte')
    # coordinates of Trashy
    self.t_x = 0
    self.t_y = 0
    self.current_user = 'No user yet'
    self.tickcount = 0
    self.labels = ["can", "bottle", "ken", "grace", "frank", "tim", "shelly"]
    self.users = ["ken", "grace", "frank", "tim", "shelly"]
    super(MainView, self).__init__(**kwargs)
    # Redraw roughly 16 times per second.
    Clock.schedule_interval(self.tick, 0.06)
Example #3
Source File: capture.py From EdgeRealtimeVideoAnalytics with Apache License 2.0 | 7 votes |
def __next__(self):
    """Return (frame_number, image); loops file sources and paces them to fps."""
    self.count += 1
    if self.isFile:
        # Pace playback of file sources to the target FPS using a
        # simple-moving-average of the inter-frame delay.
        self.sma.add(time.time() - self.ts)
        time.sleep(max(0, (1.0 - self.sma.current * self.fps) / self.fps))
        self.ts = time.time()
    ok, raw = self.cam.read()
    if not ok and self.isFile:
        # End of file: rewind to the first frame and read again.
        self.cam.set(cv2.CAP_PROP_POS_FRAMES, 0)
        ok, raw = self.cam.read()
    assert ok, 'Video Error'
    # Live camera frames are flipped horizontally; file frames are untouched.
    img = raw if self.isFile else cv2.flip(raw, 1)
    return self.count, img
Example #4
Source File: aircv.py From Airtest with Apache License 2.0 | 6 votes |
def rotate(img, angle=90, clockwise=True):
    """Rotate an image by 90, 180, or 270 degrees.

    Args:
        img: source image (array accepted by cv2.transpose/cv2.flip).
        angle: rotation angle; meaningful values are multiples of 90.
        clockwise: True rotates clockwise, False counter-clockwise.

    Returns:
        The rotated image (the input object itself when zero turns are needed).
    """
    def _rotate_ccw_90(image):
        # Transpose followed by a vertical flip is one 90-degree
        # counter-clockwise rotation.
        rotated = cv2.transpose(image)
        return cv2.flip(rotated, 0)

    # Express the requested rotation as a number of 90-degree CCW turns.
    # (The original also pre-allocated a throwaway np.zeros buffer that was
    # immediately overwritten; that dead code is removed.)
    ccw_turns = (4 - angle / 90) % 4 if clockwise else (angle / 90) % 4
    for _ in range(int(ccw_turns)):
        img = _rotate_ccw_90(img)
    return img
Example #5
Source File: augment.py From luna16 with BSD 2-Clause "Simplified" License | 6 votes |
def augment(self, Xb):
    """Draw random augmentation parameters and apply them to the batch Xb."""
    # Flip with probability 0.5.
    do_flip = np.random.randint(2) == 1
    # Random translation offsets.
    dx = np.random.uniform(*params.AUGMENTATION_PARAMS['translation_range'])
    dy = np.random.uniform(*params.AUGMENTATION_PARAMS['translation_range'])
    # Random rotation angle and a log-uniform zoom factor.
    angle = np.random.uniform(*params.AUGMENTATION_PARAMS['rotation_range'])
    log_zoom = [np.log(z) for z in params.AUGMENTATION_PARAMS['zoom_range']]
    zoom = np.exp(np.random.uniform(*log_zoom))
    # Random colour jitter (hue / saturation / value).
    hue = np.random.uniform(*params.AUGMENTATION_PARAMS['hue_range'])
    sat = np.random.uniform(*params.AUGMENTATION_PARAMS['saturation_range'])
    val = np.random.uniform(*params.AUGMENTATION_PARAMS['value_range'])
    return self.augment_with_params(Xb, dx, dy, angle, do_flip, zoom, hue, sat, val)
Example #6
Source File: augmentation.py From face_landmark with Apache License 2.0 | 6 votes |
def Mirror(src, label=None, symmetry=None):
    """Horizontally flip an image and (optionally) its landmark labels.

    Points with x < 0 are left unmirrored (presumably marking invisible
    landmarks — confirm against the dataset). `symmetry` lists index pairs
    of left/right landmarks to swap after mirroring.
    """
    img = cv2.flip(src, 1)
    if label is None:
        return img, label
    width = img.shape[1]
    mirrored = []
    for i in range(label.shape[0]):
        x, y = label[i][0], label[i][1]
        if x >= 0:
            x = width - 1 - x
        mirrored.append((x, y))
    # **** the joint index depends on the dataset ****
    for a, b in symmetry:
        mirrored[a], mirrored[b] = mirrored[b], mirrored[a]
    flat = []
    for x, y in mirrored:
        flat.append(x)
        flat.append(y)
    label = np.array(flat).reshape(label.shape[0], 2)
    return img, label
Example #7
Source File: ScreenGrab.py From BiblioPixelAnimations with MIT License | 6 votes |
def step(self, amt=1):
    """Grab one screen frame, crop or pad it, scale it to the layout, and draw."""
    image = self._capFrame()
    if self.crop:
        # Cut the configured margins (shifted by the x/y offsets) off each side.
        y0 = self._cropY + self.yoff
        y1 = self._ih - self._cropY + self.yoff
        x0 = self._cropX + self.xoff
        x1 = self._iw - self._cropX + self.xoff
        image = image[y0:y1, x0:x1]
    else:
        # Pad with black borders instead of cropping.
        t, b, l, r = self._pad
        image = cv2.copyMakeBorder(
            image, t, b, l, r, cv2.BORDER_CONSTANT, value=[0, 0, 0])
    resized = cv2.resize(image, (self.width, self.height),
                         interpolation=cv2.INTER_LINEAR)
    if self.mirror:
        resized = cv2.flip(resized, 1)
    # Push each pixel's first three channels into the LED layout.
    for y in range(self.height):
        for x in range(self.width):
            self.layout.set(x, y, tuple(resized[y, x][0:3]))
Example #8
Source File: noname.py From DDRL with Apache License 2.0 | 6 votes |
def __init__(self, horiz=False, vert=False, prob=0.5):
    """Random flip augmentor. Exactly one of horiz, vert must be set.

    :param horiz: whether or not apply horizontal flip.
    :param vert: whether or not apply vertical flip.
    :param prob: probability of flip.
    """
    super(Flip, self).__init__()
    if horiz and vert:
        raise ValueError("Please use two Flip instead.")
    elif horiz:
        self.code = 1   # cv2.flip code for a horizontal flip
    elif vert:
        self.code = 0   # cv2.flip code for a vertical flip
    else:
        # The original message ("Are you kidding?") gave the caller no clue;
        # state the contract instead.
        raise ValueError("One of horiz or vert must be True.")
    self.prob = prob
    self._init()
Example #9
Source File: verification.py From insightface with MIT License | 6 votes |
def load_bin(path, image_size):
    """Load a pickled verification set into (original, flipped) ndarray pairs.

    Returns (data_list, issame_list) where data_list[0] holds the images and
    data_list[1] their horizontal flips, both as NCHW mxnet NDArrays.
    """
    try:
        # Pickle written by Python 2 loads directly.
        with open(path, 'rb') as f:
            bins, issame_list = pickle.load(f)
    except UnicodeDecodeError:
        # Python 3 needs an explicit bytes encoding for py2-written pickles.
        with open(path, 'rb') as f:
            bins, issame_list = pickle.load(f, encoding='bytes')
    total = len(issame_list) * 2
    data_list = [nd.empty((total, 3, image_size[0], image_size[1]))
                 for _ in range(2)]
    for i in range(total):
        img = mx.image.imdecode(bins[i])
        if img.shape[1] != image_size[0]:
            img = mx.image.resize_short(img, image_size[0])
        # HWC -> CHW.
        img = nd.transpose(img, axes=(2, 0, 1))
        for flip in (0, 1):
            if flip == 1:
                img = mx.ndarray.flip(data=img, axis=2)
            data_list[flip][i][:] = img
        if i % 1000 == 0:
            print('loading bin', i)
    print(data_list[0].shape)
    return (data_list, issame_list)
Example #10
Source File: verification.py From insightface with MIT License | 6 votes |
def load_bin(path, image_size):
    """Load a pickled verification set into (original, flipped) ndarray pairs.

    Returns (data_list, issame_list) where data_list[0] holds the images and
    data_list[1] their horizontal flips, both as NCHW mxnet NDArrays.
    """
    # Context manager closes the file deterministically (the original leaked
    # the handle from open()).
    with open(path, 'rb') as f:
        bins, issame_list = pickle.load(f)
    data_list = []
    for flip in [0, 1]:
        data = nd.empty((len(issame_list)*2, 3, image_size[0], image_size[1]))
        data_list.append(data)
    # range() instead of the Python-2-only xrange() so this runs on Python 3.
    for i in range(len(issame_list)*2):
        _bin = bins[i]
        img = mx.image.imdecode(_bin)
        if img.shape[1] != image_size[0]:
            img = mx.image.resize_short(img, image_size[0])
        # HWC -> CHW.
        img = nd.transpose(img, axes=(2, 0, 1))
        for flip in [0, 1]:
            if flip == 1:
                img = mx.ndarray.flip(data=img, axis=2)
            data_list[flip][i][:] = img
        if i % 1000 == 0:
            print('loading bin', i)
    print(data_list[0].shape)
    return (data_list, issame_list)
Example #11
Source File: collect_face_images.py From face-recognition with BSD 3-Clause "New" or "Revised" License | 6 votes |
def main(directory, name, test):
    """Preview the webcam mirrored; save every 10th frame unless in test mode."""
    cap = cv2.VideoCapture(0)
    count = 0
    while True:
        # Grab a frame and mirror it for a natural preview.
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        cv2.imshow(name, frame)
        # Persist every 10th frame (skipping frame 0) when not in test mode.
        if not test and count != 0 and count % 10 == 0:
            cv2.imwrite("{}/{}{}.png".format(directory, name, int(count / 10)), frame)
        count += 1
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture device.
    cap.release()
    cv2.destroyAllWindows()
Example #12
Source File: video_classifier.py From face-recognition with BSD 3-Clause "New" or "Revised" License | 6 votes |
def main():
    """Run the face recogniser on a live webcam stream until 'q' is pressed."""
    cap = cv2.VideoCapture(0)
    face_recogniser = joblib.load(MODEL_PATH)
    preprocess = preprocessing.ExifOrientationNormalize()
    while True:
        # Grab a frame and mirror it for a natural preview.
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        img = Image.fromarray(frame)
        faces = face_recogniser(preprocess(img))
        if faces is not None:
            draw_bb_on_img(faces, img)
        # Show the annotated frame.
        cv2.imshow('video', np.array(img))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture device.
    cap.release()
    cv2.destroyAllWindows()
Example #13
Source File: cv2_aug_transforms.py From openseg.pytorch with MIT License | 6 votes |
def __call__(self, img, labelmap=None, maskmap=None):
    """Randomly mirror image/label/mask horizontally, swapping paired label ids."""
    assert isinstance(img, np.ndarray)
    assert labelmap is None or isinstance(labelmap, np.ndarray)
    assert maskmap is None or isinstance(maskmap, np.ndarray)
    # Leave everything untouched with probability (1 - ratio).
    if random.random() > self.ratio:
        return img, labelmap, maskmap
    height, width, _ = img.shape
    img = cv2.flip(img, 1)
    if labelmap is not None:
        labelmap = cv2.flip(labelmap, 1)
        # Datasets with left/right annotations: exchange each paired label id,
        # using a snapshot so the two writes don't interfere.
        if self.swap_pair is not None:
            assert isinstance(self.swap_pair, (tuple, list))
            snapshot = labelmap.copy()
            for pair in self.swap_pair:
                assert isinstance(pair, (tuple, list)) and len(pair) == 2
                labelmap[snapshot == pair[0]] = pair[1]
                labelmap[snapshot == pair[1]] = pair[0]
    if maskmap is not None:
        maskmap = cv2.flip(maskmap, 1)
    return img, labelmap, maskmap
Example #14
Source File: get_hand_images.py From Emojinator with MIT License | 6 votes |
def main():
    """Capture `total_pics` mirrored webcam frames into hand_images/ as JPEGs."""
    total_pics = 1000
    cap = cv2.VideoCapture(0)
    pic_no = 0
    # (Unused locals x/y/w/h, flag_start_capturing and frames removed.)
    while True:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        cv2.imwrite("hand_images/" + str(pic_no) + ".jpg", frame)
        cv2.imshow("Capturing gesture", frame)
        # imshow only renders during waitKey; without this call the preview
        # window never updates.
        cv2.waitKey(1)
        pic_no += 1
        if pic_no == total_pics:
            break
    # Release the camera and close the preview window (missing in the original).
    cap.release()
    cv2.destroyAllWindows()
Example #15
Source File: process_data.py From 3D_detection with MIT License | 6 votes |
def get_obj_patch(image_dir, obj, target_size=(224, 224)):
    """Crop an object's bounding box from its image, randomly mirror it,
    resize, and subtract fixed per-channel means.

    Returns:
        (patch, is_flipped): the float32 patch and whether it was mirrored.
    """
    ### Prepare image patch (random jitter is disabled).
    xmin, ymin = obj['xmin'], obj['ymin']
    xmax, ymax = obj['xmax'], obj['ymax']
    img = cv2.imread(image_dir + obj['image'])
    patch = copy.deepcopy(img[ymin:ymax + 1, xmin:xmax + 1]).astype(np.float32)
    # Mirror the patch with probability 0.5.
    is_flipped = bool(np.random.binomial(1, .5))
    if is_flipped:
        patch = cv2.flip(patch, 1)
    # Resize to the network's input size.
    patch = cv2.resize(patch, target_size)
    # Subtract per-channel means (presumably ImageNet BGR means — confirm).
    patch = patch - np.array([[[103.939, 116.779, 123.68]]])
    return patch, is_flipped
Example #16
Source File: image.py From ImageAI with MIT License | 5 votes |
def random_flip(image, flip):
    """Mirror `image` horizontally when flip == 1; otherwise return it unchanged."""
    return cv2.flip(image, 1) if flip == 1 else image
Example #17
Source File: image.py From ImageAI with MIT License | 5 votes |
def correct_bounding_boxes(boxes, new_w, new_h, net_w, net_h, dx, dy, flip, image_w, image_h):
    """Rescale, translate, clamp, and optionally mirror bounding boxes for a
    resized network input; boxes that collapse to zero area are dropped and
    the remaining boxes are returned in randomised order.
    """
    boxes = copy.deepcopy(boxes)
    # randomize boxes' order
    np.random.shuffle(boxes)
    # Scale factors from the original image to the resized one.
    sx, sy = float(new_w) / image_w, float(new_h) / image_h
    degenerate = []
    for i, box in enumerate(boxes):
        box['xmin'] = int(_constrain(0, net_w, box['xmin'] * sx + dx))
        box['xmax'] = int(_constrain(0, net_w, box['xmax'] * sx + dx))
        box['ymin'] = int(_constrain(0, net_h, box['ymin'] * sy + dy))
        box['ymax'] = int(_constrain(0, net_h, box['ymax'] * sy + dy))
        # Drop boxes that clamped down to nothing.
        if box['xmax'] <= box['xmin'] or box['ymax'] <= box['ymin']:
            degenerate.append(i)
            continue
        if flip == 1:
            # Mirror the box horizontally inside the network input width.
            box['xmin'], box['xmax'] = net_w - box['xmax'], net_w - box['xmin']
    return [b for i, b in enumerate(boxes) if i not in degenerate]
Example #18
Source File: test_webcam.py From face_landmark_dnn with MIT License | 5 votes |
def webcam_main():
    """Stream webcam frames, detect faces, predict landmarks, and draw them."""
    print("Camera sensor warming up...")
    vs = cv2.VideoCapture(0)
    time.sleep(2.0)   # give the camera time to initialise
    mark_detector = MarkDetector()
    # loop over the frames from the video stream
    while True:
        _, frame = vs.read()
        start = cv2.getTickCount()
        frame = imutils.resize(frame, width=750, height=750)
        frame = cv2.flip(frame, 1)
        faceboxes = mark_detector.extract_cnn_facebox(frame)
        if faceboxes is not None:
            for facebox in faceboxes:
                # Detect landmarks from image of 64X64 with grayscale.
                face_img = frame[facebox[1]: facebox[3], facebox[0]: facebox[2]]
                face_img = cv2.resize(face_img, (CNN_INPUT_SIZE, CNN_INPUT_SIZE))
                face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)
                face_img0 = face_img.reshape(1, CNN_INPUT_SIZE, CNN_INPUT_SIZE, 1)
                marks = mark_detector.detect_marks_keras(face_img0)
                # Map predicted marks back into facebox coordinates.
                marks *= facebox[2] - facebox[0]
                marks[:, 0] += facebox[0]
                marks[:, 1] += facebox[1]
                # Draw Predicted Landmarks
                mark_detector.draw_marks(frame, marks, color=(255, 255, 255), thick=2)
        fps_time = (cv2.getTickCount() - start) / cv2.getTickFrequency()
        cv2.putText(frame, '%.1ffps' % (1 / fps_time), (frame.shape[1] - 65, 15),
                    cv2.FONT_HERSHEY_DUPLEX, 0.5, (0, 255, 0))
        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    # do a bit of cleanup
    cv2.destroyAllWindows()
    # Fix: cv2.VideoCapture has no stop() method; release() frees the device.
    vs.release()
Example #19
Source File: engine_cv3.py From opencv-engine with MIT License | 5 votes |
def rotate(self, degrees):
    """Rotate self.image in place by the given number of degrees.

    90/180/270 use lossless transpose/flip combinations
    (see http://stackoverflow.com/a/23990392); any other angle uses a
    bounds-expanding affine warp (see http://stackoverflow.com/a/37347070).
    """
    if degrees == 90:
        self.image = cv2.transpose(self.image)
        cv2.flip(self.image, 0, self.image)
    elif degrees == 180:
        cv2.flip(self.image, -1, self.image)
    elif degrees == 270:
        self.image = cv2.transpose(self.image)
        cv2.flip(self.image, 1, self.image)
    else:
        # A one-pixel glitch can appear if this path handles 90/180/270
        # (check github.com/recurser/exif-orientation-examples), which is
        # why the transpose/flip fast paths above exist.
        width, height = self.size
        center = (width / 2, height / 2)
        matrix = cv2.getRotationMatrix2D(center, degrees, 1.0)
        cos = abs(matrix[0, 0])
        sin = abs(matrix[0, 1])
        # New bounding size so the rotated image is not clipped.
        bound_w = int((height * sin) + (width * cos))
        bound_h = int((height * cos) + (width * sin))
        # Shift the rotation centre to the centre of the new bounds.
        matrix[0, 2] += (bound_w / 2) - center[0]
        matrix[1, 2] += (bound_h / 2) - center[1]
        self.image = cv2.warpAffine(self.image, matrix, (bound_w, bound_h))
Example #20
Source File: load_images.py From hazymaze with Apache License 2.0 | 5 votes |
def load_doggy():
    """Slice the 32x32 dog sprite sheet into per-direction animation arrays.

    Returns [left, up, down, right] where each entry is a (32, 32, 4, 7)
    array of RGBA frames; right is built by mirroring the left frames.
    """
    sheet = cv2.imread('images/32x32dog.png', cv2.IMREAD_UNCHANGED)
    left = np.empty((32, 32, 4, 7))
    up = np.empty((32, 32, 4, 7))
    down = np.empty((32, 32, 4, 7))
    # Right is produced later by flipping left.
    right = np.empty((32, 32, 4, 7))
    dog_sprite = [left, up, down]
    # Sheet layout: 3 frames happy dog + 1 frame sleepy dog (left row only).
    for row, direction in enumerate(dog_sprite):
        for col in range(7):
            direction[:, :, :, col] = sheet[row*32:row*32+32, col*32:col*32+32, :]
    # Build the right-facing frames by mirroring the left ones.
    for col in range(7):
        right[:, :, :, col] = cv2.flip(left[:, :, :, col], 1)
    dog_sprite.append(right)
    return dog_sprite
Example #21
Source File: load_images.py From hazymaze with Apache License 2.0 | 5 votes |
def load_player():
    """Slice the 32x36 player sprite sheet into per-direction animation arrays.

    Returns [up, right, down, left] where each entry is a (36, 32, 4, 13)
    array of RGBA frames; left is built by mirroring the right frames.
    """
    sheet = cv2.imread('images/32x36guy.png', cv2.IMREAD_UNCHANGED)
    up = np.empty((36, 32, 4, 13))
    right = np.empty((36, 32, 4, 13))
    down = np.empty((36, 32, 4, 13))
    # Left is produced later by flipping right.
    left = np.empty((36, 32, 4, 13))
    player_sprite = [up, right, down]
    # Sheet layout per row: walking (3), punching (4), cheer (5), dead (1).
    for row, direction in enumerate(player_sprite):
        for col in range(13):
            direction[:, :, :, col] = sheet[row*36:row*36+36, col*32:col*32+32, :]
    # Build the left-facing frames by mirroring the right ones.
    for col in range(13):
        left[:, :, :, col] = cv2.flip(right[:, :, :, col], 1)
    player_sprite.append(left)
    return player_sprite
Example #22
Source File: transforms.py From vedaseg with Apache License 2.0 | 5 votes |
def __call__(self, image, mask):
    """Horizontally flip image and mask together when random() exceeds self.p.

    NOTE(review): this flips with probability (1 - p), not p — confirm the
    inversion is intentional.
    """
    if random.random() > self.p:
        image = cv2.flip(image, 1)
        mask = cv2.flip(mask, 1)
    return image, mask
Example #23
Source File: augment.py From luna16 with BSD 2-Clause "Simplified" License | 5 votes |
def augment_image(original_image, M=0, random_flip=0, random_hue=0, random_saturation=0, random_value=0):
    """Apply affine warp M, an optional horizontal flip, and optional HSV jitter.

    Input and output are channels-first (c01); OpenCV works on 01c in between.
    """
    # c01 -> 01c so OpenCV can operate on it.
    warped = cv2.warpAffine(original_image.transpose(1, 2, 0), M,
                            (params.PIXELS, params.PIXELS))
    if random_flip:
        warped = cv2.flip(warped, 1)
    if params.COLOR_AUGMENTATION:
        warped = util.hsv_augment(warped, random_hue, random_saturation, random_value)
    # Back to channels-first (c01).
    return warped.transpose(2, 0, 1)
Example #24
Source File: flip.py From detection with GNU General Public License v2.0 | 5 votes |
def main():
    """Flip every .jpg in a folder horizontally, saving '<name>-flipped<ext>' copies."""
    parser = argparse.ArgumentParser(description="Flip all images in folder.")
    parser.add_argument("folder", help="Folder path")
    args = parser.parse_args()
    samples = glob.glob(args.folder + "/*.jpg")
    for sample in samples:
        img = cv2.imread(sample)
        # Flip in place into img; cv2.flip also returns the result.
        flipped = cv2.flip(img, 1, img)
        fn, ext = os.path.splitext(sample)
        name = fn + "-flipped" + ext
        cv2.imshow("flipped", flipped)
        # Fix: the original used the Python-2-only print statement
        # (`print name`), a SyntaxError on Python 3.
        print(name)
        cv2.imwrite(name, flipped)
Example #25
Source File: detection.py From detection with GNU General Public License v2.0 | 5 votes |
def main(self, mode, path):
    """Main loop: read frames from `path`, mirror camera input, and display them."""
    # (Re)open the capture when it is missing, closed, or the mode changed.
    if not self.capture or not self.capture.isOpened() or self.mode != mode:
        self.capture = cv2.VideoCapture(path)
        self.mode = mode
    if not self.capture.isOpened():
        # Fix: the original used the Python-2-only print statement,
        # a SyntaxError on Python 3.
        print("Couldn't read media " + path)
    while self.capture.isOpened():
        if self.stopped:
            break
        ret, frame = self.capture.read()
        # End of stream: stop. (Dead commented-out restart code removed.)
        if frame is None:
            break
        if mode == self.mw.SOURCE_CAMERA:
            # Mirror live camera frames in place.
            cv2.flip(frame, 1, frame)
        self.mw.displayImage(frame)
        QApplication.processEvents()
        if self.nextFrame:
            self.setNextFrameMode(False)
            break
Example #26
Source File: utils.py From deep-landmark with BSD 3-Clause "New" or "Revised" License | 5 votes |
def flip(face, landmark):
    """Mirror a face crop together with its landmarks.

    Landmark x's are mirrored as 1 - x (assumes coordinates normalised to
    [0, 1] — confirm against the caller). Index pairs (0, 1) and (3, 4)
    are swapped to keep left/right semantics.
    """
    mirrored_face = cv2.flip(face, 1)
    mirrored = np.asarray([(1 - x, y) for (x, y) in landmark])
    # Exchange the left/right symmetric points.
    mirrored[[0, 1]] = mirrored[[1, 0]]
    mirrored[[3, 4]] = mirrored[[4, 3]]
    return (mirrored_face, mirrored)
Example #27
Source File: misc.py From meta-transfer-learning with MIT License | 5 votes |
def process_batch_augmentation(input_filename_list, input_label_list, dim_input, batch_sample_num):
    """The function to process a part of an episode.
    All the images will be augmented by flipping.
    Args:
      input_filename_list: the image files' directory list.
      input_label_list: the image files' corresponding label list.
      dim_input: the dimension number of the images.
      batch_sample_num: the sample number of the input images.
    Returns:
      img_array: the numpy array of processed images.
      label_array: the numpy array of processed labels.
    """
    # For each of the batch_sample_num positions, visit the classes in a
    # fresh random order.
    shuffled_paths = []
    shuffled_labels = []
    for k in range(batch_sample_num):
        class_order = list(range(0, FLAGS.way_num))
        random.shuffle(class_order)
        for cls in class_order:
            true_idx = cls * batch_sample_num + k
            shuffled_paths.append(input_filename_list[true_idx])
            shuffled_labels.append(input_label_list[true_idx])
    originals = []
    flipped = []
    for filepath in shuffled_paths:
        img = imread(filepath)
        # Horizontal flip doubles the sample count.
        img_h = cv2.flip(img, 1)
        originals.append(np.reshape(img, [-1, dim_input]) / 255.0)
        flipped.append(np.reshape(img_h, [-1, dim_input]) / 255.0)
    all_imgs = originals + flipped
    all_labels = shuffled_labels + shuffled_labels
    img_array = np.array(all_imgs).reshape([FLAGS.way_num * batch_sample_num * 2, dim_input])
    label_array = one_hot(np.array(all_labels)).reshape([FLAGS.way_num * batch_sample_num * 2, -1])
    return img_array, label_array
Example #28
Source File: kivy_cv1.py From OpenCV-Python-Tutorial with MIT License | 5 votes |
def update(self, dt):
    """Per-frame callback: read a camera frame and display it as a texture."""
    ret, frame = self.capture.read()
    if ret:
        # convert it to texture; flip vertically because OpenCV rows are
        # top-down while GL textures are bottom-up.
        buf1 = cv2.flip(frame, 0)
        # tobytes() replaces ndarray.tostring(), which is deprecated and
        # removed in NumPy 2.0.
        buf = buf1.tobytes()
        image_texture = Texture.create(
            size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
        image_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
        # display image from the texture
        self.texture = image_texture
Example #29
Source File: noname.py From DDRL with Apache License 2.0 | 5 votes |
def _augment(self, img, do):
    """Apply the flip configured in self.code to img when `do` is truthy."""
    return cv2.flip(img, self.code) if do else img
Example #30
Source File: rectangle_tracker.py From python-opencv-rectangle-tracker with Apache License 2.0 | 5 votes |
def rotate180(im):
    """Rotate an image by 180 degrees (a single flip across both axes)."""
    rotated = cv2.flip(im, -1)
    return rotated