Python cv2.calcOpticalFlowFarneback() Examples

The following are 16 code examples of cv2.calcOpticalFlowFarneback(), collected from open-source projects. The source file, project, and license are noted above each example. You may also want to check out the other available functions/classes of the cv2 module.
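As a quick orientation before the project examples, here is a minimal, hedged sketch of a typical call. It is not taken from any of the projects below; the frame file names and variable names are purely illustrative, and the numeric values are the commonly used starting parameters.

import cv2

# Two consecutive frames converted to single-channel grayscale (illustrative file names).
prev_gray = cv2.cvtColor(cv2.imread("frame0.png"), cv2.COLOR_BGR2GRAY)
next_gray = cv2.cvtColor(cv2.imread("frame1.png"), cv2.COLOR_BGR2GRAY)

# Dense Farneback optical flow between the two frames.
flow = cv2.calcOpticalFlowFarneback(
    prev_gray, next_gray, None,  # previous frame, next frame, optional initial flow
    pyr_scale=0.5,   # image scale (<1) between pyramid levels
    levels=3,        # number of pyramid levels
    winsize=15,      # averaging window size
    iterations=3,    # iterations per pyramid level
    poly_n=5,        # pixel neighborhood size for the polynomial expansion
    poly_sigma=1.2,  # Gaussian sigma used for the polynomial expansion
    flags=0)         # e.g. cv2.OPTFLOW_FARNEBACK_GAUSSIAN for a Gaussian window

# flow has shape (H, W, 2) and holds the per-pixel (dx, dy) displacement in pixels;
# many of the examples below convert it to magnitude/angle for visualization.
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])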
Example #1
Source File: video2tfrecord.py    From video2tfrecord with MIT License
def compute_dense_optical_flow(prev_image, current_image):
  old_shape = current_image.shape
  prev_image_gray = cv2.cvtColor(prev_image, cv2.COLOR_BGR2GRAY)
  current_image_gray = cv2.cvtColor(current_image, cv2.COLOR_BGR2GRAY)
  assert current_image.shape == old_shape
  hsv = np.zeros_like(prev_image)
  hsv[..., 1] = 255
  flow = None
  flow = cv2.calcOpticalFlowFarneback(prev=prev_image_gray,
                                      next=current_image_gray, flow=flow,
                                      pyr_scale=0.8, levels=15, winsize=5,
                                      iterations=10, poly_n=5, poly_sigma=0,
                                      flags=10)

  mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
  hsv[..., 0] = ang * 180 / np.pi / 2
  hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
  return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) 
Example #2
Source File: image_util.py    From video_seg with Apache License 2.0
def compute_opticalflow(prev_image, cur_image, args):
  prev_gray = cv2.cvtColor(prev_image, cv2.COLOR_RGB2GRAY)
  cur_gray  = cv2.cvtColor(cur_image, cv2.COLOR_RGB2GRAY)
  pyr_scale = args.pyr_scale
  pyr_levels = args.pyr_levels
  winsize = args.winsize
  iterations = args.iterations
  poly_n = args.poly_n
  poly_sigma = args.poly_sigma
  flow = cv2.calcOpticalFlowFarneback(prev_gray, cur_gray, flow=None,
                                      pyr_scale=pyr_scale,
                                      levels=pyr_levels,
                                      iterations=iterations,
                                      winsize=winsize,
                                      poly_n=poly_n,
                                      poly_sigma=poly_sigma,
                                      flags=0)
  return flow 
Example #3
Source File: tracking_engine.py    From DetectAndTrack with Apache License 2.0
def run_farneback(frames):
    try:
        return cv2.calcOpticalFlowFarneback(
            frames[0], frames[1],
            # options, defaults
            None,  # output
            0.5,  # pyr_scale, 0.5
            10,  # levels, 3
            min(frames[0].shape[:2]) // 5,  # winsize, 15
            10,  # iterations, 3
            7,  # poly_n, 5
            1.5,  # poly_sigma, 1.2
            cv2.OPTFLOW_FARNEBACK_GAUSSIAN,  # flags, 0
        )
    except cv2.error:
        return None 
Example #4
Source File: opticalFlow.py    From Mask-RCNN-Pedestrian-Detection with MIT License
def denseOpticalFlow():
    # use 0 for webcam capturing
    # cap = cv2.VideoCapture(0)

    cap = cv2.VideoCapture('test/Pedestrian overpass.mp4')
    ret, frame1 = cap.read()
    prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame1)
    hsv[...,1] = 255

    while(1):
        ret, frame2 = cap.read()
        next = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])

        hsv[...,0] = ang*180/np.pi/2
        hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)

        # print(np.sum(mag[100:300, 100:300]))
        if (np.sum(mag)> 100000):
            print('motion detected')

        bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
        cv2.imshow('frame2',bgr)

        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
        elif k == ord('s'):
            cv2.imwrite('opticalfb.png',frame2)
            cv2.imwrite('opticalhsv.png',bgr)
        prvs = next

    cap.release()
    cv2.destroyAllWindows() 
Example #5
Source File: video_avi_flow_saliency.py    From self-supervision with BSD 3-Clause "New" or "Revised" License
def extract_optical_flow(fn, times, frames=8, scale_factor=1.0):
    cap = cv2.VideoCapture(fn)
    n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    outputs = []
    if n_frames < frames * 2:
        return outputs

    def resize(im):
        if scale_factor != 1.0:
            new_size = (int(im.shape[1] * scale_factor), int(im.shape[0] * scale_factor))
            return cv2.resize(im, new_size, interpolation=cv2.INTER_LINEAR)
        else:
            return im

    for t in times:
        cap.set(cv2.CAP_PROP_POS_FRAMES, min(t * n_frames, n_frames - 1 - frames))
        ret, frame0 = cap.read()
        im0 = resize(cv2.cvtColor(frame0, cv2.COLOR_BGR2GRAY))
        mags = []
        middle_frame = frame0
        for f in range(frames - 1):
            ret, frame1 = cap.read()
            if f == frames // 2:
                middle_frame = frame1
            im1 = resize(cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY))
            flow = cv2.calcOpticalFlowFarneback(im0, im1,
                        None, 0.5, 3, 15, 3, 5, 1.2, 0)
            mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
            mags.append(mag)
            im0 = im1
        mag = np.sum(mags, 0)
        mag = mag.clip(min=0)
        norm_mag = (mag - mag.min()) / (mag.max() - mag.min() + 1e-5)
        x = middle_frame[..., ::-1].astype(np.float32) / 255
        outputs.append((x, norm_mag))
    return outputs
Example #6
Source File: video_jpeg_rolls_flow_saliency.py    From self-supervision with BSD 3-Clause "New" or "Revised" License
def extract_optical_flow(fn, n_frames=34):
    img = dd.image.load(fn)
    if img.shape != (128*34, 128, 3):
        return []
    frames = np.array_split(img, 34, axis=0)
    grayscale_frames = [fr.mean(-1) for fr in frames]
    mags = []
    skip_frames = np.random.randint(34 - n_frames + 1)
    middle_frame = frames[np.random.randint(skip_frames, skip_frames+n_frames)]
    im0 = grayscale_frames[skip_frames]
    for f in range(1+skip_frames, 1+skip_frames+n_frames-1):
        im1 = grayscale_frames[f]
        flow = cv2.calcOpticalFlowFarneback(im0, im1,
                    None, # flow
                    0.5, # pyr_scale
                    3, # levels
                    np.random.randint(3, 20), # winsize
                    3, #iterations
                    5, #poly_n 
                    1.2, #poly_sigma
                    0 # flags
        )
        mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
        mags.append(mag)
        im0 = im1
    mag = np.sum(mags, 0)
    mag = mag.clip(min=0)
    #norm_mag = np.tanh(mag * 10000)
    norm_mag = (mag - mag.min()) / (mag.max() - mag.min() + 1e-5)
    outputs = []
    outputs.append((middle_frame, norm_mag))
    return outputs 
Example #7
Source File: pySaliencyMap.py    From pliers with BSD 3-Clause "New" or "Revised" License
def MFMGetFM(self, src):
        # convert scale
        I8U = np.uint8(255 * src)
        cv2.waitKey(10)
        # calculating optical flows
        if self.prev_frame is not None:
            farne_pyr_scale = pySaliencyMapDefs.farne_pyr_scale
            farne_levels = pySaliencyMapDefs.farne_levels
            farne_winsize = pySaliencyMapDefs.farne_winsize
            farne_iterations = pySaliencyMapDefs.farne_iterations
            farne_poly_n = pySaliencyMapDefs.farne_poly_n
            farne_poly_sigma = pySaliencyMapDefs.farne_poly_sigma
            farne_flags = pySaliencyMapDefs.farne_flags
            flow = cv2.calcOpticalFlowFarneback(
                prev=self.prev_frame,
                next=I8U,
                pyr_scale=farne_pyr_scale,
                levels=farne_levels,
                winsize=farne_winsize,
                iterations=farne_iterations,
                poly_n=farne_poly_n,
                poly_sigma=farne_poly_sigma,
                flags=farne_flags,
                flow=None
            )
            flowx = flow[..., 0]
            flowy = flow[..., 1]
        else:
            flowx = np.zeros(I8U.shape)
            flowy = np.zeros(I8U.shape)
        # create Gaussian pyramids
        dst_x = self.FMGaussianPyrCSD(flowx)
        dst_y = self.FMGaussianPyrCSD(flowy)
        # update the current frame
        self.prev_frame = np.uint8(I8U)
        # return
        return dst_x, dst_y

Example #8
Source File: metrics.py    From suite2p with GNU General Public License v3.0
def optic_flow(mov, tmpl, nflows):
    """ optic flow computation using farneback """
    window = int(1 / 0.2) # window size
    nframes, Ly, Lx = mov.shape
    mov = mov.astype(np.float32)
    mov = np.reshape(mov[:int(np.floor(nframes/window)*window),:,:],
                                  (-1,window,Ly,Lx)).mean(axis=1)

    mov = mov[np.random.permutation(mov.shape[0])[:min(nflows,mov.shape[0])], :, :]

    pyr_scale=.5
    levels=3
    winsize=100
    iterations=15
    poly_n=5
    poly_sigma=1.2 / 5
    flags=0

    nframes, Ly, Lx = mov.shape
    norms = np.zeros((nframes,))
    flows = np.zeros((nframes,Ly,Lx,2))

    for n in range(nframes):
        flow = cv2.calcOpticalFlowFarneback(
            tmpl, mov[n,:,:], None, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags)

        flows[n,:,:,:] = flow
        norms[n] = ((flow**2).sum()) ** 0.5

    return flows, norms 
Example #9
Source File: opt_flow.py    From Walk-Assistant with GNU General Public License v3.0
def get_direction(self, frame1, frame2, show=False):
        frame1 = cv2.resize(frame1, (self.width, self.height))
        frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
        frame2 = cv2.resize(frame2, (self.width, self.height))
        frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

        flow = cv2.calcOpticalFlowFarneback(frame1[self.height_start:self.height_end],
                                            frame2[self.height_start:self.height_end], None, 0.5, 3, 15, 1, 5, 1.2, 0)
        flow_avg = np.median(flow, axis=(0, 1))  # [x, y]

        move_x = -1 * flow_avg[0]
        move_y = -1 * flow_avg[1]

        if show:
            hsv = np.zeros((self.height_end - self.height_start, self.width, 3))
            hsv[...,1] = 255
            mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
            hsv[..., 0] = ang * 180 / np.pi / 2
            hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
            bgr = cv2.cvtColor(np.array(hsv).astype(np.uint8), cv2.COLOR_HSV2BGR)

            cv2.imshow('opt_flow', bgr)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                print('User Interrupted')
                exit(1)

        return move_x, move_y 
Example #10
Source File: OF_utils.py    From ActionRecognition with MIT License
def _calc_optical_flow(prev, next_):
    flow = cv2.calcOpticalFlowFarneback(prev, next_, flow=None, pyr_scale=0.5, levels=3, winsize=15, iterations=3,
                                        poly_n=5, poly_sigma=1.2, flags=0)
    return flow 
Example #11
Source File: dense_optflow.py    From snapchat-filters-opencv with MIT License
def dense_flow(image):
    global prvs
    next = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)

    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    prvs = next
    return image 
Example #12
Source File: opticalflow.py    From sign-language with MIT License
def next(self, arImage:np.array) -> np.array:

        # first?
        if self.arPrev.shape == (1,1): return self.first(arImage)

        # get image in black&white
        arCurrent = cv2.cvtColor(arImage, cv2.COLOR_BGR2GRAY)

        if self.sAlgorithm == "tvl1":
            arFlow = self.oTVL1.calc(self.arPrev, arCurrent, None)
        elif self.sAlgorithm == "farnback":
            arFlow = cv2.calcOpticalFlowFarneback(self.arPrev, arCurrent, flow=None, 
                pyr_scale=0.5, levels=1, winsize=15, iterations=2, poly_n=5, poly_sigma=1.1, flags=0)
        else: raise ValueError("Unknown optical flow type")

        # only 2 dims
        arFlow = arFlow[:, :, 0:2]

        # truncate to +/-15.0, then rescale to [-1.0, 1.0]
        arFlow[arFlow > self.fBound] = self.fBound 
        arFlow[arFlow < -self.fBound] = -self.fBound
        arFlow = arFlow / self.fBound

        if self.bThirdChannel:
            # add third empty channel
            arFlow = np.concatenate((arFlow, self.arZeros), axis=2) 

        self.arPrev = arCurrent

        return arFlow 
Example #13
Source File: video_avi_flow.py    From self-supervision with BSD 3-Clause "New" or "Revised" License
def extract_optical_flow(fn, times, frames=8, scale_factor=1.0):
    cap = cv2.VideoCapture(fn)
    if not cap.isOpened():
        return []
    n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    outputs = []
    if n_frames < frames * 2:
        return outputs

    def resize(im):
        if scale_factor != 1.0:
            new_size = (int(im.shape[1] * scale_factor), int(im.shape[0] * scale_factor))
            return cv2.resize(im, new_size, interpolation=cv2.INTER_LINEAR)
        else:
            return im

    for t in times:
        cap.set(cv2.CAP_PROP_POS_FRAMES, min(t * n_frames, n_frames - 1 - frames))
        ret, frame0 = cap.read()
        im0 = resize(cv2.cvtColor(frame0, cv2.COLOR_BGR2GRAY))
        mags = []
        middle_frame = frame0
        flows = []
        for f in range(frames - 1):
            ret, frame1 = cap.read()
            if f == frames // 2:
                middle_frame = frame1
            im1 = resize(cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY))
            flow = cv2.calcOpticalFlowFarneback(im0, im1,
                        None,
                        0.5, # pyr_scale
                        8,   # levels
                        int(40 * scale_factor),  # winsize
                        10,   # iterations
                        5,  # poly_n
                        1.1, # poly_sigma
                        cv2.OPTFLOW_FARNEBACK_GAUSSIAN)
            #mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
            #mags.append(mag)
            flows.append(flow)
            im0 = im1
        flow = (np.mean(flows, 0) / 100).clip(-1, 1)

        #flow = np.mean(flows, 0)
        #flow /= (flow.mean() * 5 + 1e-5)
        #flow = flow.clip(-1, 1)
        #flows = flows / (np.mean(flows, 0, keepdims=True) + 1e-5)
        x = middle_frame[..., ::-1].astype(np.float32) / 255
        outputs.append((x, flow))
    return outputs 
Example #14
Source File: opticalflow_estimate.py    From CrowdFlow with GNU General Public License v3.0
def run_parameter(config_item):
    prev_img        = cv2.imread(config_item["files"]["prevImg"])
    curr_img        = cv2.imread(config_item["files"]["currImg"])
    flow_method     = config_item["parameter"]["flow_method"]
    estimate_base   = config_item["files"]["estimatepath"]  + "/"
    
    if os.path.exists(estimate_base) == False:
       os.makedirs(estimate_base)

    if os.path.exists(config_item["files"]["estflow"]):
        return
    #  compute optical flow
    if  flow_method.find("dual") >= 0:
        dual_proc = cv2.DualTVL1OpticalFlow_create(config_item["parameter"]["tau"],
                                                   config_item["parameter"]["lambda"],
                                                   config_item["parameter"]["theta"],
                                                   config_item["parameter"]["nscales"],
                                                   config_item["parameter"]["warps"])
        est_flow = np.zeros(shape=(prev_img.shape[0], prev_img.shape[1],2), dtype=np.float32)
        dual_proc.calc(cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY), cv2.cvtColor(curr_img, cv2.COLOR_BGR2GRAY), est_flow)
    #
    elif flow_method.find("farneback") >= 0:
        est_flow = cv2.calcOpticalFlowFarneback(cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY),
                                                cv2.cvtColor(curr_img, cv2.COLOR_BGR2GRAY),
                                                None, 0.5, 3, 15, 3, 5, 1.2, 0)
    elif flow_method.find("plk") >= 0:
        prev_pts = list()
        for r in range(prev_img.shape[0]):
            for c in range(prev_img.shape[1]):
                prev_pts.append((c,r))
        prev_pts = np.array(prev_pts, dtype=np.float32)
        curr_pts, st, err = cv2.calcOpticalFlowPyrLK(cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY),
                                                cv2.cvtColor(curr_img, cv2.COLOR_BGR2GRAY),
                                               prev_pts, None,
                                               winSize=(21,21), maxLevel=3, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.001))
        est_flow = np.zeros(shape=(prev_img.shape[0], prev_img.shape[1],2), dtype=np.float32)
        n = 0
        flow_pts = curr_pts - prev_pts
        for r in range(prev_img.shape[0]):
            for c in range(prev_img.shape[1]):
                est_flow[r, c, :] = flow_pts[n,:]
                n = n + 1
    #here alternative optical flow methods can be applied
    #
    else:
        raise ValueError("flow method has not been implemented")

    ut.writeFlowFile(config_item["files"]["estflow"], est_flow)
    ut.drawFlowField(config_item["files"]["estflow"][:-3] + "png", est_flow)
    print("Done -> ", config_item["files"]["estflow"]) 
Example #15
Source File: __init__.py    From OpenCV-Python-Hacks with GNU Lesser General Public License v3.0
def processFrame(self, frame, distance=None, timestep=1):
        '''
        Processes one image frame, returning summed X,Y flow and frame.

        Optional inputs are:

          distance - distance in meters to image (focal length) for returning flow in meters per second
          timestep - time step in seconds for returning flow in meters per second
        '''

        frame2 = cv2.resize(frame, self.size)
 
        gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

        xsum, ysum = 0,0

        xvel, yvel = 0,0

        flow = None
        
        if self.prev_gray is not None:

            flow = cv2.calcOpticalFlowFarneback(self.prev_gray, gray, flow, pyr_scale=0.5, levels=5, winsize=13, iterations=10, poly_n=5, poly_sigma=1.1, flags=0) 

            for y in range(0, flow.shape[0], self.move_step):

                for x in range(0, flow.shape[1], self.move_step):

                    fx, fy = flow[y, x]
                    xsum += fx
                    ysum += fy

                    cv2.line(frame2, (x,y), (int(x+fx),int(y+fy)), self.mv_color_bgr)
                    cv2.circle(frame2, (x,y), 1, self.mv_color_bgr, -1)

            # Default to system time if no timestep
            curr_time = time.time()
            if not timestep:
                timestep = (curr_time - self.prev_time) if self.prev_time else 1
            self.prev_time = curr_time

            xvel = self._get_velocity(flow, xsum, flow.shape[1], distance, timestep)
            yvel = self._get_velocity(flow, ysum, flow.shape[0], distance, timestep)

        self.prev_gray = gray

        if self.window_name:
            cv2.imshow(self.window_name, frame2)
            if cv2.waitKey(1) & 0x000000FF== 27: # ESC
                return None
        
        # Return x,y velocities and new image with flow lines
        return  xvel, yvel, frame2 
Example #16
Source File: pySaliencyMap.py    From aim with MIT License
def MFMGetFM(self, src):
        # Convert scale
        I8U = np.uint8(255 * src)
        # cv2.waitKey(10)
        # Calculating optical flows
        if self.prev_frame is not None:
            farne_pyr_scale = pySaliencyMapDefs.farne_pyr_scale
            farne_levels = pySaliencyMapDefs.farne_levels
            farne_winsize = pySaliencyMapDefs.farne_winsize
            farne_iterations = pySaliencyMapDefs.farne_iterations
            farne_poly_n = pySaliencyMapDefs.farne_poly_n
            farne_poly_sigma = pySaliencyMapDefs.farne_poly_sigma
            farne_flags = pySaliencyMapDefs.farne_flags
            flow = cv2.calcOpticalFlowFarneback( \
                prev=self.prev_frame, \
                next=I8U, \
                pyr_scale=farne_pyr_scale, \
                levels=farne_levels, \
                winsize=farne_winsize, \
                iterations=farne_iterations, \
                poly_n=farne_poly_n, \
                poly_sigma=farne_poly_sigma, \
                flags=farne_flags, \
                flow=None \
                )
            flowx = flow[..., 0]
            flowy = flow[..., 1]
        else:
            flowx = np.zeros(I8U.shape)
            flowy = np.zeros(I8U.shape)

        # Create Gaussian pyramids
        dst_x = self.FMGaussianPyrCSD(flowx)
        dst_y = self.FMGaussianPyrCSD(flowy)

        # Update the current frame
        self.prev_frame = np.uint8(I8U)

        return dst_x, dst_y

