Python picamera.array Examples

The following are 25 code examples of the picamera.array module, taken from open-source projects. The original project and source file for each example are listed above it. You may also want to check out all available functions and classes of the picamera module.
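
Before diving into the examples, here is a minimal sketch of the pattern most of them build on: capturing a single frame into a numpy array with picamera.array.PiRGBArray. The resolution and warm-up delay below are illustrative assumptions rather than values taken from any of the projects that follow.

import time
import picamera
import picamera.array

with picamera.PiCamera() as camera:
    camera.resolution = (640, 480)   # illustrative resolution
    time.sleep(2)                    # give exposure and white balance time to settle
    with picamera.array.PiRGBArray(camera) as output:
        camera.capture(output, format='rgb')
        frame = output.array         # numpy array of shape (480, 640, 3)

print(frame.shape)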
Example #1
Source File: pi-timolo81.py    From pi-timolo with MIT License
def getStreamImage(isDay):
    # Capture an image stream to memory based on daymode
    with picamera.PiCamera() as camera:
        camera.resolution = (testWidth, testHeight)
        with picamera.array.PiRGBArray(camera) as stream:
            if isDay:
                camera.exposure_mode = 'auto'
                camera.awb_mode = 'auto'
                time.sleep(motionCamSleep)   # sleep so camera can get AWB
            else:
                # use variable framerate_range for Low Light motion image stream
                camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
                time.sleep(2) # Give camera time to measure AWB
                camera.iso = nightMaxISO
            camera.capture(stream, format='rgb', use_video_port=useVideoPort)
            camera.close()
            return stream.array

#----------------------------------------------------------------------------------------------- 
Example #2
Source File: pi-timolo.py    From pi-timolo with MIT License
def getStreamImage(isDay):
    # Capture an image stream to memory based on daymode
    with picamera.PiCamera() as camera:
        camera.resolution = (testWidth, testHeight)
        with picamera.array.PiRGBArray(camera) as stream:
            if isDay:
                camera.exposure_mode = 'auto'
                camera.awb_mode = 'auto'
                time.sleep(motionCamSleep)   # sleep so camera can get AWB
            else:
                # use variable framerate_range for Low Light motion image stream
                camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
                time.sleep(2) # Give camera time to measure AWB
                camera.iso = nightMaxISO
            camera.capture(stream, format='rgb', use_video_port=useVideoPort)
            camera.close()
            return stream.array

#----------------------------------------------------------------------------------------------- 
Example #3
Source File: camera.py    From rpitelecine with BSD 3-Clause "New" or "Revised" License
def take_bracket_pictures(self):
	""" 
	Returns two images in a list:
	one with normal exposure, and one with a 2-stop longer exposure.
	The aim is to get detail out of shadows/underexposed film.
	Resulting images can be combined on a PC with Hugin's enfuse utility.
	"""
	old_shutter = self.shutter_speed
	imgs = []
	with picamera.array.PiRGBArray(self) as output:
	    self.capture(output, format='bgr')
	    imgs.append( output.array )
	    self.shutter_speed = old_shutter*4
	    output.truncate(0)
	    self.capture(output, format='bgr')
	    imgs.append( output.array )
	self.shutter_speed = old_shutter
	return imgs 
Example #4
Source File: image_processor.py    From aws-greengrass-mini-fulfillment with Apache License 2.0
def __init__(self, res_width=96, res_height=96):
        self.camera = picamera.PiCamera(resolution=(res_width, res_height))
        # TODO propagate configurable resolution through '96' logic below

        self.camera.hflip = True
        self.camera.vflip = True
        self.res_width = res_width
        self.res_height = res_height
        self.stream = picamera.array.PiYUVArray(self.camera)
        self.pixelObjList = []
        self.object_id_center = 0
        self.pixelObjList.append(PixelObject(self.next_obj_id()))
        self.max_pixel_count = 0
        self.largest_object_id = 0
        self.largest_X = 0
        self.largest_Y = 0
        self.filename = '' 
Example #5
Source File: cameras.py    From ethoscope with GNU General Public License v3.0
def __iter__(self):
        """
        Iterate through consecutive frames of this camera.

        :return: the time (in ms) and a frame (numpy array).
        :rtype: (int, :class:`~numpy.ndarray`)
        """
        at_least_one_frame = False
        while True:
            if self.is_last_frame() or not self.is_opened():
                if not at_least_one_frame:
                    raise EthoscopeException("Camera could not read the first frame")
                break
            t,out = self._next_time_image()
            if out is None:
                break
            t_ms = int(1000*t)
            at_least_one_frame = True

            if (self._frame_idx % self._drop_each) == 0:
                yield t_ms,out

            if self._max_duration is not None and t > self._max_duration:
                break 
Example #6
Source File: FPVtest.py    From Adeept_RaspTank with MIT License
def colorFindSet(self,invarH, invarS, invarV):#1
		global colorUpper, colorLower
		HUE_1 = invarH+11
		HUE_2 = invarH-11
		if HUE_1>255:HUE_1=255
		if HUE_2<0:HUE_2=0

		SAT_1 = invarS+170
		SAT_2 = invarS-20
		if SAT_1>255:SAT_1=255
		if SAT_2<0:SAT_2=0

		VAL_1 = invarV+170
		VAL_2 = invarV-20
		if VAL_1>255:VAL_1=255
		if VAL_2<0:VAL_2=0

		colorUpper = np.array([HUE_1, SAT_1, VAL_1])
		colorLower = np.array([HUE_2, SAT_2, VAL_2])
		print('HSV_1:%d %d %d'%(HUE_1, SAT_1, VAL_1))
		print('HSV_2:%d %d %d'%(HUE_2, SAT_2, VAL_2)) 
Example #7
Source File: FileSaver.py    From NaturewatchCameraServer with GNU General Public License v3.0
def save_image(self, image, timestamp):
        """
        Save image to disk
        :param image: numpy array image
        :param timestamp: formatted timestamp string
        :return: filename
        """
        if self.checkStorage() < 99:
            filename = timestamp
            filename = filename + ".jpg"
            self.logger.debug('FileSaver: saving file')
            try:
                cv2.imwrite(os.path.join(self.config["photos_path"], filename), image)
                self.logger.info("FileSaver: saved file to " + os.path.join(self.config["photos_path"], filename))
                return filename
            except Exception as e:
                self.logger.error('FileSaver: save_photo() error: ')
                self.logger.exception(e)
                pass
        else:
            self.logger.error('FileSaver: not enough space to save image')
            return None 
Example #8
Source File: drive.py    From SDRC with GNU General Public License v3.0
def __init__(self):
        self.camera = picamera.PiCamera()
        self.camera.vflip = True
        self.camera.hflip = True
        #self.camera.resolution = (320, 160)
        self.camera.start_preview()
        sleep(5)
        self.stream = picamera.array.PiYUVArray(self.camera) 
Example #9
Source File: microscope.py    From openflexure_microscope_software with GNU General Public License v3.0
def extract_settings(source_dict, converters):
    """Extract a subset of a dictionary of settings.

    For each item in ``source_dict`` that shares a key with an item in
    ``converters``, return a dictionary of values that have been
    processed using the conversion functions in the second dict.

    NB "None" is equivalent to no processing, to save some typing.
    There are some special string values for converters:
    "[()]" will convert a 0-dimensional numpy array to a scalar
    "[0]" will return the first element of a 1D array
    If either "[()]" or "[0]" is specified and raises an exception,
    then we fall back to no processing.  This is good if the values
    might be from a numpy ``.npz`` file, or might be specified directly.
    """
    settings = {}
    for k in source_dict:
        if k in converters:
            if converters[k] is None:
                settings[k] = source_dict[k]
            elif converters[k] == "[()]":
                try:
                    settings[k] = source_dict[k][()]
                except:
                    settings[k] = source_dict[k]
            elif converters[k] == "[0]":
                try:
                    settings[k] = source_dict[k][0]
                except:
                    settings[k] = source_dict[k]
            else:
                settings[k] = converters[k](source_dict[k])
    return settings 
Example #10
Source File: microscope.py    From openflexure_microscope_software with GNU General Public License v3.0
def acquire_image_stack(self, step_displacement, n_steps, output_dir, raw=False):
        """Scan an edge across the field of view, to measure distortion.

        You should start this routine with the edge positioned in the centre of the
        microscope's field of view.  You specify the x,y,z shift between images, and
        the number of images - these points will be distributed either side of where
        you start.

        step_displacement: a 3-element array/list specifying the step (if a scalar
            is passed, it's assumed to be Z)
        n_steps: the number of steps to take
        output_dir: the directory in which to save images
        raw: if True, embed raw Bayer data in the captured JPEGs
            (a backlash correction of 256 steps is applied during the scan)
        """
        # Ensure the displacement per step is an array, and that scalars do z steps
        step_displacement = np.array(step_displacement)
        if len(step_displacement.shape) == 0:
            step_displacement = np.array([0, 0, step_displacement.item()])
        elif step_displacement.shape == (1,):
            step_displacement = np.array([0, 0, step_displacement[0]])
        ii = np.arange(n_steps) - (n_steps - 1.0)/2.0 # an array centred on zero
        scan_points = ii[:, np.newaxis] * step_displacement[np.newaxis, :]

        with set_properties(self.stage, backlash=256):
            for i in self.stage.scan_linear(scan_points):
                time.sleep(1)
                filepath = os.path.join(output_dir,"image_%03d_x%d_y%d_z%d.jpg" % 
                                                   ((i,) + tuple(self.stage.position)))
                print("capturing {}".format(filepath))
                self.camera.capture(filepath, use_video_port=False, bayer=raw)
            time.sleep(0.5) 
Example #11
Source File: microscope.py    From openflexure_microscope_software with GNU General Public License v3.0
def rgb_image(self, use_video_port=True, resize=None):
        """Capture a frame from a camera and output to a numpy array"""
        with picamera.array.PiRGBArray(self.camera, size=resize) as output:
            # get an image; see picamera.readthedocs.org/en/latest/recipes2.html
            self.camera.capture(output,
                    format='rgb',
                    resize=resize,
                    use_video_port=use_video_port)
            return output.array
Example #12
Source File: microscope.py    From openflexure_microscope_software with GNU General Public License v3.0
def rgb_image_old(self, use_video_port=True):
        """Capture a frame from a camera and output to a numpy array"""
        res = round_resolution(self.camera.resolution)
        shape = (res[1], res[0], 3)
        buf = np.empty(np.product(shape), dtype=np.uint8)
        self.camera.capture(buf, 
                format='rgb', 
                use_video_port=use_video_port)
        #get an image, see picamera.readthedocs.org/en/latest/recipes2.html
        return buf.reshape(shape) 
Example #13
Source File: microscope.py    From openflexure_microscope_software with GNU General Public License v3.0
def sharpness_edge(image):
    """Return a sharpness metric optimised for vertical lines"""
    gray = np.mean(image.astype(float), 2)
    n = 20
    edge = np.array([[-1]*n + [1]*n])
    return np.sum([np.sum(ndimage.filters.convolve(gray,W)**2) 
                   for W in [edge, edge.T]]) 
Example #14
Source File: microscope.py    From openflexure_microscope_software with GNU General Public License v3.0
def decimate_to(shape, image):
    """Decimate an image to reduce its size if it's too big."""
    decimation = np.max(np.ceil(np.array(image.shape, dtype=np.float)[:len(shape)]/np.array(shape)))
    return image[::int(decimation), ::int(decimation), ...] 
Example #15
Source File: CameraController.py    From NaturewatchCameraServer with GNU General Public License v3.0
def run(self):
        while not self.is_stopped():
            try:
                if picamera_exists:
                    try:
                        # Get image from Pi camera
                        self.picamera_md_output.truncate(0)
                        self.picamera_md_output.seek(0)
                        self.picamera_md_stream.__next__()
                        self.image = self.picamera_md_output.array
                        if self.image is None:
                            self.logger.warning("CameraController: got empty image.")
                        time.sleep(0.01)
                    except Exception as e:
                        self.logger.error("CameraController: picamera update error.")
                        self.logger.exception(e)
                        self.initialise_picamera()
                        time.sleep(0.02)

                else:
                    # Get image from webcam
                    if self.use_splitter_port:
                        ret, self.splitter_image = self.capture.read()
                        if self.splitter_image is not None:
                            self.image = imutils.resize(self.splitter_image, width=self.width, height=self.height)
                    else:
                        ret, self.image = self.capture.read()

                    if self.image is None:
                        self.logger.warning("CameraController: got empty image.")

            except KeyboardInterrupt:
                self.logger.info("CameraController: received KeyboardInterrupt,  shutting down ...")
                self.stop()

    # Stop thread 
Example #16
Source File: camera.py    From rpi-deep-pantilt with MIT License
def flush(self):
        # looping until self.stopped flag is flipped
        # for now, grab the first frame in buffer, then empty buffer
        for f in self.stream:
            self.frame = f.array
            self.data_container.truncate(0)

            if self.stopped:
                self.stream.close()
                self.data_container.close()
                self.camera.close()
                return 
Example #17
Source File: image_processor.py    From aws-greengrass-mini-fulfillment with Apache License 2.0
def capture_frame(self):
        self.stream = picamera.array.PiYUVArray(self.camera)
        self.camera.capture(self.stream, 'yuv')
        self.camera._set_led(True)

        self.pixelObjList = []
        self.object_id_center = 0
        self.pixelObjList.append(PixelObject(self.next_obj_id()))

        rows = []
        for _ in range(self.res_height):
            rows.append(list(range(self.res_width)))

        # flip image horizontally
        for j, j_ in enumerate(range(self.res_width-1, -1, -1)):
            # now flip vertically
            for i, i_ in enumerate(range(self.res_height-1, -1, -1)):
                rows[j][i] = self.stream.array[j_][i_][0]

        self.filename = self.save_PNG('raw.png', rows)
        self.spread_white_pixels(
            self.make_black_and_white(
                self.fuse_horizontal_and_vertical(
                    self.get_horizontal_edges(rows),
                    self.get_vertical_edges(rows)))
        ) 
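
As a side note, the nested flip loops above can be written much more compactly with numpy slicing. A minimal sketch, assuming the same PiYUVArray stream and the square 96x96 resolution used in this class:

# Hypothetical equivalent of the manual flip: reverse both axes of the Y (luma)
# plane, then convert to nested lists if the later processing steps expect them.
flipped = self.stream.array[::-1, ::-1, 0]
rows = flipped.tolist()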
Example #18
Source File: camera.py    From rpitelecine with BSD 3-Clause "New" or "Revised" License
def take_picture(self):
        """ 
        Returns an OpenCV-compatible colour image
        """
        with picamera.array.PiRGBArray(self) as output:
            self.capture(output, format='bgr')
            return output.array 
Example #19
Source File: pigear.py    From vidgear with Apache License 2.0
def read(self):
        """
        Extracts frames synchronously from the monitored deque, while maintaining a fixed-length frame buffer in memory,
        and blocks the thread if the deque is full.

        **Returns:** A n-dimensional numpy array. 
        """
        # check if there are any thread exceptions
        if not (self.__exceptions is None):
            if isinstance(self.__exceptions, bool):
                # clear frame
                self.frame = None
                # notify user about hardware failure
                raise SystemError(
                    "[PiGear:ERROR] :: Hardware failure occurred, Kindly reconnect Camera Module and restart your Pi!"
                )
            else:
                # clear frame
                self.frame = None
                # re-raise error for debugging
                error_msg = "[PiGear:ERROR] :: Camera Module API failure occured: {}".format(
                    self.__exceptions[1]
                )
                raise RuntimeError(error_msg).with_traceback(self.__exceptions[2])

        # return the frame
        return self.frame 
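
For context, the read() method above belongs to vidgear's threaded PiGear camera class. A minimal usage sketch, assuming a Raspberry Pi camera module is attached and vidgear is installed:

from vidgear.gears import PiGear

stream = PiGear(resolution=(640, 480), framerate=30).start()  # start the capture thread
frame = stream.read()    # latest frame as a numpy array (or None if nothing captured yet)
stream.stop()            # release the camera and stop the thread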
Example #20
Source File: pi-timolo.py    From pi-timolo with MIT License
def update(self):
        # keep looping infinitely until the thread is stopped
        for f in self.stream:
            # grab the frame from the stream and clear the stream in
            # preparation for the next frame
            self.frame = f.array
            self.rawCapture.truncate(0)

            # if the thread indicator variable is set, stop the thread
            # and release camera resources
            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return 
Example #21
Source File: pi-timolo81.py    From pi-timolo with MIT License
def update(self):
        # keep looping infinitely until the thread is stopped
        for f in self.stream:
            # grab the frame from the stream and clear the stream in
            # preparation for the next frame
            self.frame = f.array
            self.rawCapture.truncate(0)

            # if the thread indicator variable is set, stop the thread
            # and release camera resources
            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return 
Example #22
Source File: drive.py    From SDRC with GNU General Public License v3.0
def capture(self):
        self.camera.capture(self.stream, format='yuv')
        img = self.stream.array[270:,:,0]
        self.stream.seek(0)
        self.stream.truncate()
        return img 
Example #23
Source File: CameraController.py    From NaturewatchCameraServer with GNU General Public License v3.0
def initialise_picamera(self):
        self.logger.debug('CameraController: initialising picamera ...')

        # If there is already a running instance, close it
        if self.camera is not None:
            self.camera.close()

        # Create a new instance
        self.camera = picamera.PiCamera()
        # Check for module revision
        # TODO: set maximum resolution based on module revision
        self.logger.debug('CameraController: camera module revision {} detected.'.format(self.camera.revision))

        # Set camera parameters
        self.camera.framerate = self.config["frame_rate"]
        self.camera.resolution = (self.width, self.height)

        picamera.PiCamera.CAPTURE_TIMEOUT = 60

        if self.config["rotate_camera"] == 1:
            self.camera.rotation = 180
            self.rotated_camera = True
        else:
            self.camera.rotation = 0
            self.rotated_camera = False

        self.logger.info('CameraController: camera initialised with a resolution of {} and a framerate of {}'.format(
            self.camera.resolution, self.camera.framerate))

        # TODO: use correct port fitting the requested resolution
        # Set up low res stream for motion detection
        self.picamera_md_output = picamera.array.PiRGBArray(self.camera, size=(self.md_width, self.md_height))
        self.picamera_md_stream = self.camera.capture_continuous(self.picamera_md_output, format="bgr",
                                                                 use_video_port=True, splitter_port=2,
                                                                 resize=(self.md_width, self.md_height))
        self.logger.debug('CameraController: low res stream prepared with resolution {}x{}.'.format(self.md_width,
                                                                                                    self.md_height))

        # Set up high res stream for actual recording
        # Bitrate has to be specified so size can be calculated from the seconds specified
        # Unfortunately the effective bitrate depends on the quality-parameter specified with start_recording,
        # so the effective duration can not be predicted well
        self.picamera_video_stream = picamera.PiCameraCircularIO(self.camera,
                                                                 bitrate=self.video_bitrate,
                                                                 seconds=self.config["video_duration_before_motion"] +
                                                                 self.config["video_duration_after_motion"])
        self.logger.debug('CameraController: circular stream prepared for video.')

        time.sleep(2)

Example #24
Source File: ndvi.py    From RPiNDVI with MIT License
def run():
    with picamera.PiCamera() as camera:
        # Set the camera resolution
        x = 400
        camera.resolution = (int(1.33 * x), x)
        # Various optional camera settings below:
        # camera.framerate = 5
        # camera.awb_mode = 'off'
        # camera.awb_gains = (0.5, 0.5)

        # Need to sleep to give the camera time to get set up properly
        time.sleep(1)

        with picamera.array.PiRGBArray(camera) as stream:
            # Loop constantly
            while True:
                # Grab data from the camera, in colour format
                # NOTE: This comes in BGR rather than RGB, which is important
                # for later!
                camera.capture(stream, format='bgr', use_video_port=True)
                image = stream.array

                # Get the individual colour components of the image
                b, g, r = cv2.split(image)

                # Calculate the NDVI

                # Bottom of fraction
                bottom = (r.astype(float) + b.astype(float))
                bottom[bottom == 0] = 0.01  # Make sure we don't divide by zero!

                ndvi = (r.astype(float) - b) / bottom
                ndvi = contrast_stretch(ndvi)
                ndvi = ndvi.astype(np.uint8)

                # Do the labelling
                label(b, 'Blue')
                label(g, 'Green')
                label(r, 'NIR')
                label(ndvi, 'NDVI')

                # Combine ready for display
                combined = disp_multiple(b, g, r, ndvi)

                # Display
                cv2.imshow('image', combined)

                stream.truncate(0)

                # If we press ESC then break out of the loop
                c = cv2.waitKey(7) % 0x100
                if c == 27:
                    break

    # Important cleanup here!
    cv2.destroyAllWindows() 
Example #25
Source File: recalibrate.py    From openflexure_microscope_software with GNU General Public License v3.0
def lens_shading_correction_from_rgb(rgb_array, binsize=64):
    """Calculate a correction to a lens shading table from an RGB image.
    
    Returns:
        a floating-point table of gains that should multiply the current
        lens shading table.
    """
    full_resolution = rgb_array.shape[:2]
    table_resolution = [(r // binsize) + 1 for r in full_resolution]
    lens_shading = np.zeros([4] + table_resolution, dtype=np.float)
    
    for i in range(3):
        # We simplify life by dealing with only one channel at a time.
        image_channel = rgb_array[:,:,i]
        iw, ih = image_channel.shape
        ls_channel = lens_shading[int(i*1.6),:,:] # NB there are *two* green channels
        lw, lh = ls_channel.shape
        # The lens shading table is rounded **up** in size to 1/64th of the size of
        # the image.  Rather than handle edge images separately, I'm just going to
        # pad the image by copying edge pixels, so that it is exactly 32 times the
        # size of the lens shading table (NB 32 not 64 because each channel is only
        # half the size of the full image - remember the Bayer pattern).  This
        # should give results very close to 6by9's solution, albeit in a
        # considerably less computationally efficient way!
        padded_image_channel = np.pad(image_channel, 
                                      [(0, lw*binsize - iw), (0, lh*binsize - ih)],
                                      mode="edge") # Pad image to the right and bottom
        assert padded_image_channel.shape == (lw*binsize, lh*binsize), "padding problem"
        # Next, fill the shading table (except edge pixels).  Please excuse the
        # for loop - I know it's not fast but this code needn't be!
        box = 3 # We average together a square of this side length for each pixel.
        # NB this isn't quite what 6by9's program does - it averages 3 pixels
        # horizontally, but not vertically.
        for dx in np.arange(box) - box//2:
            for dy in np.arange(box) - box//2:
                ls_channel[:,:] += padded_image_channel[binsize//2+dx::binsize,binsize//2+dy::binsize]
        ls_channel /= box**2
        # Everything is normalised relative to the centre value.  I follow 6by9's
        # example and average the central 64 pixels in each channel.
        channel_centre = np.mean(image_channel[iw//2-4:iw//2+4, ih//2-4:ih//2+4])
        ls_channel /= channel_centre
        print("channel {} centre brightness {}".format(i, channel_centre))
        # NB the central pixel should now be *approximately* 1.0 (may not be exactly
        # due to different averaging widths between the normalisation & shading table)
        # For most sensible lenses I'd expect that 1.0 is the maximum value.
        # NB ls_channel should be a "view" of the whole lens shading array, so we don't
        # need to update the big array here.
        print("min {}, max {}".format(ls_channel.min(), ls_channel.max()))
    # What we actually want to calculate is the gains needed to compensate for the 
    # lens shading - that's 1/lens_shading_table_float as we currently have it.
    lens_shading[2,...] = lens_shading[1,...] # Duplicate the green channels
    gains = 1.0/lens_shading # floating-point gains: 1.0 means the table is unchanged
    return gains