Python skimage.util.img_as_ubyte() Examples

The following are 27 code examples of skimage.util.img_as_ubyte(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module skimage.util, or try the search function.
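As a quick orientation before the examples: img_as_ubyte() converts an image of any supported dtype to uint8, rescaling values onto the 0-255 range (floats are assumed to lie in [0, 1], or [-1, 1] for signed data, and are clipped accordingly). A minimal sketch, independent of the projects below:

import numpy as np
from skimage.util import img_as_ubyte

float_img = np.array([[0.0, 0.5, 1.0]])
print(img_as_ubyte(float_img))  # [[  0 128 255]]

bool_img = np.array([[True, False]])
print(img_as_ubyte(bool_img))   # [[255   0]]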
Example #1
Source File: cp5_LAB_avg.py    From aim with MIT License
def execute(b64):
    b64 = base64.b64decode(b64)
    b64 = BytesIO(b64)
    img = Image.open(b64)
    img = np.array(img)
    img = util.img_as_ubyte(img)

    # Convert to the LAB color space
    lab = color.rgb2lab(img)

    L = lab[:, :, 0]
    A = lab[:, :, 1]
    B = lab[:, :, 2]

    # Get average and standard deviation for each value separately
    meanL = np.mean(L)
    stdL = np.std(L)
    meanA = np.mean(A)
    stdA = np.std(A)
    meanB = np.mean(B)
    stdB = np.std(B)

    result = [meanL, stdL, meanA, stdA, meanB, stdB]

    return result 
Example #2
Source File: cp9_luminance_sd.py    From aim with MIT License
def execute(b64):
    b64 = base64.b64decode(b64)
    b64 = BytesIO(b64)
    img = Image.open(b64)
    img = np.array(img)
    img = util.img_as_ubyte(img)
    img = img.reshape(-1, 3)
    img = [tuple(l) for l in img]

    lum = []
    for pixel in img:
        # Based on: https://en.wikipedia.org/wiki/Luma_(video)
        y = 0.2126 * pixel[0] + 0.7152 * pixel[1] + 0.0722 * pixel[2]
        lum.append(y)

    result = np.std(lum)

    return [result] 
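For reference, the per-pixel loop above can be collapsed into a single dot product; a vectorized sketch of the same metric (assuming an H x W x 3 RGB input, with the same Rec. 709 luma coefficients):

import numpy as np
from skimage import util

def luminance_sd(img):
    # Flatten to N x 3 and weight the channels by the Rec. 709 luma coefficients
    pixels = util.img_as_ubyte(img).reshape(-1, 3)
    lum = pixels @ np.array([0.2126, 0.7152, 0.0722])
    return np.std(lum)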
Example #3
Source File: pf1_edge_density.py    From aim with MIT License
def execute(b64):
    b64 = base64.b64decode(b64)
    b64 = BytesIO(b64)
    img = Image.open(b64)
    img = np.array(img)
    img_la = color.rgb2gray(img)
    img_la = util.img_as_ubyte(img_la)

    # Thresholds 0.11 and 0.27, sigma = 1, from "Measuring visual clutter"
    # See sigma here: https://dsp.stackexchange.com/questions/4716/differences-between-opencv-canny-and-matlab-canny
    img_la = cv2.GaussianBlur(img_la, (7, 7), 1)
    cd = cv2.Canny(img_la, 0.11, 0.27)
    total = cd.shape[0] * cd.shape[1] # Total number of pixels
    number_edges = np.count_nonzero(cd) # Number of edge pixels
    contour_density = float(number_edges) / float(total) # Ratio

    result = [contour_density]

    return result 
Example #4
Source File: pf6_quadtree_decomposition.py    From aim with MIT License
def execute(b64):
    b64 = base64.b64decode(b64)
    b64 = BytesIO(b64)
    img = Image.open(b64)
    img = np.array(img)
    img = util.img_as_ubyte(img)

    res_leaf = []
    cor_size = (0, 0, img.shape[1], img.shape[0])
    quadtree(img, res_leaf, cor_size, 0)
    # fig, ax = plt.subplots(1)
    # for rect in res_leaf:
    #     rect = patches.Rectangle((rect[0], rect[1]), rect[2], rect[3], linewidth=0.1, edgecolor='b', facecolor='none')
    #     ax.add_patch(rect)
    # ax.imshow(img)
    # plt.show()
    b = balance(res_leaf, img.shape[1], img.shape[0])
    s = symmetry(res_leaf, img.shape[1], img.shape[0])
    e = equilibrium(res_leaf, img.shape[1], img.shape[0])
    n = len(res_leaf)

    return [b, s, e, n] 
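The quadtree, balance, symmetry, and equilibrium helpers live elsewhere in pf6_quadtree_decomposition.py and are not shown in this excerpt. Purely as an illustration of the decomposition step, a recursive sketch might look like the following; the stopping rule and thresholds here are assumptions, not the project's actual values:

def quadtree(img, leaves, box, depth, max_depth=6, std_threshold=10, min_size=8):
    # Hypothetical sketch: split (x0, y0, x1, y1) into quadrants until the
    # region is visually uniform or small, collecting (x, y, w, h) leaves.
    x0, y0, x1, y1 = box
    region = img[y0:y1, x0:x1]
    if (depth >= max_depth or region.std() < std_threshold
            or x1 - x0 < min_size or y1 - y0 < min_size):
        leaves.append((x0, y0, x1 - x0, y1 - y0))
        return
    mx, my = (x0 + x1) // 2, (y0 + y1) // 2
    for sub in ((x0, y0, mx, my), (mx, y0, x1, my),
                (x0, my, mx, y1), (mx, my, x1, y1)):
        quadtree(img, leaves, sub, depth + 1, max_depth, std_threshold, min_size)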
Example #5
Source File: frame_extraction_toolbox.py    From DeepLabCut with GNU Lesser General Public License v3.0
def chooseFrame(self):
        ret, frame = self.vid.read()
        fname = Path(self.filename)
        output_path = self.config_path.parents[0] / "labeled-data" / fname.stem

        if output_path.exists():
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = img_as_ubyte(frame)
            img_name = (
                str(output_path)
                + "/img"
                + str(self.currFrame).zfill(int(np.ceil(np.log10(self.numberFrames))))
                + ".png"
            )
            if self.cropping:
                crop_img = frame[self.y1 : self.y2, self.x1 : self.x2]
                cv2.imwrite(img_name, cv2.cvtColor(crop_img, cv2.COLOR_RGB2BGR))
            else:
                cv2.imwrite(img_name, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        else:
            print(
                "%s path not found. Please make sure that the video was added to the config file using the function 'deeplabcut.add_new_videos'."
                % output_path
            ) 
Example #6
Source File: outlier_frame_extraction_toolbox.py    From DeepLabCut with GNU Lesser General Public License v3.0
def update(self):
        """
        Updates the image with the current slider index
        """
        self.grab.Enable(True)
        self.grab.Bind(wx.EVT_BUTTON, self.grabFrame)
        self.figure, self.axes, self.canvas = self.image_panel.getfigure()
        self.vid.set(cv2.CAP_PROP_POS_FRAMES, self.currFrame)
        ret, frame = self.vid.read()
        if ret:
            # Convert only when a frame was actually read; otherwise frame is None
            frame = img_as_ubyte(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            if self.cropping:
                self.coords = (
                    self.cfg["x1"],
                    self.cfg["x2"],
                    self.cfg["y1"],
                    self.cfg["y2"],
                )
                frame = frame[
                    int(self.coords[2]) : int(self.coords[3]),
                    int(self.coords[0]) : int(self.coords[1]),
                    :,
                ]
            else:
                self.coords = None
            self.ax = self.axes.imshow(frame, cmap=self.colormap)
            self.axes.set_title(
                str(
                    str(self.currFrame)
                    + "/"
                    + str(self.numberFrames - 1)
                    + " "
                    + self.filename
                )
            )
            self.figure.canvas.draw()
        else:
            print("Invalid frame") 
Example #7
Source File: segmentation_labelling.py    From kaggle-heart with MIT License
def generate_fft_image(sequence):
    # fftn / ifftn are imported elsewhere in segmentation_labelling.py
    ff1 = fftn(sequence)
    fh = np.absolute(ifftn(ff1[1, :, :]))  # first temporal harmonic
    fh[fh < 0.1 * np.max(fh)] = 0.0  # suppress weak responses
    # image = img_as_ubyte(fh / np.max(fh))
    return fh 
Example #8
Source File: cp3_HSV_avg.py    From aim with MIT License
def execute(b64):
    b64 = base64.b64decode(b64)
    b64 = BytesIO(b64)
    img = Image.open(b64)
    img = np.array(img)
    img = util.img_as_ubyte(img)
    img = img / 255.  # This division is needed so hue, saturation, and value come out in their proper ranges (0 to 360, 0 to 1, 0 to 1)
    img = color.rgb2hsv(img)
    img = img.reshape(-1, 3)
    img = [tuple(l) for l in img]

    h = []
    s = []
    v = []

    # Give each channel its own list
    for items in img:
        [hue, sat, val] = [items[0], items[1], items[2]]
        h.append(hue * 360)
        s.append(sat)
        v.append(val)

    # Hue is an angle, so we cannot simply add and average it
    sumsin = sum(sind(h[:]))
    sumcos = sum(cosd(h[:]))

    # Get the average value and standard deviation over H,S and V
    avgHue = atan2d(sumsin, sumcos) % 360
    avgSaturation = np.mean(s)
    stdSaturation = np.std(s)
    avgValue = np.mean(v)
    stdValue = np.std(v)
    result = [avgHue, avgSaturation, stdSaturation, avgValue, stdValue]

    return result


# Degree-based wrappers for sin, cos, and atan2, based on:
# https://stackoverflow.com/questions/43100286/python-trigonometric-calculations-in-degrees 
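The degree-based helpers themselves are defined further down in cp3_HSV_avg.py and are not part of this excerpt; a minimal version along the lines of the linked answer (the project's actual definitions may differ):

import numpy as np

def sind(x):
    return np.sin(np.deg2rad(np.asarray(x)))

def cosd(x):
    return np.cos(np.deg2rad(np.asarray(x)))

def atan2d(y, x):
    return np.rad2deg(np.arctan2(y, x))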
Example #9
Source File: cp6_hassler_susstrunk.py    From aim with MIT License
def execute(b64):
    b64 = base64.b64decode(b64)
    b64 = BytesIO(b64)
    img = Image.open(b64)
    img = np.array(img)
    img = util.img_as_ubyte(img)
    img = img.reshape(-1, 3)
    img = [tuple(l) for l in img]

    rg = []
    yb = []
    for item in img:
        [r, g, b] = [int(item[0]), int(item[1]), int(item[2])]
        # These formulae are proposed in Hasler, D. and Süsstrunk, S., Measuring Colourfulness in Natural Images (2003)
        rg.append(np.abs(r - g))
        yb.append(np.abs((0.5 * (r + g)) - b))

    meanRG = np.mean(rg)
    stdRG = np.std(rg)
    meanYB = np.mean(yb)
    stdYB = np.std(yb)
    meanRGYB = np.sqrt(meanRG ** 2 + meanYB ** 2)
    stdRGYB = np.sqrt(stdRG ** 2 + stdYB ** 2)

    # Proposed in the same paper
    colourfulness = stdRGYB + 0.3 * meanRGYB

    result = [meanRG, stdRG, meanYB, stdYB, meanRGYB, stdRGYB, colourfulness]

    return result 
Example #10
Source File: pf4_figure_ground_contrast.py    From aim with MIT License
def execute(b64):
    b64 = base64.b64decode(b64)
    b64 = BytesIO(b64)
    img = Image.open(b64)
    img = np.array(img)
    img_la = color.rgb2gray(img)
    img_la = util.img_as_ubyte(img_la)

    # Get the number of edge pixels per level. See 1
    edge_per_level = []
    for x in range(1, 8):
        # Blur is needed: https://dsp.stackexchange.com/questions/4716/differences-between-opencv-canny-and-matlab-canny
        img_la = cv2.GaussianBlur(img_la, (7, 7), 2)
        cd = cv2.Canny(img_la, x * 0.04, x * 0.1) # Higher level from 0.1-0.7, lower level is 40% of higher
        number_edges = np.count_nonzero(cd) # Number of edge pixels
        edge_per_level.append(number_edges)

    difference = []
    
    # Calculate the difference between each level
    for x in range(len(edge_per_level) - 1):
        difference.append(edge_per_level[x] - edge_per_level[x + 1])

    # Give weight per level. Lower levels have more impact so higher weight
    weighted_sum = 0
    for x in range(len(difference)):
        weighted_sum += difference[x] * (1.0 - ((x - 1.0) / 6.0))

    # Normalize
    try:
        result = [weighted_sum / (edge_per_level[0] - edge_per_level[5])]
    except ZeroDivisionError:
        result = [0]

    return result 
Example #11
Source File: predict_videos.py    From DeepLabCut with GNU Lesser General Public License v3.0
def GetPoseS(cfg, dlc_cfg, sess, inputs, outputs, cap, nframes):
    """ Non batch wise pose estimation for video cap."""
    if cfg["cropping"]:
        ny, nx = checkcropping(cfg, cap)

    PredictedData = np.zeros(
        (nframes, dlc_cfg["num_outputs"] * 3 * len(dlc_cfg["all_joints_names"]))
    )
    pbar = tqdm(total=nframes)
    counter = 0
    step = max(10, int(nframes / 100))
    while cap.isOpened():
        if counter % step == 0:
            pbar.update(step)

        ret, frame = cap.read()
        if ret:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if cfg["cropping"]:
                frame = img_as_ubyte(
                    frame[cfg["y1"] : cfg["y2"], cfg["x1"] : cfg["x2"]]
                )
            else:
                frame = img_as_ubyte(frame)
            pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs)
            PredictedData[
                counter, :
            ] = (
                pose.flatten()
            )  # NOTE: cfg['all_joints_names'] must therefore be in the same order as bodyparts!
        else:
            nframes = counter
            break
        counter += 1

    pbar.close()
    return PredictedData, nframes 
Example #12
Source File: detect.py    From pychubby with MIT License
def landmarks_68(img, rectangle, model_path=None):
    """Predict 68 face landmarks.

    Parameters
    ----------
    img : np.ndarray
        Image of any dtype and number of channels.

    rectangle : dlib.rectangle
        Rectangle that represents the bounding box around a single face.

    model_path : str or pathlib.Path, default=None
        Path to where the pretrained model is located. If None, the model cached in `CACHE_FOLDER` is used.

    Returns
    -------
    lm_points : np.ndarray
        Array of shape `(68, 2)` where rows are different landmark points and the columns
        are x and y coordinates.

    original : dlib.full_object_detection
        Instance of ``dlib.full_object_detection``.

    """
    if model_path is None:
        model_path = CACHE_FOLDER / "shape_predictor_68_face_landmarks.dat"
        get_pretrained_68(model_path.parent)

    else:
        model_path = pathlib.Path(str(model_path))

    if not model_path.is_file():
        raise IOError("Invalid landmark model, {}".format(str(model_path)))

    lm_predictor = dlib.shape_predictor(str(model_path))

    original = lm_predictor(img_as_ubyte(img), rectangle)

    lm_points = np.array([[p.x, p.y] for p in original.parts()])

    return lm_points, original 
Example #13
Source File: detect.py    From pychubby with MIT License
def face_rectangle(img, n_upsamples=1):
    """Find a face rectangle.

    Parameters
    ----------
    img : np.ndarray
        Image of any dtype and number of channels.

    n_upsamples : int
        Upsample factor to apply to the image before detection. Allows the detector
        to find more (smaller) faces.

    Returns
    -------
    corners : list
        List of tuples where each tuple represents the top left and bottom right coordinates of
        the face rectangle. Note that these coordinates use the `(row, column)` convention. The
        length of the list is equal to the number of detected faces.

    faces : list
        Instance of ``dlib.rectangles`` that can be used in other algorithms.

    """
    if not isinstance(img, np.ndarray):
        raise TypeError("The input needs to be a np.ndarray")

    dlib_detector = dlib.get_frontal_face_detector()

    faces = dlib_detector(img_as_ubyte(img), n_upsamples)

    corners = []
    for face in faces:
        x1, y1, x2, y2 = face.left(), face.top(), face.right(), face.bottom()
        top_left = (y1, x1)
        bottom_right = (y2, x2)
        corners.append((top_left, bottom_right))

    return corners, faces 
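A hypothetical end-to-end use of the two pychubby helpers above; the image path is a placeholder:

import matplotlib.pyplot as plt
from skimage.io import imread

img = imread("face.jpg")              # placeholder input
corners, faces = face_rectangle(img)  # detect all faces
plt.imshow(img)
for face in faces:
    lm_points, _ = landmarks_68(img, face)
    plt.scatter(lm_points[:, 0], lm_points[:, 1], s=2)
plt.show()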
Example #14
Source File: test_utils.py    From napari with BSD 3-Clause "New" or "Revised" License
def test_bool():
    data = np.zeros((10, 10), dtype=bool)
    data[2:-2, 2:-2] = 1
    converted = convert_to_uint8(data)
    assert converted.dtype == np.uint8
    assert np.all(img_as_ubyte(data) == converted) 
Example #15
Source File: test_utils.py    From napari with BSD 3-Clause "New" or "Revised" License
def test_float(dtype):
    data = np.linspace(0, 0.5, 128, dtype=dtype, endpoint=False)
    res = np.arange(128, dtype=np.uint8)
    assert convert_to_uint8(data).dtype == np.uint8
    assert np.all(convert_to_uint8(data) == res)
    data = np.linspace(0, 1, 256, dtype=dtype)
    res = np.arange(256, dtype=np.uint8)
    assert np.all(convert_to_uint8(data) == res)
    assert np.all(img_as_ubyte(data) == convert_to_uint8(data))
    assert np.all(img_as_ubyte(data - 0.5) == convert_to_uint8(data - 0.5)) 
Example #16
Source File: test_utils.py    From napari with BSD 3-Clause "New" or "Revised" License
def test_int(dtype):
    data = np.arange(50, dtype=dtype)
    data_scaled = data * 256 ** (data.dtype.itemsize - 1)
    assert convert_to_uint8(data).dtype == np.uint8
    assert convert_to_uint8(data_scaled).dtype == np.uint8
    assert np.all(img_as_ubyte(data) == convert_to_uint8(data))
    assert np.all(2 * data == convert_to_uint8(data_scaled))
    assert np.all(img_as_ubyte(data_scaled) == convert_to_uint8(data_scaled))
    assert np.all(img_as_ubyte(data - 10) == convert_to_uint8(data - 10))
    assert np.all(
        img_as_ubyte(data_scaled - 10) == convert_to_uint8(data_scaled - 10)
    ) 
Example #17
Source File: test_utils.py    From napari with BSD 3-Clause "New" or "Revised" License
def test_uint(dtype):
    data = np.arange(50, dtype=dtype)
    data_scaled = data * 256 ** (data.dtype.itemsize - 1)
    assert convert_to_uint8(data_scaled).dtype == np.uint8
    assert np.all(data == convert_to_uint8(data_scaled))
    assert np.all(img_as_ubyte(data) == convert_to_uint8(data))
    assert np.all(img_as_ubyte(data_scaled) == convert_to_uint8(data_scaled)) 
Example #18
Source File: synth.py    From PSSR with BSD 3-Clause "New" or "Revised" License
def save_img(fn, img):
    if len(img.shape) == 2:
        np.warnings.filterwarnings('ignore')
        PIL.Image.fromarray(img_as_ubyte(img), mode='L').save(f'{fn}.tif')
        np.warnings.filterwarnings('default')
    else:
        img8 = (img * 255.).astype(np.uint8)
        np.save(fn.with_suffix('.npy'), img8, allow_pickle=False) 
Example #19
Source File: synth.py    From PSSR with BSD 3-Clause "New" or "Revised" License
def img_data_to_tifs(data, times, crappify, max_scale=1.05):
    np.warnings.filterwarnings('ignore')
    lr_imgs = {}
    lr_up_imgs = {}
    hr_imgs = {}
    for time_col in range(times):
        try:
            img = data[time_col].astype(float).copy()
            img_max = img.max() * max_scale
            if img_max == 0: continue  # Do not save images with no content.
            img /= img_max
            down_img, down_up_img = crappify(img)
        except Exception:
            continue

        tag = (0, 0, time_col)
        img = img_as_ubyte(img)
        pimg = PIL.Image.fromarray(img, mode='L')
        small_img = PIL.Image.fromarray(img_as_ubyte(down_img))
        big_img = PIL.Image.fromarray(img_as_ubyte(down_up_img))
        hr_imgs[tag] = pimg
        lr_imgs[tag] = small_img
        lr_up_imgs[tag] = big_img

    np.warnings.filterwarnings('default')
    return hr_imgs, lr_imgs, lr_up_imgs 
Example #20
Source File: predict_videos.py    From DeepLabCut with GNU Lesser General Public License v3.0
def GetPoseF(cfg, dlc_cfg, sess, inputs, outputs, cap, nframes, batchsize):
    """ Batchwise prediction of pose """
    PredictedData = np.zeros(
        (nframes, dlc_cfg["num_outputs"] * 3 * len(dlc_cfg["all_joints_names"]))
    )
    batch_ind = 0  # keeps track of which image within a batch should be written to
    batch_num = 0  # keeps track of which batch you are at
    ny, nx = int(cap.get(4)), int(cap.get(3))  # 4 = CAP_PROP_FRAME_HEIGHT, 3 = CAP_PROP_FRAME_WIDTH
    if cfg["cropping"]:
        ny, nx = checkcropping(cfg, cap)

    frames = np.empty(
        (batchsize, ny, nx, 3), dtype="ubyte"
    )  # this keeps all frames in a batch
    pbar = tqdm(total=nframes)
    counter = 0
    step = max(10, int(nframes / 100))
    while cap.isOpened():
        if counter % step == 0:
            pbar.update(step)
        ret, frame = cap.read()
        if ret:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if cfg["cropping"]:
                frames[batch_ind] = img_as_ubyte(
                    frame[cfg["y1"] : cfg["y2"], cfg["x1"] : cfg["x2"]]
                )
            else:
                frames[batch_ind] = img_as_ubyte(frame)

            if batch_ind == batchsize - 1:
                pose = predict.getposeNP(frames, dlc_cfg, sess, inputs, outputs)
                PredictedData[
                    batch_num * batchsize : (batch_num + 1) * batchsize, :
                ] = pose
                batch_ind = 0
                batch_num += 1
            else:
                batch_ind += 1
        else:
            nframes = counter
            print("Detected frames: ", nframes)
            if batch_ind > 0:
                pose = predict.getposeNP(
                    frames, dlc_cfg, sess, inputs, outputs
                )  # process the whole batch (some frames might be from previous batch!)
                PredictedData[
                    batch_num * batchsize : batch_num * batchsize + batch_ind, :
                ] = pose[:batch_ind, :]
            break
        counter += 1

    pbar.close()
    return PredictedData, nframes 
Example #21
Source File: predict_videos.py    From DeepLabCut with GNU Lesser General Public License v3.0
def GetPoseS_GTF(cfg, dlc_cfg, sess, inputs, outputs, cap, nframes):
    """ Non batch wise pose estimation for video cap."""
    if cfg["cropping"]:
        ny, nx = checkcropping(cfg, cap)

    pose_tensor = predict.extract_GPUprediction(
        outputs, dlc_cfg
    )  # extract_output_tensor(outputs, dlc_cfg)
    PredictedData = np.zeros((nframes, 3 * len(dlc_cfg["all_joints_names"])))
    pbar = tqdm(total=nframes)
    counter = 0
    step = max(10, int(nframes / 100))
    while cap.isOpened():
        if counter % step == 0:
            pbar.update(step)

        ret, frame = cap.read()
        if ret:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if cfg["cropping"]:
                frame = img_as_ubyte(
                    frame[cfg["y1"] : cfg["y2"], cfg["x1"] : cfg["x2"]]
                )
            else:
                frame = img_as_ubyte(frame)

            pose = sess.run(
                pose_tensor,
                feed_dict={inputs: np.expand_dims(frame, axis=0).astype(float)},
            )
            pose[:, [0, 1, 2]] = pose[:, [1, 0, 2]]
            # pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs)
            PredictedData[
                counter, :
            ] = (
                pose.flatten()
            )  # NOTE: cfg['all_joints_names'] must therefore be in the same order as bodyparts!
        else:
            nframes = counter
            break
        counter += 1

    pbar.close()
    return PredictedData, nframes 
Example #22
Source File: extract_seqframes.py    From simba with GNU Lesser General Public License v3.0
def convertseqVideo(videos, outtype='mp4', clahe=False, startF=None, endF=None):
    '''Convert .seq videos to contrast-adjusted videos of other formats'''
    import os
    import io
    import cv2
    from tqdm import tqdm
    from PIL import Image
    from skimage import color
    from skimage.util import img_as_ubyte

    # Collect all .seq videos in the video folder
    videoDir = videos
    videolist = []
    for i in os.listdir(videoDir):
        if i.endswith('.seq'):
            videolist.append(os.path.join(videoDir, i))

    for video in videolist:
        vname = str(video)[:-4]
        f = open(video, 'rb')
        # readHeader and posFrame are helpers defined elsewhere in extract_seqframes.py
        info = readHeader(f)
        nframes = info.numFrames
        pos, frameSize = posFrame(f, nframes)
        fps = info.fps
        size = (info.width, info.height)
        extra = 4
        if startF is None:
            startF = 0
        if endF is None:
            endF = nframes
        if outtype == 'avi':
            print('Coming soon')
        if outtype == 'mp4':
            outname = os.path.join(vname + '.mp4')
            videowriter = cv2.VideoWriter(outname, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), fps, size, isColor=True)
        for index in tqdm(range(startF, endF)):
            f.seek(pos[index] + extra * (index + 1), 0)
            imgdata = f.read(frameSize[index])
            image = Image.open(io.BytesIO(imgdata))
            image = img_as_ubyte(image)
            if clahe:
                image = cv2.createCLAHE(clipLimit=2, tileGridSize=(16, 16)).apply(image)
            frame = color.gray2rgb(image)
            videowriter.write(frame)
        f.close()
        videowriter.release()

    print('Finished conversion.') 
Example #23
Source File: ridge_map.py    From ridge_map with MIT License
def preprocess(
        self, *, values=None, water_ntile=10, lake_flatness=3, vertical_ratio=40
    ):
        """Get map data ready for plotting.

        You can do this yourself, and pass an array directly to plot_map. This
        gathers all nan values, the lowest `water_ntile` percentile of elevations,
        and anything that is flat enough, and sets the values to `nan`, so no line
        is drawn. It also exaggerates the vertical scale, which can be nice for flat
        or mountainy areas.

        Parameters
        ----------
        values : np.ndarray
            An array to process, or fetch the elevation data lazily here.
        water_ntile : float in [0, 100]
            Percentile below which to delete data. Useful for coasts or rivers.
            Set to 0 to not delete any data.
        lake_flatness : int
            How much the elevation can change within 3 squares to delete data.
            Higher values delete more data. Useful for rivers, lakes, oceans.
        vertical_ratio : float > 0
            How much to exaggerate hills. Kind of arbitrary. 40 is reasonable,
            but try bigger and smaller values!

        Returns
        -------
        np.ndarray
            Processed data.
        """
        if values is None:
            values = self.get_elevation_data()
        nan_vals = np.isnan(values)

        values[nan_vals] = np.nanmin(values)
        values = (values - np.min(values)) / (np.max(values) - np.min(values))

        is_water = values < np.percentile(values, water_ntile)
        is_lake = rank.gradient(img_as_ubyte(values), square(3)) < lake_flatness

        values[nan_vals] = np.nan
        values[np.logical_or(is_water, is_lake)] = np.nan
        values = vertical_ratio * values[-1::-1]  # switch north and south
        return values

    # pylint: disable=too-many-arguments,too-many-locals 
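A hedged usage sketch for the method above, assuming the surrounding class is ridge_map's RidgeMap and that plot_map accepts the processed array as suggested by the docstring (defaults and parameter names may differ across versions):

from ridge_map import RidgeMap

rm = RidgeMap()  # default bounding box
values = rm.get_elevation_data()
processed = rm.preprocess(values=values, water_ntile=10,
                          lake_flatness=3, vertical_ratio=40)
rm.plot_map(values=processed, label="Example")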
Example #24
Source File: pf5_pixel_symmetry.py    From aim with MIT License
def execute(b64):
    b64 = base64.b64decode(b64)
    b64 = BytesIO(b64)
    img = Image.open(b64)
    img = np.array(img)
    img_la = color.rgb2gray(img)
    img_la = util.img_as_ubyte(img_la)

    # See sigma here: https://dsp.stackexchange.com/questions/4716/differences-between-opencv-canny-and-matlab-canny
    img_la = cv2.GaussianBlur(img_la, (7, 7), 2)
    edges = cv2.Canny(img_la, 0.11, 0.27)

    height, width = edges.shape

    # Set all pixels in radius of an edge pixel to 0
    # This is not working well yet
    radius = 3
    all_key = 0
    for y in range(height):
        for x in range(width):
            if edges[y][x] != 0:
                all_key += 1
                pixels_in_radius = get_pixels_in_radius(x, y, width, height, radius)
                for pixel in pixels_in_radius:
                    edges[pixel[1], pixel[0]] = 0

    img = Image.fromarray(edges, 'L')
    # img.show()
    symmetry_radius = 4
    
    # Check vertical symmetry
    sym_key = 0
    for y in range(height):
        for x in range(width // 2):  # floor division: range() needs an int in Python 3
            if edges[y][x] != 0:
                vertical_pixels = get_pixels_in_radius(width - x, y, width, height, symmetry_radius)
                horizontal_pixels = get_pixels_in_radius(x, height - y, width, height, symmetry_radius)

                for pixel in vertical_pixels:
                    if edges[int(pixel[1]), int(pixel[0])] != 0:
                        sym_key += 1
                        break

                for pixel in horizontal_pixels:
                    if edges[int(pixel[1]), int(pixel[0])] != 0:
                        sym_key += 1
                        break

    try:
        sym_normalized = (float(sym_key) / float(all_key)) * ((float((all_key - 1) * symmetry_radius) / float(width * height)) ** -1)
    except ZeroDivisionError:
        sym_normalized = 0

    return [sym_normalized] 
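get_pixels_in_radius is defined elsewhere in pf5_pixel_symmetry.py. Judging from how its result is indexed above, it returns (x, y) tuples within a square neighbourhood, clipped to the image bounds; a hypothetical reconstruction:

def get_pixels_in_radius(x, y, width, height, radius):
    # Hypothetical sketch: the project's actual helper may differ.
    pixels = []
    for dy in range(-radius, radius + 1):
        for dx in range(-radius, radius + 1):
            nx, ny = x + dx, y + dy
            if 0 <= nx < width and 0 <= ny < height:
                pixels.append((nx, ny))
    return pixels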
Example #25
Source File: pf2_edge_congestion.py    From aim with MIT License
def execute(b64):
    b64 = base64.b64decode(b64)
    b64 = BytesIO(b64)
    img = Image.open(b64)
    img = np.array(img)
    img = util.img_as_ubyte(img)
    height, width, depth = img.shape
    borders = np.zeros((img.shape[0], img.shape[1]), dtype=int)

    # Do edge detection. create_border returns 0 or 255 depending on the difference with neighboring pixels
    for x in range(1, width - 1):
        for y in range(1, height - 1):
            borders[y][x] = create_border(img, borders, y, x)

    count_edge = 0
    count_uncongested = 0
    threshold = 4  # The number of pixels a person needs to differentiate between two elements; the paper says 20, which seems far too high

    # borders is already a NumPy array; np.array just makes a fresh copy
    borders = np.array(borders)

    # Assume the screen border is always a border
    for x in range(threshold, width - threshold):
        for y in range(threshold, height - threshold):
            if borders[y][x] == 255:
                count_edge += 1

                # Sum left, right, up, down for number of pixels in threshold
                arr_right = borders[y, x + 1:x + threshold]
                sum_right = sum(arr_right)
                arr_left = borders[y, x - threshold:x - 1]
                sum_left = sum(arr_left)
                arr_up = borders[y + 1:y + threshold, x]
                sum_up = sum(arr_up)
                arr_down = borders[y - threshold:y - 1, x]
                sum_down = sum(arr_down)

                # If the sum is zero, it means there are no other pixels nearby. It needs to be in all directions non-0
                # for a pixel to be congested
                if sum_right == 0 or sum_left == 0 or sum_up == 0 or sum_down == 0:
                    count_uncongested += 1

    try:
        count_congested = count_edge - count_uncongested
        result = float(count_congested) / float(count_edge)
    except ZeroDivisionError:
        result = 0

    return [result] 
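create_border is likewise defined elsewhere in pf2_edge_congestion.py; per the comment in the excerpt, it returns 255 when a pixel differs noticeably from a neighbour and 0 otherwise. A hypothetical reconstruction, with an assumed difference threshold:

import numpy as np

def create_border(img, borders, y, x, diff_threshold=50):
    # Hypothetical sketch: mark (y, x) as an edge pixel (255) if any 4-connected
    # neighbour differs by more than diff_threshold in any channel.
    # borders is accepted for signature compatibility but unused here.
    pixel = img[y, x].astype(int)
    for dy, dx in ((-1, 0), (1, 0), (0, -1), (0, 1)):
        neighbour = img[y + dy, x + dx].astype(int)
        if np.any(np.abs(pixel - neighbour) > diff_threshold):
            return 255
    return 0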
Example #26
Source File: cp4_HSV_unique.py    From aim with MIT License
def execute(b64):
    b64 = base64.b64decode(b64)
    b64 = BytesIO(b64)
    img = Image.open(b64)
    img = np.array(img)
    img = util.img_as_ubyte(img)
    img = color.rgb2hsv(img)
    img = img.reshape(-1, 3)
    img = [tuple(l) for l in img]

    hist = collections.Counter(img)
    hist = list(hist.items())  # list() so the items view can be indexed under Python 3

    hsv_unique = []
    count = []
    h = []
    s = []
    v = []

    for x in range(len(hist)):
        add = [hist[x][0][0], hist[x][0][1], hist[x][0][2]]
        hsv_unique.append(add)
        count.append(hist[x][1])
        h.append(hist[x][0][0])
        s.append(hist[x][0][1])
        v.append(hist[x][0][2])

    # Get all unique values, still with all counts (so no minimum occurrence). This probably needs some changing in the future
    h_unique = np.unique(h)
    s_unique = np.unique(s)
    v_unique = np.unique(v)

    new_hsv = []

    # Keep only HSV values that occur often enough
    for x in range(len(hsv_unique)):
        if count[x] > 5:
            new_hsv.append(hsv_unique[x])


    result = [len(new_hsv), len(h_unique), len(s_unique), len(v_unique)]

    return result 
Example #27
Source File: synth.py    From PSSR with BSD 3-Clause "New" or "Revised" License
def czi_data_to_tifs(data, axes, shape, crappify, max_scale=1.05):
    np.warnings.filterwarnings('ignore')
    lr_imgs = {}
    lr_up_imgs = {}
    hr_imgs = {}
    channels = shape['C']
    depths = shape['Z']
    times = shape['T']
    x, y = shape['X'], shape['Y']

    for channel in range(channels):
        for depth in range(depths):
            for time_col in range(times):
                try:
                    idx = build_index(
                        axes, {
                            'T': time_col,
                            'C': channel,
                            'Z': depth,
                            'X': slice(0, x),
                            'Y': slice(0, y)
                        })
                    img = data[idx].astype(float).copy()
                    img_max = img.max() * max_scale
                    if img_max == 0:
                        continue  # Do not save images with no content.
                    img /= img_max
                    down_img, down_up_img = crappify(img)
                except Exception:
                    continue

                tag = (channel, depth, time_col)
                img = img_as_ubyte(img)
                pimg = PIL.Image.fromarray(img, mode='L')
                small_img = PIL.Image.fromarray(img_as_ubyte(down_img))
                big_img = PIL.Image.fromarray(img_as_ubyte(down_up_img))
                hr_imgs[tag] = pimg
                lr_imgs[tag] = small_img
                lr_up_imgs[tag] = big_img

    np.warnings.filterwarnings('default')
    return hr_imgs, lr_imgs, lr_up_imgs