Python matplotlib.cm.jet_r() Examples

The following are 8 code examples of matplotlib.cm.jet_r(), collected from open-source projects. Each example is preceded by a link to the project and source file it was taken from. You may also want to check out the other functions and classes available in the matplotlib.cm module.
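Before diving into the examples, it helps to know what jet_r itself is: the reversed jet colormap, a callable that maps scalars in [0, 1] to RGBA values. A minimal sketch, assuming only NumPy and matplotlib are installed:

import numpy as np
from matplotlib import cm

values = np.linspace(0.0, 1.0, 5)   # scalars in [0, 1]
rgba = cm.jet_r(values)             # shape (5, 4): RGBA floats in [0, 1]
print(rgba.shape)
print(cm.jet_r(0.0))                # the red end of the reversed map
print(cm.jet_r(0.0, bytes=True))    # the same color as uint8 values in [0, 255]

Recent matplotlib versions also expose the same colormap through the matplotlib.colormaps['jet_r'] registry.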
Example #1
Source File: vis_tools.py    From USIP with GNU General Public License v3.0
# imports assumed by this excerpt
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D

def plot_pc_old(pc_np, z_cutoff=70, birds_view=False, color='height', size=0.3, ax=None):
    # remove points beyond the z cutoff
    valid_index = pc_np[:, 2] < z_cutoff
    pc_np = pc_np[valid_index, :]

    if ax is None:
        fig = plt.figure(figsize=(9, 9))
        ax = Axes3D(fig)
    if color == 'height':
        # color each point by its y coordinate, mapped through the reversed jet colormap
        c = pc_np[:, 1]
        ax.scatter(pc_np[:, 0].tolist(), pc_np[:, 1].tolist(), pc_np[:, 2].tolist(), s=size, c=c, cmap=cm.jet_r)
    elif color == 'reflectance':
        raise NotImplementedError('reflectance coloring is not implemented')
    else:
        ax.scatter(pc_np[:, 0].tolist(), pc_np[:, 1].tolist(), pc_np[:, 2].tolist(), s=size, c=color)

    axisEqual3D(ax)  # helper defined elsewhere in vis_tools.py
    if birds_view:
        ax.view_init(elev=0, azim=-90)
    else:
        ax.view_init(elev=-45, azim=-90)
    # ax.invert_yaxis()

    return ax
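A minimal driver for the function above — a hypothetical sketch, assuming plot_pc_old and its axisEqual3D helper are importable from vis_tools.py, with random points standing in for real LiDAR data:

import numpy as np
import matplotlib.pyplot as plt

pc = np.random.uniform(-50, 50, size=(2048, 3)).astype(np.float32)  # fake (N, 3) point cloud
ax = plot_pc_old(pc, z_cutoff=70, birds_view=True, color='height', size=0.5)
plt.show()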
Example #2
Source File: preprocess_object_recognition_eitel.py    From nideep with BSD 2-Clause "Simplified" License
# imports assumed by this excerpt
import numpy as np
from matplotlib import cm

def colorize_depth(depth_map):
    # normalize depth values to [0, 1]
    min_depth = depth_map.min()
    max_depth = depth_map.max()
    depth_map = (depth_map - min_depth) / float(max_depth - min_depth)

    # apply the reversed jet colormap and scale to [0, 255]
    depth_map = np.uint8(cm.jet_r(depth_map) * 255)
    # drop the alpha channel, keeping only RGB
    return depth_map[:, :, 0:3]
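A quick check of colorize_depth on synthetic data — a sketch, not part of the original project, assuming the function above is in scope:

import numpy as np

depth = np.random.uniform(0.5, 5.0, size=(120, 160)).astype(np.float32)  # fake depth map
rgb = colorize_depth(depth)
print(rgb.shape, rgb.dtype)  # (120, 160, 3) uint8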


# Given a CSV row of metadata, colorize the image and save into a destination 
Example #3
Source File: main.py    From grad-cam-pytorch with MIT License
# imports assumed by this excerpt
import cv2
import numpy as np
from matplotlib import cm

def save_gradcam(filename, gcam, raw_image, paper_cmap=False):
    gcam = gcam.cpu().numpy()
    # map activations through reversed jet and drop the alpha channel
    cmap = cm.jet_r(gcam)[..., :3] * 255.0
    if paper_cmap:
        # blend heatmap and image weighted by the activation strength
        alpha = gcam[..., None]
        gcam = alpha * cmap + (1 - alpha) * raw_image
    else:
        # simple 50/50 blend of heatmap and image
        gcam = (cmap.astype(float) + raw_image.astype(float)) / 2
    cv2.imwrite(filename, np.uint8(gcam))
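To exercise save_gradcam end to end, a hypothetical driver with a fake activation map and image (assumes torch, NumPy, and OpenCV are installed, and the function above is in scope):

import numpy as np
import torch

gcam = torch.rand(224, 224)  # fake Grad-CAM activations in [0, 1]
raw = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float64)  # fake BGR image
save_gradcam("gradcam_demo.png", gcam, raw, paper_cmap=True)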
Example #4
Source File: graphics.py    From rl-agents with MIT License
# imports assumed by this excerpt
import matplotlib as mpl
from matplotlib import cm

@classmethod
def draw_node(cls, node, surface, origin, size, config):
    import pygame
    cmap = cm.jet_r
    # node values range over [0, 1/(1-gamma)], the maximum discounted return
    norm = mpl.colors.Normalize(vmin=0, vmax=1 / (1 - config["gamma"]))
    color = cmap(norm(node.get_value()), bytes=True)
    pygame.draw.rect(surface, color, (origin[0], origin[1], size[0], size[1]), 0)
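The cmap(norm(value), bytes=True) pattern in this and the following examples returns a uint8 RGBA 4-tuple, which pygame accepts directly as a color. A standalone check, independent of rl-agents:

import matplotlib as mpl
from matplotlib import cm

norm = mpl.colors.Normalize(vmin=0.0, vmax=10.0)
color = cm.jet_r(norm(7.5), bytes=True)  # 7.5 is normalized to 0.75 before lookup
print(color)  # an RGBA 4-tuple of uint8 values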
Example #5
Source File: graphics.py    From rl-agents with MIT License
# imports assumed by this excerpt
import numpy as np
import matplotlib as mpl
from matplotlib import cm

@classmethod
def draw_node(cls, node, surface, origin, size, config):
    import pygame
    cmap = cm.jet_r
    norm = mpl.colors.Normalize(vmin=0, vmax=config["gamma"] / (1 - config["gamma"]))
    n = np.size(node.value)
    # draw one colored sub-rectangle per entry of the node's value
    for i in range(n):
        v = node.value[i] if n > 1 else node.value
        color = cmap(norm(v), bytes=True)
        pygame.draw.rect(surface, color,
                         (origin[0] + i / n * size[0], origin[1], size[0] / n, size[1]), 0)
Example #6
Source File: graphics.py    From rl-agents with MIT License
# imports assumed by this excerpt
import numpy as np
import matplotlib as mpl
from matplotlib import cm

@classmethod
def display_highway(cls, agent, surface):
    """
    Visualization of the state space, used for highway_env environments only.

    :param agent: the agent to be displayed
    :param surface: the surface on which the agent is displayed
    """
    import pygame
    norm = mpl.colors.Normalize(vmin=-2, vmax=2)
    cmap = cm.jet_r
    try:
        grid_shape = agent.mdp.original_shape
    except AttributeError:
        grid_shape = cls.highway_module.finite_mdp.compute_ttc_grid(
            agent.env, time_quantization=1., horizon=10.).shape
    cell_size = (surface.get_width() // grid_shape[2], surface.get_height() // (grid_shape[0] * grid_shape[1]))
    speed_size = surface.get_height() // grid_shape[0]
    value = agent.get_state_value().reshape(grid_shape)
    # one colored cell per state, one horizontal separator per speed level
    for h in range(grid_shape[0]):
        for i in range(grid_shape[1]):
            for j in range(grid_shape[2]):
                color = cmap(norm(value[h, i, j]), bytes=True)
                pygame.draw.rect(surface, color, (
                    j * cell_size[0], i * cell_size[1] + h * speed_size, cell_size[0], cell_size[1]), 0)
        pygame.draw.line(surface, cls.BLACK,
                         (0, h * speed_size), (grid_shape[2] * cell_size[0], h * speed_size), 1)
    # overlay the planned trajectory in red
    states, actions = agent.plan_trajectory(agent.mdp.state)
    for state in states:
        (h, i, j) = np.unravel_index(state, grid_shape)
        pygame.draw.rect(surface, cls.RED,
                         (j * cell_size[0], i * cell_size[1] + h * speed_size, cell_size[0], cell_size[1]), 1)
Example #7
Source File: graphics.py    From rl-agents with MIT License
# imports assumed by this excerpt
import matplotlib as mpl
from matplotlib import cm

@classmethod
def display(cls, agent, surface, sim_surface=None, display_text=True):
    """
    Display the action-values for the current state.

    :param agent: the DQNAgent to be displayed
    :param surface: the pygame surface on which the agent is displayed
    :param sim_surface: the pygame surface on which the env is rendered
    :param display_text: whether to display the action values as text
    """
    import pygame
    action_values = agent.get_state_action_values(agent.previous_state)
    action_distribution = agent.action_distribution(agent.previous_state)

    cell_size = (surface.get_width() // len(action_values), surface.get_height())
    pygame.draw.rect(surface, cls.BLACK, (0, 0, surface.get_width(), surface.get_height()), 0)

    # display one colored cell per action value
    cmap = cm.jet_r
    norm = mpl.colors.Normalize(vmin=0, vmax=1 / (1 - agent.config["gamma"]))
    for action, value in enumerate(action_values):
        color = cmap(norm(value), bytes=True)
        pygame.draw.rect(surface, color, (cell_size[0] * action, 0, cell_size[0], cell_size[1]), 0)

        if display_text:
            font = pygame.font.Font(None, 15)
            text = "v={:.2f} / p={:.2f}".format(value, action_distribution[action])
            text = font.render(text, True, (10, 10, 10), (255, 255, 255))
            surface.blit(text, (cell_size[0] * action, 0))

    if sim_surface and hasattr(agent.value_net, "get_attention_matrix"):
        cls.display_vehicles_attention(agent, sim_surface)
Example #8
Source File: demo.py    From deeplab-pytorch with MIT License
# imports assumed by this excerpt
import cv2
import numpy as np
import torch
from matplotlib import cm
from omegaconf import OmegaConf
# get_device, get_classtable, setup_postprocessor, preprocessing and
# inference are helper functions defined elsewhere in demo.py

def live(config_path, model_path, cuda, crf, camera_id):
    """
    Inference from camera stream
    """

    # Setup
    CONFIG = OmegaConf.load(config_path)
    device = get_device(cuda)
    torch.set_grad_enabled(False)
    torch.backends.cudnn.benchmark = True

    classes = get_classtable(CONFIG)
    postprocessor = setup_postprocessor(CONFIG) if crf else None

    # Look up the model class by the name given in the config
    model = eval(CONFIG.MODEL.NAME)(n_classes=CONFIG.DATASET.N_CLASSES)
    state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(state_dict)
    model.eval()
    model.to(device)
    print("Model:", CONFIG.MODEL.NAME)

    # UVC camera stream
    cap = cv2.VideoCapture(camera_id)
    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"YUYV"))

    def colorize(labelmap):
        # Assign a unique color to each label by spreading label indices over [0, 1)
        labelmap = labelmap.astype(np.float32) / CONFIG.DATASET.N_CLASSES
        colormap = cm.jet_r(labelmap)[..., :-1] * 255.0
        return np.uint8(colormap)

    def mouse_event(event, x, y, flags, labelmap):
        # Show the class name of the pixel under the mouse
        label = labelmap[y, x]
        name = classes[label]
        print(name)

    window_name = "{} + {}".format(CONFIG.MODEL.NAME, CONFIG.DATASET.NAME)
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    while True:
        _, frame = cap.read()
        image, raw_image = preprocessing(frame, device, CONFIG)
        labelmap = inference(model, image, raw_image, postprocessor)
        colormap = colorize(labelmap)

        # Register mouse callback function
        cv2.setMouseCallback(window_name, mouse_event, labelmap)

        # Overlay prediction
        cv2.addWeighted(colormap, 0.5, raw_image, 0.5, 0.0, raw_image)

        # Quit by pressing "q" key
        cv2.imshow(window_name, raw_image)
        if cv2.waitKey(10) == ord("q"):
            break

    # Release the camera and close the window on exit
    cap.release()
    cv2.destroyAllWindows()
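The colorize helper above is the core trick of this example: scale integer class labels into [0, 1) by dividing by the class count, then index the colormap. A standalone version of that step — a sketch assuming 21 classes, as in PASCAL VOC:

import numpy as np
from matplotlib import cm

N_CLASSES = 21
labelmap = np.random.randint(0, N_CLASSES, size=(4, 4))  # fake label map
colormap = cm.jet_r(labelmap.astype(np.float32) / N_CLASSES)[..., :-1] * 255.0
print(np.uint8(colormap).shape)  # (4, 4, 3): one RGB color per pixel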