Python numpy.float32() Examples

The following code examples show how to use numpy.float32(). They are extracted from open-source Python projects.
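
Before diving into the project examples, here is a minimal, self-contained sketch of what numpy.float32 itself does; the variable names are illustrative and not taken from any project below.

import numpy as np

x = np.float32(1.5)                              # a single-precision scalar
arr = np.array([1, 2, 3], dtype=np.float32)      # an array with float32 dtype
print(x.dtype, arr.dtype)                        # float32 float32
print(np.float32(1) / np.float32(3))             # ~0.33333334, note the 32-bit rounding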

Example 1
Project: human-rl   Author: gsastry   File: classifier_tf.py    (MIT License)
def classification_metrics(y, y_pred, threshold):
    metrics = {}
    metrics['threshold'] = threshold_from_predictions(y, y_pred, 0)
    metrics['np.std(y_pred)'] = np.std(y_pred)
    metrics['positive_frac_batch'] = float(np.count_nonzero(y == True)) / len(y)
    denom = np.count_nonzero(y == False)
    num = np.count_nonzero(np.logical_and(y == False, y_pred >= threshold))
    if denom > 0:
        metrics['fpr'] = float(num) / float(denom)
    if any(y) and not all(y):
        metrics['auc'] = roc_auc_score(y, y_pred)
        y_pred_bool = y_pred >= threshold
        if (any(y_pred_bool) and not all(y_pred_bool)):
            metrics['precision'] = precision_score(np.array(y, dtype=np.float32), y_pred_bool)
            metrics['recall'] = recall_score(y, y_pred_bool)
    return metrics 
Example 2
Project: FCN_train   Author: 315386775   File: image_channel.py    (license)
def preprocess(image):
    """Takes an image and apply preprocess"""
    # resize the image to (data_shape, data_shape)
    image = cv2.resize(image, (data_shape, data_shape))
    # convert BGR to RGB
    image = image[:, :, (2, 1, 0)]
    # convert to float before subtracting the mean
    image = image.astype(np.float32)
    # subtract the mean
    image -= np.array([123, 117, 104])
    # transpose to [batch-channel-height-width]
    image = np.transpose(image, (2, 0, 1))
    image = image[np.newaxis, :]
    # convert to an NDArray
    image = nd.array(image)
    return image 
Example 3
Project: pyku   Author: dubvulture   File: sudoku_steps.py    (GNU General Public License v3.0)
def remove_artifacts(self, image):
        """
        Remove the connected components that are not within the parameters
        Operates in place
        :param image: sudoku's thresholded image w/o grid
        :return: None
        """
        labeled, features = label(image, structure=CROSS)
        lbls = np.arange(1, features + 1)
        areas = extract_feature(image, labeled, lbls, np.sum,
                                np.uint32, 0)
        sides = extract_feature(image, labeled, lbls, min_side,
                                np.float32, 0, True)
        diags = extract_feature(image, labeled, lbls, diagonal,
                                np.float32, 0, True)

        for index in lbls:
            area = areas[index - 1] / 255
            side = sides[index - 1]
            diag = diags[index - 1]
            if side < 5 or side > 20 \
                    or diag < 15 or diag > 25 \
                    or area < 40:
                image[labeled == index] = 0
        return None 
Example 4
Project: GELUs   Author: hendrycks   File: twitter_pos.py    (MIT License)
def word_list_to_embedding(words, embeddings, embedding_dimension=50):
    '''
    :param words: an n x (2*window_size + 1) matrix from data_to_mat
    :param embeddings: an embedding dictionary where keys are strings and values
    are embeddings; the output from embeddings_to_dict
    :param embedding_dimension: the dimension of the values in embeddings; in this
    assignment, embedding_dimension=50
    :return: an n x ((2*window_size + 1)*embedding_dimension) matrix where each entry of the
    words matrix is replaced with its embedding
    '''
    m, n = words.shape
    words = words.reshape((-1))

    return np.array([embeddings[w] for w in words], dtype=np.float32).reshape(m, n*embedding_dimension)

#
# End Twitter Helper Functions
# 
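
A hedged usage sketch of word_list_to_embedding above, using a toy 2-dimensional embedding table instead of the assignment's 50-dimensional Twitter embeddings (all names below are illustrative):

import numpy as np

embeddings = {'<s>': np.zeros(2, dtype=np.float32),
              'good': np.array([0.1, 0.2], dtype=np.float32),
              'day': np.array([0.3, 0.4], dtype=np.float32)}
words = np.array([['<s>', 'good', 'day']], dtype=object)  # n=1 row, window of 3 tokens
out = word_list_to_embedding(words, embeddings, embedding_dimension=2)
print(out.shape, out.dtype)  # (1, 6) float32 -- each token replaced by its embedding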
Example 5
Project: facerecognition   Author: guoxiaolu   File: facenet.py    (license)
def put_images_on_grid(images, shape=(16,8)):
    nrof_images = images.shape[0]
    img_size = images.shape[1]
    bw = 3
    img = np.zeros((shape[1]*(img_size+bw)+bw, shape[0]*(img_size+bw)+bw, 3), np.float32)
    for i in range(shape[1]):
        x_start = i*(img_size+bw)+bw
        for j in range(shape[0]):
            img_index = i*shape[0]+j
            if img_index>=nrof_images:
                break
            y_start = j*(img_size+bw)+bw
            img[x_start:x_start+img_size, y_start:y_start+img_size, :] = images[img_index, :, :, :]
        if img_index>=nrof_images:
            break
    return img 
Example 6
Project: treecat   Author: posterior   File: plotting.py    (Apache License 2.0)
def layout_tree(correlation):
    """Layout tree for visualization with e.g. matplotlib.

    Args:
        correlation: A [V, V]-shaped numpy array of latent correlations.

    Returns:
        A [V, 3]-shaped numpy array of spectral positions of vertices.
    """
    assert len(correlation.shape) == 2
    assert correlation.shape[0] == correlation.shape[1]
    assert correlation.dtype == np.float32

    laplacian = -correlation
    np.fill_diagonal(laplacian, 0)
    np.fill_diagonal(laplacian, -laplacian.sum(axis=0))
    evals, evects = scipy.linalg.eigh(laplacian, eigvals=[1, 2, 3])
    assert np.all(evals > 0)
    assert evects.shape[1] == 3
    return evects 
Example 7
Project: treecat   Author: posterior   File: util_test.py    (Apache License 2.0)
def test_quantize_from_probs2(size, resolution):
    set_random_seed(make_seed(size, resolution))
    probs = np.exp(np.random.random(size)).astype(np.float32)
    probs2 = probs.reshape((1, size))
    quantized = quantize_from_probs2(probs2, resolution)
    assert quantized.shape == probs2.shape
    assert quantized.dtype == np.int8
    assert np.all(quantized.sum(axis=1) == resolution)

    # Check that quantized result is closer to target than any other value.
    quantized = quantized.reshape((size, ))
    target = resolution * probs / probs.sum()
    distance = np.abs(quantized - target).sum()
    for combo in itertools.combinations(range(size), resolution):
        other = np.zeros(size, np.int8)
        for i in combo:
            other[i] += 1
        assert other.sum() == resolution
        other_distance = np.abs(other - target).sum()
        assert distance <= other_distance 
Example 8
Project: treecat   Author: posterior   File: training.py    (Apache License 2.0)
def sample_tree(self):
        """Samples a random tree.

        Returns:
            A pair (edges, edge_logits), where:
                edges: A list of (vertex, vertex) pairs.
                edge_logits: A [K]-shaped numpy array of edge logits.
        """
        logger.info('TreeCatTrainer.sample_tree given %d rows',
                    len(self._added_rows))
        SERIES.sample_tree_num_rows.append(len(self._added_rows))
        complete_grid = self._tree.complete_grid
        edge_logits = self.compute_edge_logits()
        assert edge_logits.shape[0] == complete_grid.shape[1]
        assert edge_logits.dtype == np.float32
        edges = self.get_edges()
        edges = sample_tree(complete_grid, edge_logits, edges)
        return edges, edge_logits 
Example 9
Project: treecat   Author: posterior   File: training.py    (Apache License 2.0)
def treecat_add_cell(
        feature_type,
        ragged_index,
        data_row,
        message,
        feat_probs,
        meas_probs,
        v, ):
    if feature_type == TY_MULTINOMIAL:
        beg, end = ragged_index[v:v + 2]
        feat_block = feat_probs[beg:end, :]
        meas_block = meas_probs[v, :]
        for c, count in enumerate(data_row[beg:end]):
            for _ in range(count):
                message *= feat_block[c, :]
                message /= meas_block
                feat_block[c, :] += np.float32(1)
                meas_block += np.float32(1)
    else:
        raise NotImplementedError 
Example 10
Project: treecat   Author: posterior   File: training.py    (Apache License 2.0)
def __init__(self, data, tree_prior, config):
        """Initialize a model with an empty subsample.

        Args:
            data: An [N, V]-shaped numpy array of real-valued data.
            tree_prior: A [K]-shaped numpy array of prior edge log odds, where
                K is the number of edges in the complete graph on V vertices.
            config: A global config dict.
        """
        assert isinstance(data, np.ndarray)
        data = np.asarray(data, np.float32)
        assert len(data.shape) == 2
        N, V = data.shape
        D = config['model_latent_dim']
        E = V - 1  # Number of edges in the tree.
        TreeTrainer.__init__(self, N, V, tree_prior, config)
        self._data = data
        self._latent = np.zeros([N, V, D], np.float32)

        # This is symmetric positive definite.
        self._vert_ss = np.zeros([V, D, D], np.float32)
        # This is arbitrary (not necessarily symmetric).
        self._edge_ss = np.zeros([E, D, D], np.float32)
        # This represents (count, mean, covariance).
        self._feat_ss = np.zeros([V, D, 1 + 1 + D], np.float32) 
Example 11
Project: treecat   Author: posterior   File: serving.py    (Apache License 2.0)
def observed_perplexity(self, counts):
        """Compute perplexity = exp(entropy) of observed variables.

        Perplexity is an information theoretic measure of the number of
        clusters or latent classes. Perplexity is a real number in the range
        [1, M], where M is model_num_clusters.

        Args:
            counts: A [V]-shaped array of multinomial counts.

        Returns:
            A [V]-shaped numpy array of perplexity.
        """
        V, E, M, R = self._VEMR
        if counts is not None:
            counts = np.ones(V, dtype=np.int8)
        assert counts.shape == (V, )
        assert counts.dtype == np.int8
        assert np.all(counts > 0)
        observed_entropy = np.empty(V, dtype=np.float32)
        for v in range(V):
            beg, end = self._ragged_index[v:v + 2]
            probs = np.dot(self._feat_cond[beg:end, :], self._vert_probs[v, :])
            observed_entropy[v] = multinomial_entropy(probs, counts[v])
        return np.exp(observed_entropy) 
Example 12
Project: treecat   Author: posterior   File: generate.py    (Apache License 2.0)
def generate_model_file(num_rows, num_cols, num_cats=4, rate=1.0):
    """Generate a random model.

    Returns:
        The path to a gzipped pickled model.
    """
    path = os.path.join(DATA, '{}-{}-{}-{:0.1f}.model.pkz'.format(
        num_rows, num_cols, num_cats, rate))
    V = num_cols
    K = V * (V - 1) // 2
    if os.path.exists(path):
        return path
    print('Generating {}'.format(path))
    if not os.path.exists(DATA):
        os.makedirs(DATA)
    dataset_path = generate_dataset_file(num_rows, num_cols, num_cats, rate)
    dataset = pickle_load(dataset_path)
    table = dataset['table']
    tree_prior = np.zeros(K, dtype=np.float32)
    config = make_config(learning_init_epochs=5)
    model = train_model(table, tree_prior, config)
    pickle_dump(model, path)
    return path 
Example 13
Project: youtube-8m   Author: wangheda   File: losses.py    (license)
def calculate_loss(self, predictions, labels, weights=None, **unused_params):
    with tf.name_scope("loss_xent"):
      epsilon = 10e-6
      if FLAGS.label_smoothing:
        float_labels = smoothing(labels)
      else:
        float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)
      if weights is not None:
        print cross_entropy_loss, weights
        weighted_loss = tf.einsum("ij,i->ij", cross_entropy_loss, weights)
        print "create weighted_loss", weighted_loss
        return tf.reduce_mean(tf.reduce_sum(weighted_loss, 1))
      else:
        return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1)) 
Example 14
Project: youtube-8m   Author: wangheda   File: losses.py    (license)
def calculate_loss(self, predictions, support_predictions, labels, **unused_params):
    """ 
    support_predictions batch_size x num_models x num_classes
    predictions = tf.reduce_mean(support_predictions, axis=1)
    """
    model_count = tf.shape(support_predictions)[1]
    vocab_size = tf.shape(support_predictions)[2]

    mean_predictions = tf.reduce_mean(support_predictions, axis=1, keep_dims=True)
    support_labels = tf.tile(tf.expand_dims(tf.cast(labels, dtype=tf.float32), axis=1), multiples=[1,model_count,1])
    support_means = tf.stop_gradient(tf.tile(mean_predictions, multiples=[1,model_count,1]))

    support_predictions = tf.reshape(support_predictions, shape=[-1,model_count*vocab_size])
    support_labels = tf.reshape(support_labels, shape=[-1,model_count*vocab_size])
    support_means = tf.reshape(support_means, shape=[-1,model_count*vocab_size])

    ce_loss_fn = CrossEntropyLoss()
    # The cross entropy between predictions and ground truth
    cross_entropy_loss = ce_loss_fn.calculate_loss(support_predictions, support_labels, **unused_params)
    # The cross entropy between predictions and mean predictions
    divergence = ce_loss_fn.calculate_loss(support_predictions, support_means, **unused_params)

    loss = cross_entropy_loss * (1.0 - FLAGS.support_loss_percent) - divergence * FLAGS.support_loss_percent
    return loss 
Example 15
Project: youtube-8m   Author: wangheda   File: losses.py    (license)
def calculate_loss(self, predictions, labels, weights=None, **unused_params):
    with tf.name_scope("loss_xent"):
      epsilon = 10e-6
      if FLAGS.label_smoothing:
        float_labels = smoothing(labels)
      else:
        float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)
      if weights is not None:
        print cross_entropy_loss, weights
        weighted_loss = tf.einsum("ij,i->ij", cross_entropy_loss, weights)
        print "create weighted_loss", weighted_loss
        return tf.reduce_mean(tf.reduce_sum(weighted_loss, 1))
      else:
        return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1)) 
Example 16
Project: neurobind   Author: Kyubyong   File: data_load.py    (Apache License 2.0)
def get_batch_data():
    # Load data
    X, Y = load_data()

    # calc total batch count
    num_batch = len(X) // hp.batch_size

    # Convert to tensor
    X = tf.convert_to_tensor(X, tf.int32)
    Y = tf.convert_to_tensor(Y, tf.float32)

    # Create Queues
    input_queues = tf.train.slice_input_producer([X, Y])

    # create batch queues
    x, y = tf.train.batch(input_queues,
                          num_threads=8,
                          batch_size=hp.batch_size,
                          capacity=hp.batch_size * 64,
                          allow_smaller_final_batch=False)

    return x, y, num_batch  # (N, T), (N, T), () 
Example 17
Project: human-rl   Author: gsastry   File: classifier_utils.py    (MIT License)
def metrics(self, X, y):
        metrics = {}
        y_pred_pair, loss = self.predict_proba_with_loss(X, y)
        y_pred = y_pred_pair[:,1]  ## From softmax pair to prob of catastrophe
        
        metrics['loss'] = loss
        threshold = self.threshold_from_data(X, y)
        metrics['threshold'] = threshold
        metrics['np.std(y_pred)'] = np.std(y_pred)
        denom = np.count_nonzero(y == False)
        num = np.count_nonzero(np.logical_and(y == False, y_pred >= threshold))
        metrics['fpr'] = float(num) / float(denom)
        if any(y) and not all(y):
            metrics['auc'] = roc_auc_score(y, y_pred)
            y_pred_bool = y_pred >= threshold
            if (any(y_pred_bool) and not all(y_pred_bool)):
                metrics['precision'] = precision_score(np.array(y, dtype=np.float32), y_pred_bool)
                metrics['recall'] = recall_score(y, y_pred_bool)

        return metrics 
Example 18
Project: human-rl   Author: gsastry   File: classifier_tf.py    (MIT License)
def classification_metrics(y, y_pred, threshold):
    metrics = {}
    metrics['threshold'] = threshold_from_predictions(y, y_pred, 0)
    metrics['np.std(y_pred)'] = np.std(y_pred)
    metrics['positive_frac_batch'] = float(np.count_nonzero(y == True)) / len(y)
    denom = np.count_nonzero(y == False)
    num = np.count_nonzero(np.logical_and(y == False, y_pred >= threshold))
    if denom > 0:
        metrics['fpr'] = float(num) / float(denom)
    if any(y) and not all(y):
        metrics['auc'] = roc_auc_score(y, y_pred)
        y_pred_bool = y_pred >= threshold
        if (any(y_pred_bool) and not all(y_pred_bool)):
            metrics['precision'] = precision_score(np.array(y, dtype=np.float32), y_pred_bool)
            metrics['recall'] = recall_score(y, y_pred_bool)
    return metrics 
Example 19
Project: human-rl   Author: gsastry   File: classifier_utils.py    (MIT License)
def metrics(self, X, y):
        metrics = {}
        y_pred_pair, loss = self.predict_proba_with_loss(X, y)
        y_pred = y_pred_pair[:,1]  ## From softmax pair to prob of catastrophe
        
        metrics['loss'] = loss
        threshold = self.threshold_from_data(X, y)
        metrics['threshold'] = threshold
        metrics['np.std(y_pred)'] = np.std(y_pred)
        denom = np.count_nonzero(y == False)
        num = np.count_nonzero(np.logical_and(y == False, y_pred >= threshold))
        metrics['fpr'] = float(num) / float(denom)
        if any(y) and not all(y):
            metrics['auc'] = roc_auc_score(y, y_pred)
            y_pred_bool = y_pred >= threshold
            if (any(y_pred_bool) and not all(y_pred_bool)):
                metrics['precision'] = precision_score(np.array(y, dtype=np.float32), y_pred_bool)
                metrics['recall'] = recall_score(y, y_pred_bool)

        return metrics 
Example 20
Project: human-rl   Author: gsastry   File: classifier_utils.py    (MIT License)
def metrics(self, X, y):
        metrics = {}
        y_pred_pair, loss = self.predict_proba_with_loss(X, y)
        y_pred = y_pred_pair[:,1]  ## From softmax pair to prob of catastrophe
        
        metrics['loss'] = loss
        threshold = self.threshold_from_data(X, y)
        metrics['threshold'] = threshold
        metrics['np.std(y_pred)'] = np.std(y_pred)
        denom = np.count_nonzero(y == False)
        num = np.count_nonzero(np.logical_and(y == False, y_pred >= threshold))
        metrics['fpr'] = float(num) / float(denom)
        if any(y) and not all(y):
            metrics['auc'] = roc_auc_score(y, y_pred)
            y_pred_bool = y_pred >= threshold
            if (any(y_pred_bool) and not all(y_pred_bool)):
                metrics['precision'] = precision_score(np.array(y, dtype=np.float32), y_pred_bool)
                metrics['recall'] = recall_score(y, y_pred_bool)

        return metrics 
Example 21
Project: human-rl   Author: gsastry   File: classifier_tf.py    (MIT License)
def classification_metrics(y, y_pred, threshold):
    metrics = {}
    metrics['threshold'] = threshold_from_predictions(y, y_pred, 0)
    metrics['np.std(y_pred)'] = np.std(y_pred)
    metrics['positive_frac_batch'] = float(np.count_nonzero(y == True)) / len(y)
    denom = np.count_nonzero(y == False)
    num = np.count_nonzero(np.logical_and(y == False, y_pred >= threshold))
    if denom > 0:
        metrics['fpr'] = float(num) / float(denom)
    if any(y) and not all(y):
        metrics['auc'] = roc_auc_score(y, y_pred)
        y_pred_bool = y_pred >= threshold
        if (any(y_pred_bool) and not all(y_pred_bool)):
            metrics['precision'] = precision_score(np.array(y, dtype=np.float32), y_pred_bool)
            metrics['recall'] = recall_score(y, y_pred_bool)
    return metrics 
Example 22
Project: human-rl   Author: gsastry   File: classifier_utils.py    (MIT License)
def metrics(self, X, y):
        metrics = {}
        y_pred_pair, loss = self.predict_proba_with_loss(X, y)
        y_pred = y_pred_pair[:,1]  ## From softmax pair to prob of catastrophe
        
        metrics['loss'] = loss
        threshold = self.threshold_from_data(X, y)
        metrics['threshold'] = threshold
        metrics['np.std(y_pred)'] = np.std(y_pred)
        denom = np.count_nonzero(y == False)
        num = np.count_nonzero(np.logical_and(y == False, y_pred >= threshold))
        metrics['fpr'] = float(num) / float(denom)
        if any(y) and not all(y):
            metrics['auc'] = roc_auc_score(y, y_pred)
            y_pred_bool = y_pred >= threshold
            if (any(y_pred_bool) and not all(y_pred_bool)):
                metrics['precision'] = precision_score(np.array(y, dtype=np.float32), y_pred_bool)
                metrics['recall'] = recall_score(y, y_pred_bool)

        return metrics 
Example 23
Project: human-rl   Author: gsastry   File: classifier_tf.py    (MIT License)
def classification_metrics(y, y_pred, threshold):
    metrics = {}
    metrics['threshold'] = threshold_from_predictions(y, y_pred, 0)
    metrics['np.std(y_pred)'] = np.std(y_pred)
    metrics['positive_frac_batch'] = float(np.count_nonzero(y == True)) / len(y)
    denom = np.count_nonzero(y == False)
    num = np.count_nonzero(np.logical_and(y == False, y_pred >= threshold))
    if denom > 0:
        metrics['fpr'] = float(num) / float(denom)
    if any(y) and not all(y):
        metrics['auc'] = roc_auc_score(y, y_pred)
        y_pred_bool = y_pred >= threshold
        if (any(y_pred_bool) and not all(y_pred_bool)):
            metrics['precision'] = precision_score(np.array(y, dtype=np.float32), y_pred_bool)
            metrics['recall'] = recall_score(y, y_pred_bool)
    return metrics 
Example 24
Project: human-rl   Author: gsastry   File: model.py    (MIT License)
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None):
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[:3])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = np.prod(filter_shape[:2]) * num_filters
        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))

        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.constant_initializer(0.0),
                            collections=collections)
        return tf.nn.conv2d(x, w, stride_shape, pad) + b 
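
As a quick arithmetic check of the weight bound used above, for a 3x3 filter over a 3-channel input producing 32 feature maps (a sketch, not project code):

import numpy as np

fan_in = 3 * 3 * 3            # filter height * filter width * input channels
fan_out = 3 * 3 * 32          # filter height * filter width * output filters
w_bound = np.sqrt(6. / (fan_in + fan_out))
print(round(float(w_bound), 4))  # approximately 0.138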
Example 25
Project: human-rl   Author: gsastry   File: model.py    (MIT License)
def __init__(self, ob_space, ac_space, layers=[256], **kwargs):
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

        rank = len(ob_space)

        if rank == 3: # pixel input
            for i in range(4):
                x = tf.nn.elu(conv2d(x, 32, "c{}".format(i + 1), [3, 3], [2, 2]))
        elif rank == 1: # plain features
            #x = tf.nn.elu(linear(x, 256, "l1", normalized_columns_initializer(0.01)))
            pass
        else:
            raise TypeError("observation space must have rank 1 or 3, got %d" % rank)

        x = flatten(x)

        for i, layer in enumerate(layers):
            x = tf.nn.elu(linear(x, layer, "l{}".format(i + 1), tf.contrib.layers.xavier_initializer()))

        self.logits = linear(x, ac_space, "action", tf.contrib.layers.xavier_initializer())
        self.vf = tf.reshape(linear(x, 1, "value", tf.contrib.layers.xavier_initializer()), [-1])
        self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
        self.state_in = [] 
Example 26
Project: human-rl   Author: gsastry   File: model.py    (MIT License)
def __init__(self, ob_space, ac_space, size=256, **kwargs):
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

        for i in range(4):
            x = tf.nn.elu(conv2d(x, 32, "l{}".format(i + 1), [3, 3], [2, 2]))
        # introduce a "fake" batch dimension of 1 after flatten so that we can do GRU over time dim
        x = tf.expand_dims(flatten(x), 1)

        gru = rnn.GRUCell(size)

        h_init = np.zeros((1, size), np.float32)
        self.state_init = [h_init]
        h_in = tf.placeholder(tf.float32, [1, size])
        self.state_in = [h_in]

        gru_outputs, gru_state = tf.nn.dynamic_rnn(
            gru, x, initial_state=h_in, sequence_length=[size], time_major=True)
        x = tf.reshape(gru_outputs, [-1, size])
        self.logits = linear(x, ac_space, "action", normalized_columns_initializer(0.01))
        self.vf = tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1])
        self.state_out = [gru_state[:1]]
        self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name) 
Example 27
Project: human-rl   Author: gsastry   File: classifier_utils.py    (MIT License)
def metrics(self, X, y):
        metrics = {}
        y_pred_pair, loss = self.predict_proba_with_loss(X, y)
        y_pred = y_pred_pair[:,1]  ## From softmax pair to prob of catastrophe
        
        metrics['loss'] = loss
        threshold = self.threshold_from_data(X, y)
        metrics['threshold'] = threshold
        metrics['np.std(y_pred)'] = np.std(y_pred)
        denom = np.count_nonzero(y == False)
        num = np.count_nonzero(np.logical_and(y == False, y_pred >= threshold))
        metrics['fpr'] = float(num) / float(denom)
        if any(y) and not all(y):
            metrics['auc'] = roc_auc_score(y, y_pred)
            y_pred_bool = y_pred >= threshold
            if (any(y_pred_bool) and not all(y_pred_bool)):
                metrics['precision'] = precision_score(np.array(y, dtype=np.float32), y_pred_bool)
                metrics['recall'] = recall_score(y, y_pred_bool)

        return metrics 
Example 28
Project: human-rl   Author: gsastry   File: classifier_tf.py    (MIT License)
def classification_metrics(y, y_pred, threshold):
    metrics = {}
    metrics['threshold'] = threshold_from_predictions(y, y_pred, 0)
    metrics['np.std(y_pred)'] = np.std(y_pred)
    metrics['positive_frac_batch'] = float(np.count_nonzero(y == True)) / len(y)
    denom = np.count_nonzero(y == False)
    num = np.count_nonzero(np.logical_and(y == False, y_pred >= threshold))
    if denom > 0:
        metrics['fpr'] = float(num) / float(denom)
    if any(y) and not all(y):
        metrics['auc'] = roc_auc_score(y, y_pred)
        y_pred_bool = y_pred >= threshold
        if (any(y_pred_bool) and not all(y_pred_bool)):
            metrics['precision'] = precision_score(np.array(y, dtype=np.float32), y_pred_bool)
            metrics['recall'] = recall_score(y, y_pred_bool)
    return metrics 
Example 29
Project: human-rl   Author: gsastry   File: model.py    (MIT License)
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None):
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[:3])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = np.prod(filter_shape[:2]) * num_filters
        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))

        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.constant_initializer(0.0),
                            collections=collections)
        return tf.nn.conv2d(x, w, stride_shape, pad) + b 
Example 30
Project: human-rl   Author: gsastry   File: model.py    (MIT License)
def __init__(self, ob_space, ac_space, layers=[256], **kwargs):
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

        rank = len(ob_space)

        if rank == 3: # pixel input
            for i in range(4):
                x = tf.nn.elu(conv2d(x, 32, "c{}".format(i + 1), [3, 3], [2, 2]))
        elif rank == 1: # plain features
            #x = tf.nn.elu(linear(x, 256, "l1", normalized_columns_initializer(0.01)))
            pass
        else:
            raise TypeError("observation space must have rank 1 or 3, got %d" % rank)

        x = flatten(x)

        for i, layer in enumerate(layers):
            x = tf.nn.elu(linear(x, layer, "l{}".format(i + 1), tf.contrib.layers.xavier_initializer()))

        self.logits = linear(x, ac_space, "action", tf.contrib.layers.xavier_initializer())
        self.vf = tf.reshape(linear(x, 1, "value", tf.contrib.layers.xavier_initializer()), [-1])
        self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
        self.state_in = [] 
Example 31
Project: human-rl   Author: gsastry   File: model.py    (MIT License)
def __init__(self, ob_space, ac_space, size=256, **kwargs):
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

        for i in range(4):
            x = tf.nn.elu(conv2d(x, 32, "l{}".format(i + 1), [3, 3], [2, 2]))
        # introduce a "fake" batch dimension of 1 after flatten so that we can do GRU over time dim
        x = tf.expand_dims(flatten(x), 1)

        gru = rnn.GRUCell(size)

        h_init = np.zeros((1, size), np.float32)
        self.state_init = [h_init]
        h_in = tf.placeholder(tf.float32, [1, size])
        self.state_in = [h_in]

        gru_outputs, gru_state = tf.nn.dynamic_rnn(
            gru, x, initial_state=h_in, sequence_length=[size], time_major=True)
        x = tf.reshape(gru_outputs, [-1, size])
        self.logits = linear(x, ac_space, "action", normalized_columns_initializer(0.01))
        self.vf = tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1])
        self.state_out = [gru_state[:1]]
        self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name) 
Example 32
Project: distributional_perspective_on_RL   Author: Kiwoo   File: tf_util.py    (license)
def __init__(self, shape, name=None):
        """Takes input in uint8 format which is cast to float32 and divided by 255
        before passing it to the model.

        On GPU this ensures lower data transfer times.

        Parameters
        ----------
        shape: [int]
            shape of the tensor.
        name: str
            name of the underlying placeholder
        """

        super().__init__(tf.placeholder(tf.uint8, [None] + list(shape), name=name))
        self._shape = shape
        self._output = tf.cast(super().get(), tf.float32) / 255.0 
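
The same cast-and-rescale idea in plain NumPy, for reference; the frame shape here is an illustrative assumption, not taken from the project:

import numpy as np

frame = np.random.randint(0, 256, size=(84, 84, 3), dtype=np.uint8)  # e.g. one RGB frame
scaled = frame.astype(np.float32) / 255.0                            # values now in [0.0, 1.0]
print(scaled.dtype, float(scaled.min()), float(scaled.max()))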
Example 33
Project: pyelastix   Author: almarklein   File: pyelastix.py    (MIT License)
def _get_dtype_maps():
    """ Get dictionaries to map numpy data types to ITK types and the 
    other way around.
    """
    
    # Define pairs
    tmp = [ (np.float32, 'MET_FLOAT'),  (np.float64, 'MET_DOUBLE'),
            (np.uint8, 'MET_UCHAR'),    (np.int8, 'MET_CHAR'),
            (np.uint16, 'MET_USHORT'),  (np.int16, 'MET_SHORT'),
            (np.uint32, 'MET_UINT'),    (np.int32, 'MET_INT'),
            (np.uint64, 'MET_ULONG'),   (np.int64, 'MET_LONG') ]
    
    # Create dictionaries
    map1, map2 = {}, {}
    for np_type, itk_type in tmp:
        map1[np_type.__name__] = itk_type
        map2[itk_type] = np_type.__name__
    
    # Done
    return map1, map2 
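
A hedged usage sketch of the two maps returned above, assuming _get_dtype_maps is available as shown:

map1, map2 = _get_dtype_maps()
print(map1['float32'])    # 'MET_FLOAT'
print(map2['MET_FLOAT'])  # 'float32'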
Example 34
Project: seq2seq   Author: google   File: losses_test.py    (license)
def test_op(self):
    logits = np.random.randn(self.sequence_length, self.batch_size,
                             self.vocab_size)
    logits = logits.astype(np.float32)
    sequence_length = np.array([1, 2, 3, 4])
    targets = np.random.randint(0, self.vocab_size,
                                [self.sequence_length, self.batch_size])
    losses = seq2seq_losses.cross_entropy_sequence_loss(logits, targets,
                                                        sequence_length)

    with self.test_session() as sess:
      losses_ = sess.run(losses)

    # Make sure all losses not past the sequence length are > 0
    np.testing.assert_array_less(np.zeros_like(losses_[:1, 0]), losses_[:1, 0])
    np.testing.assert_array_less(np.zeros_like(losses_[:2, 1]), losses_[:2, 1])
    np.testing.assert_array_less(np.zeros_like(losses_[:3, 2]), losses_[:3, 2])

    # Make sure all losses past the sequence length are 0
    np.testing.assert_array_equal(losses_[1:, 0], np.zeros_like(losses_[1:, 0]))
    np.testing.assert_array_equal(losses_[2:, 1], np.zeros_like(losses_[2:, 1]))
    np.testing.assert_array_equal(losses_[3:, 2], np.zeros_like(losses_[3:, 2])) 
Example 35
Project: seq2seq   Author: google   File: pooling_encoder.py    (license)
def position_encoding(sentence_size, embedding_size):
  """
  Position Encoding described in section 4.1 of
  End-To-End Memory Networks (https://arxiv.org/abs/1503.08895).

  Args:
    sentence_size: length of the sentence
    embedding_size: dimensionality of the embeddings

  Returns:
    A numpy array of shape [sentence_size, embedding_size] containing
    the fixed position encodings for each sentence position.
  """
  encoding = np.ones((sentence_size, embedding_size), dtype=np.float32)
  ls = sentence_size + 1
  le = embedding_size + 1
  for k in range(1, le):
    for j in range(1, ls):
      encoding[j-1, k-1] = (1.0 - j/float(ls)) - (
          k / float(le)) * (1. - 2. * j/float(ls))
  return encoding 
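
A brief usage sketch of position_encoding above, with small illustrative sizes:

import numpy as np

enc = position_encoding(sentence_size=6, embedding_size=4)
print(enc.shape, enc.dtype)   # (6, 4) float32
assert np.all(np.isfinite(enc))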
Example 36
Project: yolo_tensorflow   Author: hizhangp   File: test.py    (MIT License)
def detect(self, img):
        img_h, img_w, _ = img.shape
        inputs = cv2.resize(img, (self.image_size, self.image_size))
        inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
        inputs = (inputs / 255.0) * 2.0 - 1.0
        inputs = np.reshape(inputs, (1, self.image_size, self.image_size, 3))

        result = self.detect_from_cvmat(inputs)[0]

        for i in range(len(result)):
            result[i][1] *= (1.0 * img_w / self.image_size)
            result[i][2] *= (1.0 * img_h / self.image_size)
            result[i][3] *= (1.0 * img_w / self.image_size)
            result[i][4] *= (1.0 * img_h / self.image_size)

        return result 
Example 37
Project: dl-classification   Author: matthieuo   File: inference.py    (GNU General Public License v3.0)
def __init__(self, check_):
        self.img_feed = tf.placeholder(tf.float32)

        self.output_logits = tf.nn.softmax(
            models.foodv_test(
                self.img_feed,
                reg_val=0.0,
                is_train=False,
                dropout_p=1.0))

        self.sess = tf.Session()

        self.checkpoint_name = check_

        saver = tf.train.Saver()
        print("loading model...")

        saver.restore(self.sess, self.checkpoint_name)

        print("Model loaded !") 
Example 38
Project: dl-classification   Author: matthieuo   File: inference.py    (GNU General Public License v3.0)
def has_tomatoes(self, im_path):
        # load the image
        im = Image.open(im_path)
        im = np.asarray(im, dtype=np.float32)
        im = self.prepare_image(im)

        # launch an inference with the image
        pred = self.sess.run(
            self.output_logits, feed_dict={
                self.img_feed: im.eval(
                    session=self.sess)})

        if np.argmax(pred) == 0:
            print("NOT a tomato ! (confidence : ", pred[0, 0], "%)")
        else:
            print("We have a tomato ! (confidence : ", pred[0, 1], "%)") 
Example 39
Project: Deep360Pilot-optical-flow   Author: yenchenlin   File: flo2img.py    (license)
def read_flow(path, filename):
    flowdata = None
    with open(path + filename + '.flo') as f:
        # Valid .flo file checker
        magic = np.fromfile(f, np.float32, count=1)
        if 202021.25 != magic:
            print 'Magic number incorrect. Invalid .flo file'
        else:
            # Reshape data into 3D array (columns, rows, bands)
            w = int(np.fromfile(f, np.int32, count=1))
            h = int(np.fromfile(f, np.int32, count=1))
            #print 'Reading {}.flo with shape: ({}, {}, 2)'.format(filename, h, w)
            flowdata = np.fromfile(f, np.float32, count=2*w*h)

            # NOTE: numpy shape(h, w, ch) is opposite to image shape(w, h, ch)
            flowdata = np.reshape(flowdata, (h, w, 2))

    return flowdata 
Example 40
Project: j3dview   Author: blank63   File: model.py    (MIT License)
def gl_init(self):
        self.gl_vertex_shader_factory = functools.lru_cache(maxsize=None)(functools.partial(gl.Shader,GL_VERTEX_SHADER))
        self.gl_fragment_shader_factory = functools.lru_cache(maxsize=None)(functools.partial(gl.Shader,GL_FRAGMENT_SHADER))
        self.gl_program_factory = functools.lru_cache(maxsize=None)(GLProgram)
        self.gl_texture_factory = functools.lru_cache(maxsize=None)(gx.texture.GLTexture)

        array_table = {gx.VA_PTNMTXIDX:GLMatrixIndexArray()}
        array_table.update((attribute,array.gl_convert()) for attribute,array in self.array_table.items())

        for shape in self.shapes:
            shape.gl_init(array_table)

        for material in self.materials:
            material.gl_init()

        for texture in self.textures:
            texture.gl_init(self.gl_texture_factory)

        self.gl_joints = [copy.copy(joint) for joint in self.joints]
        self.gl_joint_matrices = numpy.empty((len(self.joints),3,4),numpy.float32)
        self.gl_matrix_table = gl.TextureBuffer(GL_DYNAMIC_DRAW,GL_RGBA32F,(len(self.matrix_descriptors),3,4),numpy.float32)
        self.gl_update_matrix_table()

        self.gl_draw_objects = list(self.gl_generate_draw_objects(self.scene_graph))
        self.gl_draw_objects.sort(key=lambda draw_object: draw_object.material.unknown0) 
Example 41
Project: rank-ordered-autoencoder   Author: paulbertens   File: Visualizer.py    (GNU General Public License v3.0)
def reshapeWeights(self, weights, normalize=True, modifier=None):
        # reshape the weights matrix to a grid for visualization
        n_rows = int(np.sqrt(weights.shape[1]))
        n_cols = int(np.sqrt(weights.shape[1]))
        kernel_size = int(np.sqrt(weights.shape[0]/3))
        weights_grid = np.zeros((int((np.sqrt(weights.shape[0]/3)+1)*n_rows), int((np.sqrt(weights.shape[0]/3)+1)*n_cols), 3), dtype=np.float32)
        for i in range(weights_grid.shape[0]/(kernel_size+1)):
            for j in range(weights_grid.shape[1]/(kernel_size+1)):
                index = i * (weights_grid.shape[0]/(kernel_size+1))+j
                if not np.isclose(np.sum(weights[:, index]), 0):
                    if normalize:
                        weights_grid[i * (kernel_size + 1):i * (kernel_size + 1) + kernel_size, j * (kernel_size + 1):j * (kernel_size + 1) + kernel_size]=\
                            (weights[:, index].reshape(kernel_size, kernel_size, 3) - np.min(weights[:, index])) / ((np.max(weights[:, index]) - np.min(weights[:, index])) + 1.e-6)
                    else:
                        weights_grid[i * (kernel_size + 1):i * (kernel_size + 1) + kernel_size, j * (kernel_size + 1):j * (kernel_size + 1) + kernel_size] =\
                        (weights[:, index].reshape(kernel_size, kernel_size, 3))
                    if modifier is not None:
                        weights_grid[i * (kernel_size + 1):i * (kernel_size + 1) + kernel_size, j * (kernel_size + 1):j * (kernel_size + 1) + kernel_size] *= modifier[index]

        return weights_grid 
Example 42
Project: rank-ordered-autoencoder   Author: paulbertens   File: RankOrderedAutoencoder.py    (GNU General Public License v3.0)
def __init__(self, input_shape, output_shape):
        self.input_shape = input_shape
        self.input = np.zeros((output_shape[0], self.input_shape[0] * self.input_shape[1] *
                               self.input_shape[2]),dtype=np.float32)
        self.output = np.zeros(output_shape, dtype=np.float32)
        self.output_raw = np.zeros_like(self.output)
        self.output_error = np.zeros_like(self.output)
        self.output_average = np.zeros(self.output.shape[1], dtype=np.float32)
        self.weights = np.random.normal(0, np.sqrt(2.0 / (self.output.shape[1] + self.input.shape[1])),
                                        size=(self.input.shape[1], self.output.shape[1])).astype(np.float32)
        self.gradient = np.zeros_like(self.weights)
        self.reconstruction = np.zeros_like(self.weights)
        self.errors = np.zeros_like(self.weights)
        self.output_ranks = np.zeros(self.output.shape[1], dtype=np.int32)
        self.learning_rate = 1
        self.norm_limit = 0.1 
Example 43
Project: bnn-analysis   Author: myshkov   File: sampler.py    (MIT License)
def _normalise_data(self):
        self.train_x_mean = np.zeros(self.input_dim)
        self.train_x_std = np.ones(self.input_dim)

        self.train_y_mean = np.zeros(self.output_dim)
        self.train_y_std = np.ones(self.output_dim)

        if self.normalise_data:
            self.train_x_mean = np.mean(self.train_x, axis=0)
            self.train_x_std = np.std(self.train_x, axis=0)
            self.train_x_std[self.train_x_std == 0] = 1.

            self.train_x = (self.train_x - np.full(self.train_x.shape, self.train_x_mean, dtype=np.float32)) / \
                           np.full(self.train_x.shape, self.train_x_std, dtype=np.float32)

            self.test_x = (self.test_x - np.full(self.test_x.shape, self.train_x_mean, dtype=np.float32)) / \
                          np.full(self.test_x.shape, self.train_x_std, dtype=np.float32)

            self.train_y_mean = np.mean(self.train_y, axis=0)
            self.train_y_std = np.std(self.train_y, axis=0)

            if self.train_y_std == 0:
                self.train_y_std[self.train_y_std == 0] = 1.

            self.train_y = (self.train_y - self.train_y_mean) / self.train_y_std 
Example 44
Project: bnn-analysis   Author: myshkov   File: x3_env.py    (MIT License)
def create_training_test_sets(self):
        # training set
        train_x = np.random.uniform(self.data_interval_left, self.data_interval_right, size=self.data_size)
        train_x = np.sort(train_x)
        train_y = self.true_f(train_x) + 3. * np.random.randn(self.data_size)

        self.train_x = [train_x.reshape((train_x.shape[0], 1))]
        self.train_y = [train_y.reshape((train_y.shape[0], 1))]

        # test set for visualisation
        self.test_x = np.arange(self.view_xrange[0], self.view_xrange[1], 0.01, dtype=np.float32)
        self.test_x = np.reshape(self.test_x, (self.test_x.shape[0], 1))
        self.test_y = self.true_f(self.test_x)
        self.test_y = np.reshape(self.test_y, (self.test_y.shape[0], 1))

        self.test_x = [self.test_x]
        self.test_y = [self.test_y] 
Example 45
Project: pyku   Author: dubvulture   File: sudoku_steps.py    (GNU General Public License v3.0)
def extract_digits(self, image):
        """
        Extract digits from a binary image representing a sudoku
        :param image: binary image/sudoku
        :return: array of digits and their probabilities
        """
        prob = np.zeros(4, dtype=np.float32)
        digits = np.zeros((4, 9, 9), dtype=object)
        for i in range(4):
            labeled, features = label(image, structure=CROSS)
            objs = find_objects(labeled)
            for obj in objs:
                roi = image[obj]
                # center of bounding box
                cy = (obj[0].stop + obj[0].start) / 2
                cx = (obj[1].stop + obj[1].start) / 2
                dists = cdist([[cy, cx]], CENTROIDS, 'euclidean')
                pos = np.argmin(dists)
                cy, cx = pos % 9, pos / 9
                # 28x28 image, center relative to sudoku
                prediction = self.classifier.classify(morph(roi))
                if digits[i, cy, cx] is 0:
                    # Newly found digit
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
                elif prediction[0, 0] > digits[i, cy, cx][0, 0]:
                    # Overlapping! (noise), choose the most probable prediction
                    prob[i] -= digits[i, cy, cx][0, 0]
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
            image = np.rot90(image)
        logging.info(prob)
        return digits[np.argmax(prob)] 
Example 46
Project: pyku   Author: dubvulture   File: digit_classifier.py    (GNU General Public License v3.0)
def _feature(image):
        """
        It's faster but still accurate enough with DSIZE = 14.
        ~0.9983 precision and recall
        :param image:
        :return: raw pixels as feature vector
        """
        image = cv2.resize(image, None, fx=DSIZE/28, fy=DSIZE/28,
                           interpolation=cv2.INTER_LINEAR)
        ret = image.astype(np.float32) / 255
        return ret.ravel() 
Example 47
Project: pyku   Author: dubvulture   File: digit_classifier.py    (GNU General Public License v3.0)
def _zoning(image):
        """
        It works better with DSIZE = 28
        ~0.9967 precision and recall
        :param image:
        :return: #pixels/area ratio of each zone (7x7) as feature vector
        """
        zones = []
        for i in range(0, 28, 7):
            for j in range(0, 28, 7):
                roi = image[i:i+7, j:j+7]
                val = (np.sum(roi)/255) / 49.
                zones.append(val)
        return np.array(zones, np.float32) 
Example 48
Project: pyku   Author: dubvulture   File: digit_classifier.py    (GNU General Public License v3.0)
def create_model(self, train_folder):
        """
        Return the training set, its labels and the trained model
        :param train_folder: folder where to retrieve data
        :return: (train_set, train_labels, trained_model)
        """
        digits = []
        labels = []
        for n in range(1, 10):
            folder = train_folder + str(n)
            samples = [pic for pic in os.listdir(folder)
                       if os.path.isfile(os.path.join(folder, pic))]

            for sample in samples:
                image = cv2.imread(os.path.join(folder, sample))
                # Expecting black on white
                image = 255 - cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                _, image = cv2.threshold(image, 0, 255,
                                         cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                feat = self.feature(image)
                digits.append(feat)
                labels.append(n)

        digits = np.array(digits, np.float32)
        labels = np.array(labels, np.float32)
        if cv2.__version__[0] == '2':
            model = cv2.KNearest()
            model.train(digits, labels)
        else:
            model = cv2.ml.KNearest_create()
            model.train(digits, cv2.ml.ROW_SAMPLE, labels)
        return digits, labels, model 
Example 49
Project: pyku   Author: dubvulture   File: sudoku.py    (GNU General Public License v3.0)
def extract_digits(self, image):
        """
        Extract digits from a binary image representing a sudoku
        :param image: binary image/sudoku
        :return: array of digits and their probabilities
        """
        prob = np.zeros(4, dtype=np.float32)
        digits = np.zeros((4, 9, 9), dtype=object)
        for i in range(4):
            labeled, features = label(image, structure=CROSS)
            objs = find_objects(labeled)
            for obj in objs:
                roi = image[obj]
                # center of bounding box
                cy = (obj[0].stop + obj[0].start) / 2
                cx = (obj[1].stop + obj[1].start) / 2
                dists = cdist([[cy, cx]], CENTROIDS, 'euclidean')
                pos = np.argmin(dists)
                cy, cx = pos % 9, pos / 9
                # 28x28 image, center relative to sudoku
                prediction = self.classifier.classify(morph(roi))
                if digits[i, cy, cx] is 0:
                    # Newly found digit
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
                elif prediction[0, 0] > digits[i, cy, cx][0, 0]:
                    # Overlapping! (noise), choose the most probable prediction
                    prob[i] -= digits[i, cy, cx][0, 0]
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
            image = np.rot90(image)
        logging.info(prob)
        return digits[np.argmax(prob)] 
Example 50
Project: IntroToDeepLearning   Author: robb-brown   File: input_data.py    (MIT License)
def __init__(self, images, labels, fake_data=False):
    if fake_data:
      self._num_examples = 10000
    else:
      assert images.shape[0] == labels.shape[0], (
          "images.shape: %s labels.shape: %s" % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]

      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      self.imageShape = images.shape[1:]
      self.imageChannels = self.imageShape[2]

      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2] * images.shape[3])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    try:
      if len(numpy.shape(self._labels)) == 1:
        self._labels = dense_to_one_hot(self._labels,len(numpy.unique(self._labels)))
    except:
      traceback.print_exc()
    self._epochs_completed = 0
    self._index_in_epoch = 0