Python numpy.where() Examples

The following are code examples showing how to use numpy.where(). They are taken from open source Python projects, listed with the project, author, file, and license they come from.
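As a quick orientation before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the two calling conventions of numpy.where() that appear throughout this page: the condition-only form, which returns a tuple of index arrays, and the three-argument form, which selects element-wise between two values or arrays.

import numpy as np

a = np.array([3, 7, 1, 9, 4])

# Condition-only form: a tuple of index arrays, one per dimension.
idx = np.where(a > 3)
print(idx)       # (array([1, 3, 4]),)
print(a[idx])    # [7 9 4]

# Three-argument form: element-wise selection between two values/arrays.
print(np.where(a > 3, 3, a))   # [3 3 1 3 3]

# On a 2D array the condition-only form yields (row_indices, col_indices).
m = np.array([[0, 1], [1, 0]])
rows, cols = np.where(m == 1)
print(list(zip(rows, cols)))   # coordinate pairs (0, 1) and (1, 0)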

Example 1
Project: b2ac   Author: hbldh   File: reference.py    MIT License 6 votes
def fit_B2AC(points):
    """Ellipse fitting in Python with numerically unstable algorithm.

    Described `here <http://research.microsoft.com/pubs/67845/ellipse-pami.pdf>`_.

    N.B. Do not use, since it works with an almost singular matrix.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    import scipy.linalg as scla

    constraint_matrix = np.zeros((6, 6))
    constraint_matrix[0, 2] = 2
    constraint_matrix[1, 1] = -1
    constraint_matrix[2, 0] = 2

    S = _calculate_scatter_matrix_py(points[:, 0], points[:, 1])

    evals, evect = scla.eig(S, constraint_matrix)
    ind = np.where(evals == (evals[evals > 0].min()))[0][0]
    return evect[:, ind] 
Example 2
Project: b2ac   Author: hbldh   File: unstable.py    MIT License 6 votes
def fit_unstable_B2AC(points):
    """Ellipse fitting in Python with numerically unstable algorithm. Requires SciPy to run!

    Described `here <http://research.microsoft.com/pubs/67845/ellipse-pami.pdf>`_.

    N.B. Do not use, since it works with an almost singular matrix.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    import scipy.linalg as scla

    constraint_matrix = np.zeros((6, 6))
    constraint_matrix[0, 2] = 2
    constraint_matrix[1, 1] = -1
    constraint_matrix[2, 0] = 2

    S = _calculate_scatter_matrix_double(points[:, 0], points[:, 1])

    eigenvalues, eigenvectors = scla.eig(S, constraint_matrix)
    ind = np.where(eigenvalues == (eigenvalues[eigenvalues > 0].min()))[0][0]
    return eigenvectors[:, ind] 
Example 3
Project: building-boundary   Author: Geodan   File: segmentation.py    MIT License 6 votes
def ransac_line_segmentation(points, distance):
    """
    Segment a line using RANSAC.

    Parameters
    ----------
    points : (Mx2) array
        The coordinates of the points
    distance : float
        The maximum distance between a point and a line for a point to be
        considered belonging to that line.

    Returns
    -------
    inliers : list of bool
        True where point is an inlier.
    """
    _, inliers = ransac(points, LineModelND,
                        min_samples=2,
                        residual_threshold=distance,
                        max_trials=1000)
    return inliers 
Example 4
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: train_val.py    MIT License 6 votes
def filter_roidb(roidb):
  """Remove roidb entries that have no usable RoIs."""

  def is_valid(entry):
    # Valid images have:
    #   (1) At least one foreground RoI OR
    #   (2) At least one background RoI
    overlaps = entry['max_overlaps']
    # find boxes with sufficient overlap
    fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
    # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
    bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
                       (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
    # image is only valid if such boxes exist
    valid = len(fg_inds) > 0 or len(bg_inds) > 0
    return valid

  num = len(roidb)
  filtered_roidb = [entry for entry in roidb if is_valid(entry)]
  num_after = len(filtered_roidb)
  print('Filtered {} roidb entries: {} -> {}'.format(num - num_after,
                                                     num, num_after))
  return filtered_roidb 
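The foreground/background test above combines two comparisons with &. A small self-contained sketch of that step follows; the thresholds here are illustrative stand-ins, not the real cfg.TRAIN values.

import numpy as np

FG_THRESH, BG_THRESH_HI, BG_THRESH_LO = 0.5, 0.5, 0.1   # illustrative only
overlaps = np.array([0.05, 0.2, 0.55, 0.9])
fg_inds = np.where(overlaps >= FG_THRESH)[0]
bg_inds = np.where((overlaps < BG_THRESH_HI) & (overlaps >= BG_THRESH_LO))[0]
print(fg_inds)   # [2 3]
print(bg_inds)   # [1]
# The parentheses around each comparison are required because & binds
# more tightly than the comparison operators.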
Example 5
Project: cgp-cnn   Author: sg-nm   File: cgp.py    MIT License 6 votes
def _evaluation(self, pop, eval_flag):
        # create network list
        net_lists = []
        active_index = np.where(eval_flag)[0]
        for i in active_index:
            net_lists.append(pop[i].active_net_list())

        # evaluation
        fp = self.eval_func(net_lists)
        for i, j in enumerate(active_index):
            pop[j].eval = fp[i]
        evaluations = np.zeros(len(pop))
        for i in range(len(pop)):
            evaluations[i] = pop[i].eval

        self.num_eval += len(net_lists)
        return evaluations 
Example 6
Project: wikilinks   Author: trovdimi   File: weighted_pagerank.py    MIT License 6 votes
def pickle_correlations_zeros_january():
    db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
    conn = db._create_connection()

    print('read')
    df = pd.read_sql('select source_article_id, target_article_id from link_features', conn)
    print('loaded links')
    df2 = pd.read_sql('select prev_id, curr_id, counts from clickstream_derived_en_201501  where link_type_derived= "internal-link";',  conn)
    print('loaded counts')
    result = pd.merge(df, df2, how='left', left_on = ['source_article_id', 'target_article_id'], right_on = ['prev_id', 'curr_id'])
    print('merged counts')
    print(result)
    article_counts = result.groupby(by=["target_article_id"])['counts'].sum().reset_index()
    article_counts['counts'].fillna(0.0, inplace=True)
    print(article_counts)
    print('write to file')
    article_counts[["target_article_id","counts"]].to_csv(TMP+'january_article_counts.tsv', sep='\t', index=False) 
Example 7
Project: ieml   Author: IEMLdev   File: tools.py    GNU General Public License v3.0 6 votes
def _tree_graph_path_of_node(tree_graph, node):
    if node in tree_graph.nodes:
        nodes = [(node, False)]
    else:
        nodes = []

    # can be a mode
    nodes += [(c[0], True) for c_list in tree_graph.transitions.values() for c in c_list if c[1][2] == node]
    if not nodes:
        raise ValueError("Node not in tree graph : %s" % str(node))

    def _build_coord(node, mode=False):
        if node == tree_graph.root:
            return [Coordinate(kind='s')]

        parent = tree_graph.nodes[numpy.where(tree_graph.array[:, tree_graph.nodes_index[node]])[0][0]]

        return _build_coord(parent) + \
               [Coordinate(index=[c[0] for c in tree_graph.transitions[parent]].index(node), kind='m' if mode else 'a')]

    return AdditivePath([MultiplicativePath(_build_coord(node, mode)) for node, mode in nodes]) 
Example 8
Project: Automated-Social-Annotation   Author: acadTags   File: SVM.py    MIT License 6 votes
def do_eval(modelToEval, evalX_embedded, evalY,hamming_q=FLAGS.ave_labels_per_doc):
    y_pred = modelToEval.predict(evalX_embedded)
    y_true = np.asarray(evalY)
    acc, prec, rec, hamming_loss = 0.0, 0.0, 0.0, 0.0
    for i in range(len(y_pred)):
        label_predicted = np.where(y_pred[i]==1)[0]
        curr_acc = calculate_accuracy(label_predicted,y_true[i])
        acc = acc + curr_acc
        curr_prec, curr_rec = calculate_precision_recall(label_predicted,y_true[i])
        prec = prec + curr_prec
        rec = rec + curr_rec
        curr_hl = calculate_hamming_loss(label_predicted,y_true[i])
        hamming_loss = hamming_loss + curr_hl
    acc = acc/float(len(y_pred))
    prec = prec/float(len(y_pred))
    rec = rec/float(len(y_pred))
    hamming_loss = hamming_loss/float(len(y_pred))/FLAGS.ave_labels_per_doc
    if prec+rec != 0:
        f_measure = 2*prec*rec/(prec+rec)
    else:
        f_measure = 0
    return acc,prec,rec,f_measure,hamming_loss

# this also needs evalX 
Example 9
Project: Automated-Social-Annotation   Author: acadTags   File: SVM.py    MIT License 6 votes
def display_for_qualitative_evaluation(modelToEval, evalX_embedded, evalX,evalY,vocabulary_index2word,vocabulary_index2word_label):
    prediction_str=""
    #generate the doc indexes same as for the deep learning models.
    number_examples=len(evalY)
    rn_dict={}
    rn.seed(1) # set the seed to produce same documents for prediction
    batch_size=128
    for i in range(0,500):
        batch_chosen=rn.randint(0,number_examples//batch_size)
        x_chosen=rn.randint(0,batch_size)
        #rn_dict[(batch_chosen*batch_size,x_chosen)]=1
        rn_dict[batch_chosen*batch_size+x_chosen]=1
        
    y_pred = modelToEval.predict(evalX_embedded)
    y_true = np.asarray(evalY)    
    for i in range(len(y_pred)):
        label_predicted = np.where(y_pred[i]==1)[0]
        if rn_dict.get(i) == 1:
            doc = 'doc: ' + ' '.join(display_results(evalX[i],vocabulary_index2word))
            pred = 'prediction-svm: ' + ' '.join(display_results(label_predicted,vocabulary_index2word_label))
            get_indexes = lambda x, xs: [i for (y, i) in zip(xs, range(len(xs))) if x == y]
            label = 'labels: ' + ' '.join(display_results(get_indexes(1,evalY[i]),vocabulary_index2word_label))
            prediction_str = prediction_str + '\n' + doc + '\n' + pred + '\n' + label + '\n'
    
    return prediction_str 
Example 10
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: proposal_target_layer.py    MIT License 6 votes
def _get_bbox_regression_labels(bbox_target_data, num_classes):
    """Bounding-box regression targets (bbox_target_data) are stored in a
    compact form N x (class, tx, ty, tw, th)

    This function expands those targets into the 4-of-4*K representation used
    by the network (i.e. only one class has non-zero targets).

    Returns:
        bbox_target (ndarray): N x 4K blob of regression targets
        bbox_inside_weights (ndarray): N x 4K blob of loss weights
    """

    clss = bbox_target_data[:, 0]
    bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
    bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
    inds = np.where(clss > 0)[0]
    for ind in inds:
        cls = clss[ind]
        start = int(4 * cls)
        end = start + 4
        bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
        bbox_inside_weights[ind, start:end] = cfg.FLAGS2["bbox_inside_weights"]
    return bbox_targets, bbox_inside_weights 
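To see what the expansion produces, here is the same loop run on made-up target data for num_classes = 3 (a minimal sketch, values chosen only for illustration):

import numpy as np

num_classes = 3
# (class, tx, ty, tw, th) per RoI; row 0 is background, row 1 is class 2.
bbox_target_data = np.array([[0, 0.0, 0.0, 0.0, 0.0],
                             [2, 0.1, 0.2, 0.3, 0.4]], dtype=np.float32)
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
for ind in np.where(clss > 0)[0]:          # foreground rows only
    start = int(4 * clss[ind])
    bbox_targets[ind, start:start + 4] = bbox_target_data[ind, 1:]
print(bbox_targets[1])   # columns 8..11 hold 0.1 0.2 0.3 0.4, all others are 0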
Example 11
Project: kuaa   Author: rafaelwerneck   File: plugin_majority_voting.py    GNU General Public License v3.0 6 votes
def majority_voting(predict_set, index):
    """
    Performs the calculation of the majority voting for the list of results,
    returning a numpy array with the majority class as 1.0, and the other
    classes as 0.0.
    """
    
    size_predict = len(predict_set[INDEX_ZERO][INDEX_ZERO])
    
    list_max_index = []
    mv = zeros(size_predict)
    for predict_list in predict_set:
        predict = predict_list[index]
        pred_max = max(predict)
        index_max = where(array(predict) == pred_max)[INDEX_ZERO][INDEX_ZERO]
        list_max_index.append(index_max)
    
    index_mv = majority(list_max_index)
    mv[index_mv] = 1.0
    
    return mv.tolist() 
Example 12
Project: kuaa   Author: rafaelwerneck   File: OPF.py    GNU General Public License v3.0 6 votes
def __findPrototypes(self, MST, labels):
        """
        Parameters:
        MST = MST adjacency matrix
        labels = labels of the nodes
        Note:

        Return:
        seeds = List with OPF prototypes
        """
        n = MST.shape[0]
        if n != len(labels): return []

        seeds = []

        for i in range(n):
            w = MST[i, :]
            wix = np.where(w != float("inf"))[0]
            l = [labels[x] for x in wix]
            if len(set(l + [labels[i]])) > 1:
                seeds.append(i)

        return seeds 
Example 13
Project: Lane-And-Vehicle-Detection   Author: JustinHeaton   File: main.py    MIT License 6 votes
def found_search(self, x, y):
        '''
        This function is applied when the lane lines have been detected in the previous frame.
        It uses a sliding window to search for lane pixels in close proximity (+/- 25 pixels in the x direction)
        around the previous detected polynomial.
        '''
        xvals = []
        yvals = []
        if self.found == True:
            i = 720
            j = 630
            while j >= 0:
                yval = np.mean([i,j])
                xval = (np.mean(self.fit0))*yval**2 + (np.mean(self.fit1))*yval + (np.mean(self.fit2))
                x_idx = np.where((((xval - 25) < x)&(x < (xval + 25))&((y > j) & (y < i))))
                x_window, y_window = x[x_idx], y[x_idx]
                if np.sum(x_window) != 0:
                    # np.append returns a new array; keep the result
                    xvals = np.append(xvals, x_window)
                    yvals = np.append(yvals, y_window)
                i -= 90
                j -= 90
        if np.sum(xvals) == 0:
            self.found = False # If no lane pixels were detected then perform blind search
        return xvals, yvals, self.found 
Example 14
Project: mmdetection   Author: open-mmlab   File: base.py    Apache License 2.0 6 votes
def forward_train(self, imgs, img_metas, **kwargs):
        """
        Args:
            img (list[Tensor]): list of tensors of shape (1, C, H, W).
                Typically these should be mean centered and std scaled.

            img_metas (list[dict]): list of image info dict where each dict
                has:
                'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.

             **kwargs: specific to concrete implementation
        """
        pass 
Example 15
Project: mmdetection   Author: open-mmlab   File: sampler.py    Apache License 2.0 6 votes
def __iter__(self):
        indices = []
        for i, size in enumerate(self.group_sizes):
            if size == 0:
                continue
            indice = np.where(self.flag == i)[0]
            assert len(indice) == size
            np.random.shuffle(indice)
            num_extra = int(np.ceil(size / self.samples_per_gpu)
                            ) * self.samples_per_gpu - len(indice)
            indice = np.concatenate(
                [indice, np.random.choice(indice, num_extra)])
            indices.append(indice)
        indices = np.concatenate(indices)
        indices = [
            indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
            for i in np.random.permutation(
                range(len(indices) // self.samples_per_gpu))
        ]
        indices = np.concatenate(indices)
        indices = indices.astype(np.int64).tolist()
        assert len(indices) == self.num_samples
        return iter(indices) 
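The key numpy.where() step in this sampler is collecting the sample indices of each group from the flag array. A standalone sketch with a toy flag array:

import numpy as np

flag = np.array([0, 1, 0, 1, 1, 0])
group_sizes = np.bincount(flag)            # [3 3]
for i, size in enumerate(group_sizes):
    indice = np.where(flag == i)[0]        # indices belonging to group i
    assert len(indice) == size
    print(i, indice)
# 0 [0 2 5]
# 1 [1 3 4]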
Example 16
Project: b2ac   Author: hbldh   File: reference.py    MIT License 5 votes
def fit_improved_B2AC(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.

    This version of the fitting uses float storage during calculations and performs the
    eigensolver on a float array.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    points = np.array(points, 'float')
    S = _calculate_scatter_matrix_py(points[:, 0], points[:, 1])
    S3 = S[3:, 3:]
    S3 = np.array([S3[0, 0], S3[0, 1], S3[0, 2], S3[1, 1], S3[1, 2], S3[2, 2]])
    S3_inv = inverse_symmetric_3by3_double(S3).reshape((3, 3))
    S2 = S[:3, 3:]
    T = -np.dot(S3_inv, S2.T)
    M = S[:3, :3] + np.dot(S2, T)
    inv_mat = np.array([[0, 0, 0.5], [0, -1, 0], [0.5, 0, 0]], 'float')
    M = inv_mat.dot(M)

    e_vals, e_vect = np.linalg.eig(M)

    try:
        elliptical_solution_index = np.where(((4 * e_vect[0, :] * e_vect[2, :]) - ((e_vect[1, :] ** 2))) > 0)[0][0]
    except IndexError:
        # No positive eigenvalues. Fit was not ellipse.
        raise ArithmeticError("No elliptical solution found.")

    a = e_vect[:, elliptical_solution_index]
    if a[0] < 0:
        a = -a
    return np.concatenate((a, np.dot(T, a))) 
Example 17
Project: b2ac   Author: hbldh   File: reference.py    MIT License 5 votes
def fit_improved_B2AC_int(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.

    This version of the fitting uses int64 storage during calculations and performs the
    eigensolver on an integer array.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    S = _calculate_scatter_matrix_c(points[:, 0], points[:, 1])
    S1 = np.array([S[0, 0], S[0, 1], S[0, 2], S[1, 1], S[1, 2], S[2, 2]])
    S3 = np.array([S[3, 3], S[3, 4], S[3, 5], S[4, 4], S[4, 5], S[5, 5]])
    adj_S3, det_S3 = inverse_symmetric_3by3_int(S3)
    S2 = S[:3, 3:]
    T_no_det = - np.dot(np.array(adj_S3.reshape((3, 3)), 'int64'), np.array(S2.T, 'int64'))
    M_term2 = np.dot(np.array(S2, 'int64'), T_no_det) // det_S3
    M = add_symmetric_matrix(M_term2, S1)
    M[[0, 2], :] /= 2
    M[1, :] = -M[1, :]

    e_vals, e_vect = np.linalg.eig(M)

    try:
        elliptical_solution_index = np.where(((4 * e_vect[0, :] * e_vect[2, :]) - ((e_vect[1, :] ** 2))) > 0)[0][0]
    except IndexError:
        # No positive eigenvalues. Fit was not ellipse.
        raise ArithmeticError("No elliptical solution found.")
    a = e_vect[:, elliptical_solution_index]
    return np.concatenate((a, np.dot(T_no_det, a) / det_S3)) 
Example 18
Project: rhodonite   Author: nestauk   File: misc.py    MIT License 5 votes
def get_aggregate_vp(g, vp, vp_grouper, agg=None):
    """aggregate_property_map
    
    Parameters
    ----------
        g : :obj:`graph_tool.Graph` 
            A graph.
        vp : :obj:`str`
            String representing an internal property map of graph, g.
        vp_grouper : :obj:`str` 
            String representing name of an internal property map that will be 
            used to group by.
        agg : :obj:`function` 
            Function to aggregate by. For example, min, max, sum, numpy.mean, 
            etc.
    Returns
    -------
        :obj:`iter` of :obj:`float` 
            Aggregated values from x. 
    """
    vp_vals = get_vp_values(g, vp)
    vp_agg = get_vp_values(g, vp_grouper)
    
    sid_x = vp_agg.argsort()
    # Get where the sorted version of base changes groups
    split_idx = np.flatnonzero(np.diff(vp_agg[sid_x]) > 0) + 1
    # OR np.unique(base[sidx],return_index=True)[1][1:]

    # Finally sort inp based on the sorted indices and split based on split_idx
    vp_vals_grouped = np.split(vp_vals[sid_x], split_idx)
    
    x = sorted(set(vp_agg))
    if agg: 
        y = [agg(vvg) for vvg in vp_vals_grouped]
    else:
        y = vp_vals_grouped

    return x, y 
Example 19
Project: rhodonite   Author: nestauk   File: misc.py    MIT License 5 votes
def get_vp_values(g, vertex_prop_name):
    """get_vp_values
    Retrieves a vertex property from a graph, taking into account any filter.
    
    Parameters
    ----------
        g : :obj:`graph_tool.Graph` 
            A graph.
        vertex_prop_name :obj:`str`
            The name of an internal vertex property.
        
    Returns
    -------
        pm : :obj:`PropertyMapArray` 
            An array of the property map.
    """
    p_type = g.vp[vertex_prop_name].value_type()
    mask = g.get_vertex_filter()[0]
    if mask is not None:
        mask = np.where(mask.get_array())
        if p_type != 'string':
            pm = g.vp[vertex_prop_name].get_array()[mask]
        else:
            pm = [g.vp[vertex_prop_name][v]
                    for m, v in zip(mask, g.vertices()) if m == True]
    else:
        pm = g.vp[vertex_prop_name].get_array()
    
    return pm 
Example 20
Project: building-boundary   Author: Geodan   File: segmentation.py    MIT License 5 votes
def extract_segment(points, indices, distance):
    """
    Extract a line segment from a sequence of points.

    Parameters
    ----------
    points : (Mx2) array
        The coordinates of all the points.
    indices : list of int
        The indices of the points in the sequence.
    distance : float
        The maximum distance between a point and a line for a point to be
        considered belonging to that line.

    Returns
    -------
    segment : list of int
        The indices of the points belonging to the segment/line.
    """
    inliers = ransac_line_segmentation(points[indices], distance)
    inliers = indices[inliers]

    sequences = np.split(inliers, np.where(np.diff(inliers) != 1)[0] + 1)
    segment = list(max(sequences, key=len))

    if len(segment) > 1:
        segment = extend_segment(segment, points, indices, distance)
    elif len(segment) == 1:
        if segment[0] + 1 in indices:
            segment.append(segment[0] + 1)
            segment = extend_segment(segment, points, indices, distance)
        elif segment[0] - 1 in indices:
            segment.insert(0, segment[0] - 1)
            segment = extend_segment(segment, points, indices, distance)

    return segment 
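The np.split(..., np.where(np.diff(...) != 1)[0] + 1) line is what turns a sorted index array into runs of consecutive indices. A minimal sketch of that idiom with made-up inliers:

import numpy as np

inliers = np.array([2, 3, 4, 8, 9, 15])
breaks = np.where(np.diff(inliers) != 1)[0] + 1   # positions where a run ends
sequences = np.split(inliers, breaks)
print([list(s) for s in sequences])       # [[2, 3, 4], [8, 9], [15]]
print(list(max(sequences, key=len)))      # [2, 3, 4] -- the longest run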
Example 21
Project: building-boundary   Author: Geodan   File: segmentation.py    MIT License 5 votes
def get_insert_loc(segments, segment):
    """
    Uses a binary search to find the correct location to insert a new segment.

    Parameters
    ----------
    segments : list of list of int
        The indices of the points belonging to the segments/lines.
    segment : list of int
        The indices of the points belonging to the segment/line.

    Returns
    -------
     : int
        The index where the segment should be inserted.
    """
    if len(segments) == 0:
        return 0
    if segment[0] > segments[-1][0]:
        return len(segments)

    lo = 0
    hi = len(segments)
    while lo < hi:
        mid = (lo + hi) // 2
        if segment[0] < segments[mid][0]:
            hi = mid
        else:
            lo = mid + 1
    return lo 
Example 22
Project: building-boundary   Author: Geodan   File: segmentation.py    MIT License 5 votes
def get_remaining_sequences(indices, mask):
    """
    Gets the remaining sequences given the points that are already part of
    a segment.

    Parameters
    ----------
    indices : list of int
        The indices of the points in the sequence.
    mask : list of bool
        Marks the points that are part of a segment.

    Returns
    -------
    sequences : list of list of int
        The indices of each remaining sequence.
    """
    sequences = np.split(indices, np.where(np.diff(mask) == 1)[0] + 1)

    if mask[0]:
        sequences = [s for i, s in enumerate(sequences) if i % 2 == 0]
    else:
        sequences = [s for i, s in enumerate(sequences) if i % 2 != 0]

    sequences = [s for s in sequences if len(s) > 1]

    return sequences 
Example 23
Project: building-boundary   Author: Geodan   File: merge.py    MIT License 5 votes
def find_pivots(orientations, angle):
    """
    Finds the indices of where the difference in orientation is
    larger than the given angle.

    Parameters
    ----------
    orientations : list of float
        The sequence of orientations
    angle : float or int
        The difference in angle at which a point will be considered a
        pivot.

    Returns
    -------
    pivot_indices : list of int
    """
    ori_diff = np.fromiter((utils.angle.angle_difference(a1, a2) for
                            a1, a2 in utils.create_pairs(orientations)),
                           orientations.dtype)
    pivots_bool = ori_diff > angle
    pivots_idx = list(np.where(pivots_bool)[0] + 1)

    # edge case
    if pivots_idx[-1] > (len(orientations)-1):
        del pivots_idx[-1]
        pivots_idx[0:0] = [0]

    return pivots_idx 
Example 24
Project: RF-Monitor   Author: EarToEarOak   File: cli.py    GNU General Public License v2.0 5 votes
def __on_scan_data(self, event):
        levels = numpy.log10(event['l'])
        levels *= 10

        noise = numpy.percentile(levels,
                                 self._dynP)

        for monitor in self._monitors:
            freq = monitor.get_frequency()
            if monitor.get_enabled():
                monitor.set_noise(noise)
                index = numpy.where(freq == event['f'])[0]
                signal = monitor.set_level(levels[index][0],
                                           event['timestamp'],
                                           self._location)

                if signal is not None:
                    signals = 'Signals: {}\r'.format(self.__count_signals() -
                                                     self._signalCount)
                    self.__std_out(signals, False)
                    if signal.end is not None:
                        recording = format_recording(freq, signal)
                        if self._pushUri is not None:
                            self._push.send(self._pushUri,
                                            recording)
                        if self._server is not None:
                            self._server.send(recording)
                        if self._json:
                            sys.stdout.write(recording + '\n') 
Example 25
Project: RF-Monitor   Author: EarToEarOak   File: gui.py    GNU General Public License v2.0 5 votes
def __on_scan_data(self, event):
        levels = numpy.log10(event['l'])
        levels *= 10
        self._levels = levels

        noise = numpy.percentile(levels,
                                 self._toolbar.get_dynamic_percentile())

        updated = False
        for monitor in self._monitors:
            freq = monitor.get_frequency()
            if monitor.get_enabled():
                monitor.set_noise(noise)
                index = numpy.where(freq == event['f'])[0]
                signal = monitor.set_level(levels[index][0],
                                           event['timestamp'],
                                           self._location)
                if signal is not None:
                    updated = True
                    if signal.end is not None:
                        recording = format_recording(freq, signal)
                        if self._settings.get_push_enable():
                            self._push.send(self._settings.get_push_uri(),
                                            recording)
                        if self._server is not None:
                            self._server.send(recording)

        if updated:
            if self._isSaved:
                self._isSaved = False
                self.__set_title()
                self.__set_timeline()

        self.__set_spectrum(noise)
        self._rssi.set_noise(numpy.mean(levels))
        self._rssi.set_level(numpy.max(levels)) 
Example 26
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: layer.py    MIT License 5 votes
def _shuffle_roidb_inds(self):
    """Randomly permute the training roidb."""
    # If the random flag is set, 
    # then the database is shuffled according to system time
    # Useful for the validation set
    if self._random:
      st0 = np.random.get_state()
      millis = int(round(time.time() * 1000)) % 4294967295
      np.random.seed(millis)
    
    if cfg.TRAIN.ASPECT_GROUPING:
      raise NotImplementedError
      '''
      widths = np.array([r['width'] for r in self._roidb])
      heights = np.array([r['height'] for r in self._roidb])
      horz = (widths >= heights)
      vert = np.logical_not(horz)
      horz_inds = np.where(horz)[0]
      vert_inds = np.where(vert)[0]
      inds = np.hstack((
          np.random.permutation(horz_inds),
          np.random.permutation(vert_inds)))
      inds = np.reshape(inds, (-1, 2))
      row_perm = np.random.permutation(np.arange(inds.shape[0]))
      inds = np.reshape(inds[row_perm, :], (-1,))
      self._perm = inds
      '''
    else:
      self._perm = np.random.permutation(np.arange(len(self._roidb)))
    # Restore the random state
    if self._random:
      np.random.set_state(st0)
      
    self._cur = 0 
Example 27
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: train_val.py    MIT License 5 votes
def remove_snapshot(self, np_paths, ss_paths):
    to_remove = len(np_paths) - cfg.TRAIN.SNAPSHOT_KEPT
    for c in range(to_remove):
      nfile = np_paths[0]
      os.remove(str(nfile))
      np_paths.remove(nfile)

    to_remove = len(ss_paths) - cfg.TRAIN.SNAPSHOT_KEPT
    for c in range(to_remove):
      sfile = ss_paths[0]
      # To make the code compatible to earlier versions of Tensorflow,
      # where the naming tradition for checkpoints are different
      os.remove(str(sfile))
      ss_paths.remove(sfile) 
Example 28
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: test.py    MIT License 5 votes
def apply_nms(all_boxes, thresh):
  """Apply non-maximum suppression to all predicted boxes output by the
  test_net method.
  """
  num_classes = len(all_boxes)
  num_images = len(all_boxes[0])
  nms_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
  for cls_ind in range(num_classes):
    for im_ind in range(num_images):
      dets = all_boxes[cls_ind][im_ind]
      if len(dets) == 0:
        continue

      x1 = dets[:, 0]
      y1 = dets[:, 1]
      x2 = dets[:, 2]
      y2 = dets[:, 3]
      scores = dets[:, 4]
      inds = np.where((x2 > x1) & (y2 > y1))[0]
      dets = dets[inds,:]
      if len(dets) == 0:
        continue

      keep = nms(torch.from_numpy(dets), thresh).numpy()
      if len(keep) == 0:
        continue
      nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
  return nms_boxes 
Example 29
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: voc_eval.py    MIT License 5 votes
def voc_ap(rec, prec, use_07_metric=False):
  """ ap = voc_ap(rec, prec, [use_07_metric])
  Compute VOC AP given precision and recall.
  If use_07_metric is true, uses the
  VOC 07 11 point method (default:False).
  """
  if use_07_metric:
    # 11 point metric
    ap = 0.
    for t in np.arange(0., 1.1, 0.1):
      if np.sum(rec >= t) == 0:
        p = 0
      else:
        p = np.max(prec[rec >= t])
      ap = ap + p / 11.
  else:
    # correct AP calculation
    # first append sentinel values at the end
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))

    # compute the precision envelope
    for i in range(mpre.size - 1, 0, -1):
      mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # to calculate area under PR curve, look for points
    # where X axis (recall) changes value
    i = np.where(mrec[1:] != mrec[:-1])[0]

    # and sum (\Delta recall) * prec
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
  return ap 
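In the non-VOC07 branch, np.where(mrec[1:] != mrec[:-1])[0] picks out the indices where recall changes, so the area under the precision envelope can be summed as rectangles. A small worked example with a toy curve:

import numpy as np

rec = np.array([0.5, 0.5, 1.0])
prec = np.array([1.0, 0.5, 0.5])
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
for k in range(mpre.size - 1, 0, -1):     # precision envelope
    mpre[k - 1] = np.maximum(mpre[k - 1], mpre[k])
i = np.where(mrec[1:] != mrec[:-1])[0]    # recall change points
print(i)                                  # [0 2]
print(np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]))   # 0.75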
Example 30
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: coco.py    MIT License 5 votes
def __init__(self, image_set, year):
    imdb.__init__(self, 'coco_' + year + '_' + image_set)
    # COCO specific config options
    self.config = {'use_salt': True,
                   'cleanup': True}
    # name, paths
    self._year = year
    self._image_set = image_set
    self._data_path = osp.join(cfg.DATA_DIR, 'coco')
    # load COCO API, classes, class <-> id mappings
    self._COCO = COCO(self._get_ann_file())
    cats = self._COCO.loadCats(self._COCO.getCatIds())
    self._classes = tuple(['__background__'] + [c['name'] for c in cats])
    self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
    self._class_to_coco_cat_id = dict(list(zip([c['name'] for c in cats],
                                               self._COCO.getCatIds())))
    self._image_index = self._load_image_set_index()
    # Default to roidb handler
    self.set_proposal_method('gt')
    self.competition_mode(False)

    # Some image sets are "views" (i.e. subsets) into others.
    # For example, minival2014 is a random 5000 image subset of val2014.
    # This mapping tells us where the view's images and proposals come from.
    self._view_map = {
      'minival2014': 'val2014',  # 5k val2014 subset
      'valminusminival2014': 'val2014',  # val2014 \setminus minival2014
      'test-dev2015': 'test2015',
    }
    coco_name = image_set + year  # e.g., "val2014"
    self._data_name = (self._view_map[coco_name]
                       if coco_name in self._view_map
                       else coco_name)
    # Dataset splits that have ground-truth annotations (test splits
    # do not have gt annotations)
    self._gt_splits = ('train', 'val', 'minival') 
Example 31
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: coco.py    MIT License 5 votes
def _print_detection_eval_metrics(self, coco_eval):
    IoU_lo_thresh = 0.5
    IoU_hi_thresh = 0.95

    def _get_thr_ind(coco_eval, thr):
      ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
                     (coco_eval.params.iouThrs < thr + 1e-5))[0][0]
      iou_thr = coco_eval.params.iouThrs[ind]
      assert np.isclose(iou_thr, thr)
      return ind

    ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
    ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
    # precision has dims (iou, recall, cls, area range, max dets)
    # area range index 0: all area ranges
    # max dets index 2: 100 per image
    precision = \
      coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
    ap_default = np.mean(precision[precision > -1])
    print(('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
           '~~~~').format(IoU_lo_thresh, IoU_hi_thresh))
    print('{:.1f}'.format(100 * ap_default))
    for cls_ind, cls in enumerate(self.classes):
      if cls == '__background__':
        continue
      # minus 1 because of __background__
      precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
      ap = np.mean(precision[precision > -1])
      print('{:.1f}'.format(100 * ap))

    print('~~~~ Summary metrics ~~~~')
    coco_eval.summarize() 
Example 32
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: ds_utils.py    MIT License 5 votes
def filter_small_boxes(boxes, min_size):
  w = boxes[:, 2] - boxes[:, 0]
  h = boxes[:, 3] - boxes[:, 1]
  keep = np.where((w >= min_size) & (h > min_size))[0]
  return keep 
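A quick self-contained sketch of the same filtering step with toy boxes in (x1, y1, x2, y2) format:

import numpy as np

boxes = np.array([[0, 0, 10, 10],
                  [0, 0,  3,  2],
                  [5, 5, 40, 30]])
min_size = 5
w = boxes[:, 2] - boxes[:, 0]
h = boxes[:, 3] - boxes[:, 1]
print(np.where((w >= min_size) & (h > min_size))[0])   # [0 2] -- box 1 is too small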
Example 33
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: demo.py    MIT License 5 votes
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=3.5)
            )
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                  fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw() 
Example 34
Project: ultra_secret_scripts   Author: CharlesDankoff   File: image_search.py    GNU General Public License v3.0 5 votes
def search_image_in_image(small_image, large_image, precision=0.95):
    template = small_image.astype(np.float32)
    img_rgb = large_image.astype(np.float32)

    template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    img_rgb = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)

    res = cv2.matchTemplate(img_rgb, template, cv2.TM_CCOEFF_NORMED)
    threshold = precision
    loc = np.where(res >= threshold)

    found_positions = list(zip(*loc[::-1]))

    # print("FOUND: {}".format(found_positions))
    return found_positions 
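This template-matching example relies on np.where returning (row_indices, col_indices) for a 2D array; reversing that tuple and zipping gives (x, y) positions. A standalone sketch with a made-up score map:

import numpy as np

res = np.array([[0.10, 0.97],
                [0.99, 0.20]])
loc = np.where(res >= 0.95)
print(loc)                      # (array([0, 1]), array([1, 0])) -- rows, cols
print(list(zip(*loc[::-1])))    # (x, y) = (col, row) pairs: (1, 0) and (0, 1)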
Example 35
Project: prediction-constrained-topic-models   Author: dtak   File: train_and_eval_sklearn_binary_classifier.py    MIT License 5 votes
def predict(self, x):
        ''' Make thresholded predictions

        Returns
        -------
        yhat_N : 1D array of class labels
        '''
        yproba_class1_N = self.clf.predict_proba(x)[:,1]
        # Recall that np.where assigns as follows:
        # first value in self.classes when True
        # second value when False
        yhat_N = np.where(yproba_class1_N <= self.proba_thr_for_class1, *self.classes_)
        return yhat_N 
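A self-contained version of that selection with made-up labels and probabilities (classes_ here plays the role of self.classes_):

import numpy as np

classes_ = np.array([0, 1])                 # [class-0 label, class-1 label]
proba_thr_for_class1 = 0.6
yproba_class1_N = np.array([0.2, 0.61, 0.9, 0.59])
# Unpacking classes_ passes classes_[0] as the "if True" value and
# classes_[1] as the "if False" value.
yhat_N = np.where(yproba_class1_N <= proba_thr_for_class1, *classes_)
print(yhat_N)   # [0 1 1 0]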
Example 36
Project: wikilinks   Author: trovdimi   File: compare_hypothesis.py    MIT License 5 votes
def norm_hyp(matrix):
    print "in norm_hyp"
    tmp = csr_matrix(matrix, copy=True)
    norm_h = tmp.sum(axis=1)
    n_nzeros = np.where(norm_h > 0)
    norm_h[n_nzeros] = 1.0 / norm_h[n_nzeros]
    norm_h = np.array(norm_h).T[0]
    print "in place mod"
    # modify sparse_csc_matrix in place
    csr_scale_rows(tmp.shape[0],
                   tmp.shape[1],
                   tmp.indptr,
                   tmp.indices,
                   tmp.data, norm_h)
    return tmp 
Example 37
Project: wikilinks   Author: trovdimi   File: weighted_pagerank.py    MIT License 5 votes
def norm (hypothesis):
    hypothesis = hypothesis.copy()
    norma = hypothesis.sum(axis=1)
    n_nzeros = np.where(norma > 0)
    n_zeros,_ = np.where(norma == 0)
    norma[n_nzeros] = 1.0 / norma[n_nzeros]
    norma = norma.T[0]
    csr_scale_rows(hypothesis.shape[0], hypothesis.shape[1], hypothesis.indptr, hypothesis.indices, hypothesis.data, norma)
    return hypothesis 
Example 38
Project: wikilinks   Author: trovdimi   File: weighted_pagerank.py    MIT License 5 votes
def correlations(network_name):
    db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
    conn = db._create_connection()
    cursor = conn.cursor()
    # wikipedia  graph  structural statistics

    results = None
    try:
        results = cursor.execute('select c.curr_id,  sum(c.counts) as counts from clickstream_derived c where c.link_type_derived= %s  group by c.curr_id;', ("internal-link",))
        results = cursor.fetchall()


    except MySQLdb.Error as e:
        print('error retrieving xy coord for all links %s (%d)' % (e.args[1], e.args[0])) 
Example 39
Project: wikilinks   Author: trovdimi   File: HypTrails.py    MIT License 5 votes
def zero_rows_norm(self, hypothesis, structur,k):
        norma = hypothesis.sum(axis=1)
        n_zeros = np.where(norma == 0)
        print('n_zeros')
        print(len(n_zeros[0]))
        for x, i in enumerate(n_zeros[0]):
            if x % 1000 == 0:
                print(x, len(n_zeros[0]))
            links = np.where(structur[i,:]!=0)
            hypothesis[i,links[0]] = k / len(links[0])
        print('n_zeros done')


    # def zero_rows_norm_eff(self,hypothesis, structur):
    #     #find zero sum rows in hypothesis
    #     print 'sum hyp'
    #     norma = hypothesis.sum(axis=1)
    #     n_zeros = np.where(norma == 0)
    #     # norm the structure matrix
    #     print 'sum structure'
    #     tmp = structur[n_zeros]
    #     norm_s = tmp.sum(axis=1)
    #     norm_s = np.array(norm_s).T[0]
    #     tmp = tmp/norm_s[:,None]
    #     #replece the zero rows in hypothesis with the corresponding rows in the normed strcuture matrix
    #     print 'replace'
    #     hypotheis[n_zeros,:]=tmp[n_zeros,:] 
Example 40
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes
def APPSA(gt, rc):
    nom = np.sum(gt * rc, axis=0)
    denom = np.linalg.norm(gt, axis=0) * np.linalg.norm(rc, axis=0)
    
    cos = np.where((nom / (denom + 1e-3)) > 1, 1, (nom / (denom + 1e-3)))
    appsa = np.arccos(cos)
    
    return np.sum(appsa) / (gt.shape[1] * gt.shape[0]) 
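The np.where call here acts as an upper clamp: it replaces any cosine slightly above 1 (which floating-point round-off can produce) so that np.arccos never sees an out-of-domain value. A minimal sketch; np.clip(vals, -1, 1) would achieve a similar effect.

import numpy as np

vals = np.array([0.5, 1.0000002, -0.3])
cos = np.where(vals > 1, 1, vals)   # clamp values just above 1 back to 1
print(cos)                          # [ 0.5  1.  -0.3]
print(np.arccos(cos))               # finite angles, no NaN from the 1.0000002 entry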
Example 41
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes
def APPSA(gt, rc):
    nom = np.sum(gt * rc, axis=0)
    denom = np.linalg.norm(gt, axis=0) * np.linalg.norm(rc, axis=0)
    
    cos = np.where((nom / (denom + 1e-3)) > 1, 1, (nom / (denom + 1e-3)))
    appsa = np.arccos(cos)
    
    return np.sum(appsa) / (gt.shape[1] * gt.shape[0]) 
Example 42
Project: Automated-Social-Annotation   Author: acadTags   File: BiGRU_train.py    MIT License 5 votes
def assign_pretrained_word_embedding(sess,vocabulary_index2word,vocab_size,model,num_run,word2vec_model_path=None):
    if num_run==0:
        print("using pre-trained word emebedding.started.word2vec_model_path:",word2vec_model_path)
    # transform embedding input into a dictionary
    # word2vecc=word2vec.load('word_embedding.txt') #load vocab-vector fiel.word2vecc['w91874']
    word2vec_model = word2vec.load(word2vec_model_path, kind='bin')
    word2vec_dict = {}
    for word, vector in zip(word2vec_model.vocab, word2vec_model.vectors):
        word2vec_dict[word] = vector
    word_embedding_2dlist = [[]] * vocab_size  # create an empty word_embedding list: which is a list of list, i.e. a list of word, where each word is a list of values as an embedding vector.
    word_embedding_2dlist[0] = np.zeros(FLAGS.embed_size)  # assign empty for first word:'PAD'
    bound = np.sqrt(6.0) / np.sqrt(vocab_size)  # bound for random variables.
    count_exist = 0;
    count_not_exist = 0
    for i in range(1, vocab_size):  # loop each word
        word = vocabulary_index2word[i]  # get a word
        embedding = None
        try:
            embedding = word2vec_dict[word]  # try to get vector:it is an array.
        except Exception:
            embedding = None
        if embedding is not None:  # the 'word' exist a embedding
            word_embedding_2dlist[i] = embedding;
            count_exist = count_exist + 1  # assign array to this word.
        else:  # no embedding for this word
            word_embedding_2dlist[i] = np.random.uniform(-bound, bound, FLAGS.embed_size);
            count_not_exist = count_not_exist + 1  # init a random value for the word.
    word_embedding_final = np.array(word_embedding_2dlist)  # covert to 2d array.
    #print(word_embedding_final[0]) # print the original embedding for the first word
    word_embedding = tf.constant(word_embedding_final, dtype=tf.float32)  # convert to tensor
    t_assign_embedding = tf.assign(model.Embedding,word_embedding)  # assign this value to our embedding variables of our model.
    sess.run(t_assign_embedding);
    if num_run==0:
        print("word. exists embedding:", count_exist, " ;word not exist embedding:", count_not_exist)
        print("using pre-trained word emebedding.ended...")

# based on a threshold, evaluate on the validation set and report loss and precision (multi-label) 
Example 43
Project: Automated-Social-Annotation   Author: acadTags   File: BiGRU_train.py    MIT License 5 votes
def get_label_using_logits_threshold(logits,threshold=0.5):
    sig = sigmoid_array(logits)
    index_list = np.where(sig > threshold)[0]
    return index_list 
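A standalone sketch of the same thresholding with made-up logits; sigmoid_array is assumed to be the element-wise logistic function and is defined here only to make the example runnable:

import numpy as np

def sigmoid_array(x):
    # element-wise logistic function (assumed behaviour of the helper above)
    return 1.0 / (1.0 + np.exp(-x))

logits = np.array([-2.0, 0.3, 1.5, -0.1])
sig = sigmoid_array(logits)
print(np.where(sig > 0.5)[0])   # [1 2] -- indices of the predicted labels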
Example 44
Project: Automated-Social-Annotation   Author: acadTags   File: HAN_train.py    MIT License 5 votes
def assign_pretrained_word_embedding(sess,vocabulary_index2word,vocab_size,model,num_run,word2vec_model_path=None):
    if num_run==0:
        print("using pre-trained word emebedding.started.word2vec_model_path:",word2vec_model_path)
    # transform embedding input into a dictionary
    # word2vecc=word2vec.load('word_embedding.txt') #load vocab-vector fiel.word2vecc['w91874']
    word2vec_model = word2vec.load(word2vec_model_path, kind='bin')
    word2vec_dict = {}
    for word, vector in zip(word2vec_model.vocab, word2vec_model.vectors):
        word2vec_dict[word] = vector
    word_embedding_2dlist = [[]] * vocab_size  # create an empty word_embedding list: which is a list of list, i.e. a list of word, where each word is a list of values as an embedding vector.
    word_embedding_2dlist[0] = np.zeros(FLAGS.embed_size)  # assign empty for first word:'PAD'
    bound = np.sqrt(6.0) / np.sqrt(vocab_size)  # bound for random variables.
    count_exist = 0;
    count_not_exist = 0
    for i in range(1, vocab_size):  # loop each word
        word = vocabulary_index2word[i]  # get a word
        embedding = None
        try:
            embedding = word2vec_dict[word]  # try to get vector:it is an array.
        except Exception:
            embedding = None
        if embedding is not None:  # the 'word' exist a embedding
            word_embedding_2dlist[i] = embedding;
            count_exist = count_exist + 1  # assign array to this word.
        else:  # no embedding for this word
            word_embedding_2dlist[i] = np.random.uniform(-bound, bound, FLAGS.embed_size);
            count_not_exist = count_not_exist + 1  # init a random value for the word.
    word_embedding_final = np.array(word_embedding_2dlist)  # covert to 2d array.
    word_embedding = tf.constant(word_embedding_final, dtype=tf.float32)  # convert to tensor
    t_assign_embedding = tf.assign(model.Embedding,word_embedding)  # assign this value to our embedding variables of our model.
    sess.run(t_assign_embedding);
    if num_run==0:
        print("word. exists embedding:", count_exist, " ;word not exist embedding:", count_not_exist)
        print("using pre-trained word emebedding.ended...")

# based on a threshold, evaluate on the validation set and report loss and precision (multi-label) 
Example 45
Project: Automated-Social-Annotation   Author: acadTags   File: JMAN_train.py    MIT License 5 votes
def assign_pretrained_word_embedding(sess,vocabulary_index2word,vocab_size,model,num_run,word2vec_model_path=None):
    if num_run==0:
        print("using pre-trained word emebedding.started.word2vec_model_path:",word2vec_model_path)
    # transform embedding input into a dictionary
    # word2vecc=word2vec.load('word_embedding.txt') #load vocab-vector fiel.word2vecc['w91874']
    word2vec_model = word2vec.load(word2vec_model_path, kind='bin')
    word2vec_dict = {}
    for word, vector in zip(word2vec_model.vocab, word2vec_model.vectors):
        word2vec_dict[word] = vector
    word_embedding_2dlist = [[]] * vocab_size  # create an empty word_embedding list: which is a list of list, i.e. a list of word, where each word is a list of values as an embedding vector.
    word_embedding_2dlist[0] = np.zeros(FLAGS.embed_size)  # assign empty for first word:'PAD'
    bound = np.sqrt(6.0) / np.sqrt(vocab_size)  # bound for random variables.
    count_exist = 0;
    count_not_exist = 0
    for i in range(1, vocab_size):  # loop each word
        word = vocabulary_index2word[i]  # get a word
        embedding = None
        try:
            embedding = word2vec_dict[word]  # try to get vector:it is an array.
        except Exception:
            embedding = None
        if embedding is not None:  # the 'word' exist a embedding
            word_embedding_2dlist[i] = embedding;
            count_exist = count_exist + 1  # assign array to this word.
        else:  # no embedding for this word
            word_embedding_2dlist[i] = np.random.uniform(-bound, bound, FLAGS.embed_size);
            count_not_exist = count_not_exist + 1  # init a random value for the word.
    word_embedding_final = np.array(word_embedding_2dlist)  # covert to 2d array.
    word_embedding = tf.constant(word_embedding_final, dtype=tf.float32)  # convert to tensor
    t_assign_embedding = tf.assign(model.Embedding,word_embedding)  # assign this value to our embedding variables of our model.
    sess.run(t_assign_embedding);
    if num_run==0:
        print("word. exists embedding:", count_exist, " ;word not exist embedding:", count_not_exist)
        print("using pre-trained word emebedding.ended...") 
Example 46
Project: Automated-Social-Annotation   Author: acadTags   File: JMAN_train.py    MIT License 5 votes
def get_label_using_logits_threshold(logits,threshold=0.5):
    sig = sigmoid_array(logits)
    index_list = np.where(sig > threshold)[0]
    return index_list 
Example 47
Project: nonogram-solver   Author: mprat   File: solver.py    MIT License 5 votes
def possibilities_generator(
        prior, min_pos, max_start_pos, constraint_len, total_filled):
    """
    Given a row prior, a min_pos, max_start_pos, and constraint length,
    yield each potential row

    prior is an array of:
        -1 (unknown),
        0 (definitely empty),
        1 (definitely filled)
    """
    prior_filled = np.zeros(len(prior)).astype(bool)
    prior_filled[prior == 1] = True
    prior_empty = np.zeros(len(prior)).astype(bool)
    prior_empty[prior == 0] = True
    for start_pos in range(min_pos, max_start_pos + 1):
        possible = -1 * np.ones(len(prior))
        possible[start_pos:start_pos + constraint_len] = 1
        if start_pos + constraint_len < len(possible):
            possible[start_pos + constraint_len] = 0
        if start_pos > 0:
            possible[start_pos - 1] = 0

        # add in the prior
        possible[np.logical_and(possible == -1, prior == 0)] = 0
        possible[np.logical_and(possible == -1, prior == 1)] = 1

        # if contradiction with prior, continue
        # 1. possible changes prior = 1 to something else
        # 2. possible changes prior = 0 to something else
        # 3. everything is assigned in possible but there are not
        #    enough filled in
        # 4. possible changes nothing about the prior
        if np.any(possible[np.where(prior == 1)[0]] != 1) or \
                np.any(possible[np.where(prior == 0)[0]] != 0) or \
                np.sum(possible == 1) > total_filled or \
                (np.all(possible >= 0) and np.sum(possible == 1) <
                    total_filled) or \
                np.all(prior == possible):
            continue
        yield possible 
Example 48
Project: nonogram-solver   Author: mprat   File: nonogram.py    MIT License 5 votes
def init_from_matrix(self, matrix):
        """
        Args:
            matrix (numpy array): array of arrays representing the solution
        """
        self.solution_state = matrix
        self.solution_list = list(zip(*np.where(matrix == 1)))  # (row, col) pairs of filled cells
        # TODO: finish this function 
Example 49
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: demo.py    MIT License 5 votes
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=3.5)
        )
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                 fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw() 
Example 50
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: minibatch.py    MIT License 5 votes
def get_minibatch(roidb, num_classes):
    """Given a roidb, construct a minibatch sampled from it."""
    num_images = len(roidb)
    # Sample random scales to use for each image in this batch
    random_scale_inds = npr.randint(0, high=len(cfg.FLAGS2["scales"]),
                                    size=num_images)
    assert (cfg.FLAGS.batch_size % num_images == 0), 'num_images ({}) must divide BATCH_SIZE ({})'.format(num_images, cfg.FLAGS.batch_size)

    # Get the input image blob, formatted for caffe
    im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)

    blobs = {'data': im_blob}

    assert len(im_scales) == 1, "Single batch only"
    assert len(roidb) == 1, "Single batch only"

    # gt boxes: (x1, y1, x2, y2, cls)
    if cfg.FLAGS.use_all_gt:
        # Include all ground truth boxes
        gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
    else:
        # For the COCO ground truth boxes, exclude the ones that are ''iscrowd''
        gt_inds = np.where((roidb[0]['gt_classes'] != 0) & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
    gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
    gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
    gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
    blobs['gt_boxes'] = gt_boxes
    blobs['im_info'] = np.array(
        [[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],
        dtype=np.float32)

    return blobs