Python numpy.sum() Examples

The following are 30 code examples showing how to use numpy.sum(). They are extracted from open-source projects; the project, author, source file, and license are listed above each example.

You may also want to check out all available functions and classes of the numpy module.
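
Before the project examples, a minimal sketch of the call itself (toy values, not from any of the projects below):

import numpy as np

a = np.array([[1, 2], [3, 4]])

np.sum(a)          # 10 -- sums every element
np.sum(a, axis=0)  # array([4, 6]) -- column sums
np.sum(a, axis=1)  # array([3, 7]) -- row sums
np.sum(a > 2)      # 2  -- booleans count as 0/1, so this counts elements > 2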

Example 1
Project: libTLDA   Author: wmkouw   File: tcpr.py    License: MIT License
def add_intercept(self, X):
        """Add 1's to data as last features."""
        # Data shape
        N, D = X.shape

        # Check if an intercept column (all 1's) already exists
        if np.any(np.sum(X, axis=0) == N):

            # Report
            print('Intercept is not the last feature. Swapping..')

            # Find which column contains the intercept
            intercept_index = np.argwhere(np.sum(X, axis=0) == N)

            # Remove the existing intercept column (re-added as last below)
            X = X[:, np.setdiff1d(np.arange(D), intercept_index)]

        # Add intercept as last column
        X = np.hstack((X, np.ones((N, 1))))

        # Return augmented data and its new dimensionality
        return X, X.shape[1]
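
The check above exploits the fact that a column of all 1's sums to the number of rows N; the converse does not hold, so it is a heuristic. A small sketch of the idiom (toy matrix, not from libTLDA):

import numpy as np

X = np.array([[1., 5., 0.],
              [1., 2., 3.],
              [1., 7., 1.]])
N, D = X.shape

# column sums equal to N flag candidate intercept columns
# (a column like [0, 2, 1] would also sum to 3, hence "heuristic")
is_intercept = np.sum(X, axis=0) == N
print(is_intercept)  # [ True False False]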
Example 2
Project: DDPAE-video-prediction   Author: jthsieh   File: metrics.py    License: MIT License
def find_match(self, pred, gt):
    '''
    Match each predicted component to a ground-truth ball.
    '''
    batch_size, n_frames_input, n_components, _ = pred.shape
    diff = pred.reshape(batch_size, n_frames_input, n_components, 1, 2) - \
               gt.reshape(batch_size, n_frames_input, 1, n_components, 2)
    diff = np.sum(np.sum(diff ** 2, axis=-1), axis=1)
    # Direct indices
    indices = np.argmin(diff, axis=2)
    ambiguous = np.zeros(batch_size, dtype=np.int8)
    for i in range(batch_size):
      _, counts = np.unique(indices[i], return_counts=True)
      if not np.all(counts == 1):
        ambiguous[i] = 1
    return indices, ambiguous 
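
The reshape-and-subtract above broadcasts pred against gt to get every pairwise difference, and np.sum(..., axis=-1) reduces the coordinate axis into squared distances. The same trick on a minimal 2-D example (toy points, no batch or frame axes):

import numpy as np

pred = np.array([[0., 0.], [1., 1.]])
gt = np.array([[0., 0.], [2., 0.]])

# (2, 1, 2) - (1, 2, 2) broadcasts to (2, 2, 2): every pred vs. every gt
diff = pred[:, None, :] - gt[None, :, :]
sq_dist = np.sum(diff ** 2, axis=-1)
print(sq_dist)
# [[0. 4.]
#  [2. 2.]]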
Example 3
Project: dustmaps   Author: gregreen   File: test_bayestar.py    License: GNU General Public License v2.0
def test_bounds(self):
        """
        Test that out-of-bounds coordinates return NaN reddening, and that
        in-bounds coordinates do not return NaN reddening.
        """

        for mode in (['random_sample', 'random_sample_per_pix',
                      'median', 'samples', 'mean']):
            # Draw random coordinates, both above and below dec = -30 degree line
            n_pix = 1000
            ra = -180. + 360.*np.random.random(n_pix)
            dec = -75. + 90.*np.random.random(n_pix)    # 45 degrees above/below
            c = coords.SkyCoord(ra, dec, frame='icrs', unit='deg')

            ebv_calc = self._bayestar(c, mode=mode)

            nan_below = np.isnan(ebv_calc[dec < -35.])
            nan_above = np.isnan(ebv_calc[dec > -25.])
            pct_nan_above = np.sum(nan_above) / float(nan_above.size)

            # print r'{:s}: {:.5f}% nan above dec=-25 deg.'.format(mode, 100.*pct_nan_above)

            self.assertTrue(np.all(nan_below))
            self.assertTrue(pct_nan_above < 0.05) 
Example 4
Project: dustmaps   Author: gregreen   File: test_iphas.py    License: GNU General Public License v2.0
def test_bounds(self):
        """
        Test that out-of-bounds coordinates return NaN reddening, and that
        in-bounds coordinates do not return NaN reddening.
        """

        for mode in (['random_sample', 'random_sample_per_pix',
                      'median', 'samples', 'mean']):
            # Draw random coordinates on the sphere
            n_pix = 10000
            u, v = np.random.random((2,n_pix))
            l = 360. * u
            b = 90. - np.degrees(np.arccos(2.*v - 1.))
            c = coords.SkyCoord(l, b, frame='galactic', unit='deg')

            A_calc = self._iphas(c, mode=mode)

            in_bounds = (l > 32.) & (l < 213.) & (b < 4.5) & (b > -4.5)
            out_of_bounds = (l < 28.) | (l > 217.) | (b > 7.) | (b < -7.)

            n_nan_in_bounds = np.sum(np.isnan(A_calc[in_bounds]))
            n_finite_out_of_bounds = np.sum(np.isfinite(A_calc[out_of_bounds]))

            self.assertTrue(n_nan_in_bounds == 0)
            self.assertTrue(n_finite_out_of_bounds == 0) 
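
Both dustmaps tests count matches by summing boolean masks, since True is treated as 1. A short sketch:

import numpy as np

A = np.array([0.3, np.nan, 1.2, np.nan, np.inf])

n_nan = np.sum(np.isnan(A))        # 2
n_finite = np.sum(np.isfinite(A))  # 2 (0.3 and 1.2; inf is not finite)
pct_nan = np.sum(np.isnan(A)) / float(A.size)  # 0.4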
Example 5
Project: StructEngPy   Author: zhuoju36   File: dynamic.py    License: MIT License
def solve_modal(model,k:int):
    """
    Solve eigen mode of the MDOF system
    
    params:
        model: FEModel.
        k: number of modes to extract.
    """
    K_,M_=model.K_,model.M_
    if k>model.DOF:
        logger.info('Warning: the modal number to extract is larger than the system DOFs, only %d modes are available'%model.DOF)
        k=model.DOF
    omega2s,modes = sl.eigsh(K_,k,M_,sigma=0,which='LM')
    delta = modes/np.sum(modes,axis=0)
    model.is_solved=True
    model.mode_=delta
    model.omega_=np.sqrt(omega2s).reshape((k,1)) 
Example 6
Project: tensorflow-DeepFM   Author: ChenglongChen   File: main.py    License: MIT License
def _load_data():

    dfTrain = pd.read_csv(config.TRAIN_FILE)
    dfTest = pd.read_csv(config.TEST_FILE)

    def preprocess(df):
        cols = [c for c in df.columns if c not in ["id", "target"]]
        df["missing_feat"] = np.sum((df[cols] == -1).values, axis=1)
        df["ps_car_13_x_ps_reg_03"] = df["ps_car_13"] * df["ps_reg_03"]
        return df

    dfTrain = preprocess(dfTrain)
    dfTest = preprocess(dfTest)

    cols = [c for c in dfTrain.columns if c not in ["id", "target"]]
    cols = [c for c in cols if (not c in config.IGNORE_COLS)]

    X_train = dfTrain[cols].values
    y_train = dfTrain["target"].values
    X_test = dfTest[cols].values
    ids_test = dfTest["id"].values
    cat_features_indices = [i for i,c in enumerate(cols) if c in config.CATEGORICAL_COLS]

    return dfTrain, dfTest, X_train, y_train, X_test, ids_test, cat_features_indices 
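
Here np.sum(..., axis=1) counts the -1 sentinels in each row to build the engineered "missing_feat" column. The pattern on a toy frame (hypothetical column names):

import numpy as np
import pandas as pd

df = pd.DataFrame({"f1": [-1, 2, 3],
                   "f2": [5, -1, -1],
                   "f3": [-1, 0, 7]})
df["missing_feat"] = np.sum((df[["f1", "f2", "f3"]] == -1).values, axis=1)
print(df["missing_feat"].tolist())  # [2, 1, 1]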
Example 7
Project: models   Author: kipoi   File: dataloader_m.py    License: MIT License
def _prepro_cpg(self, states, dists):
        """Preprocess the state and distance of neighboring CpG sites."""
        prepro_states = []
        prepro_dists = []
        for state, dist in zip(states, dists):
            nan = state == dat.CPG_NAN
            if np.any(nan):
                state[nan] = np.random.binomial(1, state[~nan].mean(),
                                                nan.sum())
                dist[nan] = self.cpg_max_dist
            dist = np.minimum(dist, self.cpg_max_dist) / self.cpg_max_dist
            prepro_states.append(np.expand_dims(state, 1))
            prepro_dists.append(np.expand_dims(dist, 1))
        prepro_states = np.concatenate(prepro_states, axis=1)
        prepro_dists = np.concatenate(prepro_dists, axis=1)
        if self.cpg_wlen:
            center = prepro_states.shape[2] // 2
            delta = self.cpg_wlen // 2
            tmp = slice(center - delta, center + delta)
            prepro_states = prepro_states[:, :, tmp]
            prepro_dists = prepro_dists[:, :, tmp]
        return (prepro_states, prepro_dists) 
Example 8
Project: pytorch_NER_BiLSTM_CNN_CRF   Author: bamtercelboo   File: Embed.py    License: Apache License 2.0
def _avg_embed(self, embed_dict, words_dict):
        """
        :param embed_dict:
        :param words_dict:
        """
        print("loading pre_train embedding by avg for out of vocabulary.")
        embeddings = np.zeros((int(self.words_count), int(self.dim)))
        inword_list = {}
        for word in words_dict:
            if word in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
                inword_list[words_dict[word]] = 1
                self.exact_count += 1
            elif word.lower() in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
                inword_list[words_dict[word]] = 1
                self.fuzzy_count += 1
            else:
                self.oov_count += 1
        sum_col = np.sum(embeddings, axis=0) / len(inword_list)  # avg
        for i in range(len(words_dict)):
            if i not in inword_list and i != self.padID:
                embeddings[i] = sum_col
        final_embed = torch.from_numpy(embeddings).float()
        return final_embed 
Example 9
Project: deep-learning-note   Author: wdxtub   File: multi_layer_net.py    License: MIT License
def loss(self, x, t):
        """求损失函数
        Parameters
        ----------
        x : 输入数据
        t : 教师标签
        Returns
        -------
        损失函数的值
        """
        y = self.predict(x)

        weight_decay = 0
        for idx in range(1, self.hidden_layer_num + 2):
            W = self.params['W' + str(idx)]
            weight_decay += 0.5 * self.weight_decay_lambda * np.sum(W ** 2)

        return self.last_layer.forward(y, t) + weight_decay 
Example 10
Project: deep-learning-note   Author: wdxtub   File: 5_nueral_network.py    License: MIT License
def cost0(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)
    
    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))
    
    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
    
    # compute the cost
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i,:], np.log(h[i,:]))
        second_term = np.multiply((1 - y[i,:]), np.log(1 - h[i,:]))
        J += np.sum(first_term - second_term)
    
    J = J / m
    
    return J 
Example 11
Project: deep-learning-note   Author: wdxtub   File: 5_nueral_network.py    License: MIT License
def cost(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)
    
    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))
    
    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
    
    # compute the cost
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i,:], np.log(h[i,:]))
        second_term = np.multiply((1 - y[i,:]), np.log(1 - h[i,:]))
        J += np.sum(first_term - second_term)
    
    J = J / m
    
    # add the cost regularization term
    J += (float(learning_rate) / (2 * m)) * (np.sum(np.power(theta1[:,1:], 2)) + np.sum(np.power(theta2[:,1:], 2)))
    
    return J 
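
In both cost functions the per-sample loop can be collapsed into a single np.sum over the whole matrix; a sketch of the equivalent vectorized cross-entropy (same h and y as above):

import numpy as np

def cross_entropy(h, y):
    # equivalent to the loop in cost()/cost0(): sum over samples and labels
    m = y.shape[0]
    return np.sum(np.multiply(-y, np.log(h))
                  - np.multiply(1 - y, np.log(1 - h))) / m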
Example 12
Project: deep-learning-note   Author: wdxtub   File: 9_anomaly_and_rec.py    License: MIT License
def select_threshold(pval, yval):
    best_epsilon = 0
    best_f1 = 0
    f1 = 0
    
    step = (pval.max() - pval.min()) / 1000
    
    for epsilon in np.arange(pval.min(), pval.max(), step):
        preds = pval < epsilon
        
        tp = np.sum(np.logical_and(preds == 1, yval == 1)).astype(float)
        fp = np.sum(np.logical_and(preds == 1, yval == 0)).astype(float)
        fn = np.sum(np.logical_and(preds == 0, yval == 1)).astype(float)
        
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        f1 = (2 * precision * recall) / (precision + recall)
        
        if f1 > best_f1:
            best_f1 = f1
            best_epsilon = epsilon
    
    return best_epsilon, best_f1 
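
A toy run of the boolean counting used for tp/fp/fn (arbitrary values; note the original emits a divide-by-zero warning whenever an epsilon yields no positive predictions):

import numpy as np

preds = np.array([1, 1, 0, 0, 1])
yval = np.array([1, 0, 0, 1, 1])

tp = np.sum(np.logical_and(preds == 1, yval == 1)).astype(float)  # 2.0
fp = np.sum(np.logical_and(preds == 1, yval == 0)).astype(float)  # 1.0
fn = np.sum(np.logical_and(preds == 0, yval == 1)).astype(float)  # 1.0

precision = tp / (tp + fp)                            # 0.667
recall = tp / (tp + fn)                               # 0.667
f1 = (2 * precision * recall) / (precision + recall)  # 0.667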
Example 13
Project: deep-learning-note   Author: wdxtub   File: 9_anomaly_and_rec.py    License: MIT License
def cost(params, Y, R, num_features):
    Y = np.matrix(Y)  # (1682, 943)
    R = np.matrix(R)  # (1682, 943)
    num_movies = Y.shape[0]
    num_users = Y.shape[1]
    
    # reshape the parameter array into parameter matrices
    X = np.matrix(np.reshape(params[:num_movies * num_features], (num_movies, num_features)))  # (1682, 10)
    Theta = np.matrix(np.reshape(params[num_movies * num_features:], (num_users, num_features)))  # (943, 10)
    
    # initializations
    J = 0
    
    # compute the cost
    error = np.multiply((X * Theta.T) - Y, R)  # (1682, 943)
    squared_error = np.power(error, 2)  # (1682, 943)
    J = (1. / 2) * np.sum(squared_error)
    
    return J 
Example 14
Project: deep-learning-note   Author: wdxtub   File: 3_logistic_regression.py    License: MIT License
def gradientReg(theta, X, y, learningRate):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    
    parameters = int(theta.ravel().shape[1])
    grad = np.zeros(parameters)
    
    error = sigmoid(X * theta.T) - y
    
    for i in range(parameters):
        term = np.multiply(error, X[:,i])
        
        if (i == 0):
            grad[i] = np.sum(term) / len(X)
        else:
            grad[i] = (np.sum(term) / len(X)) + ((learningRate / len(X)) * theta[:,i])
    
    return grad 
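
The per-parameter loop can also be written as one matrix product plus a regularization term that skips the bias; a sketch under the same variable meanings, reusing the sigmoid defined in this file:

import numpy as np

def gradientReg_vec(theta, X, y, learningRate):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    error = sigmoid(X * theta.T) - y
    # X.T * error sums error * X[:, i] over samples for every i at once
    grad = np.array((X.T * error).T / len(X)).ravel()
    grad[1:] += (learningRate / len(X)) * np.array(theta).ravel()[1:]
    return grad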
Example 15
Project: svviz   Author: svviz   File: kde.py    License: MIT License
def evaluate(self, points):
        points = atleast_2d(points)

        d, m = points.shape
        if d != self.d:
            if d == 1 and m == self.d:
                # points was passed in as a row vector
                points = reshape(points, (self.d, 1))
                m = 1
            else:
                msg = "points have dimension %s, dataset has dimension %s" % (d,
                    self.d)
                raise ValueError(msg)

        result = zeros((m,), dtype=np.float64)

        if m >= self.n:
            # there are more points than data, so loop over data
            for i in range(self.n):
                diff = self.dataset[:, i, newaxis] - points
                tdiff = dot(self.inv_cov, diff)
                energy = sum(diff*tdiff,axis=0) / 2.0
                result = result + exp(-energy)
        else:
            # loop over points
            for i in range(m):
                diff = self.dataset - points[:, i, newaxis]
                tdiff = dot(self.inv_cov, diff)
                energy = sum(diff * tdiff, axis=0) / 2.0
                result[i] = sum(exp(-energy), axis=0)

        result = result / self._norm_factor

        return result 
Example 16
Project: libTLDA   Author: wmkouw   File: tcpr.py    License: MIT License
def risk(self, Z, theta, q):
        """
        Compute target contrastive pessimistic risk.

        Parameters
        ----------
        Z : array
            target samples (M samples by D features)
        theta : array
            classifier parameters (D features by K classes)
        q : array
            soft labels (M samples by K classes)

        Returns
        -------
        float
            Value of risk function.

        """
        # Number of classes
        K = q.shape[1]

        # Compute negative log-likelihood
        L = self.neg_log_likelihood(Z, theta)

        # Weight loss by soft labels
        for k in range(K):
            L[:, k] *= q[:, k]

        # Sum over weighted losses
        L = np.sum(L, axis=1)

        # Risk is average loss
        return np.mean(L, axis=0) 
Example 17
Project: libTLDA   Author: wmkouw   File: tcpr.py    License: MIT License
def combine_class_covariances(self, Si, pi):
        """
        Linear combination of class covariance matrices.

        Parameters
        ----------
        Si : array
            Covariance matrix (D features by D features by K classes)
        pi : array
            class proportions (1 by K classes)

        Returns
        -------
        Si : array
            Combined covariance matrix (D by D)

        """
        # Number of classes
        K = Si.shape[2]

        # Check if w is size K
        if not pi.shape[1] == K:
            raise ValueError('''Number of proportions does not match with number
                             classes of covariance matrix.''')

        # For each class
        for k in range(K):

            # Weight each class-covariance
            Si[:, :, k] = Si[:, :, k] * pi[0, k]

        # Sum over weighted class-covariances
        return np.sum(Si, axis=2) 
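
The loop weights each class covariance before np.sum reduces the class axis; under the same shapes this is a single contraction, sketched here with einsum (not libTLDA's code):

import numpy as np

D, K = 3, 2
Si = np.random.randn(D, D, K)
pi = np.array([[0.3, 0.7]])  # (1 by K) class proportions

# contract the class axis k: combined[i, j] = sum_k Si[i, j, k] * pi[0, k]
combined = np.einsum('ijk,k->ij', Si, pi[0])
assert np.allclose(combined, np.sum(Si * pi[0], axis=2))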
Example 18
Project: libTLDA   Author: wmkouw   File: suba.py    License: MIT License
def find_medioid(self, X, Y):
        """
        Find point with minimal distance to all other points.

        Parameters
        ----------
        X : array
            data set, with N samples x D features.
        Y : array
            labels to select for which samples to compute distances.

        Returns
        -------
        x : array
            medioid
        ix : int
            index of medioid

        """
        # Initiate an array with infinities
        A = np.full((X.shape[0],), np.inf)

        # Insert sum of distances to other points
        A[Y] = np.sum(squareform(pdist(X[Y, :])), axis=1)

        # Find the index of the point with the smallest distance
        ix = np.argmin(A)

        return X[ix, :], ix 
Example 19
Project: libTLDA   Author: wmkouw   File: rba.py    License: MIT License
def posterior(self, psi):
        """
        Class-posterior estimation.

        Parameters
        ----------
        psi : array
            weighted data-classifier output (N samples by K classes)

        Returns
        -------
        pyx : array
            class-posterior estimation (N samples by K classes)

        """
        # Data shape
        N, K = psi.shape

        # Preallocate array
        pyx = np.zeros((N, K))

        # Subtract maximum value for numerical stability
        psi = (psi.T - np.max(psi, axis=1).T).T

        # Loop over classes
        for k in range(K):

            # Estimate posterior p^(Y=y | x_i)
            pyx[:, k] = np.exp(psi[:, k]) / np.sum(np.exp(psi), axis=1)

        return pyx 
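
The class loop computes a row-wise softmax; with keepdims=True the same computation vectorizes in three lines (a sketch, not rba.py's API):

import numpy as np

def softmax(psi):
    # subtract the row max for numerical stability, as posterior() does
    psi = psi - np.max(psi, axis=1, keepdims=True)
    e = np.exp(psi)
    return e / np.sum(e, axis=1, keepdims=True)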
Example 20
Project: libTLDA   Author: wmkouw   File: scl.py    License: MIT License
def Huber_grad(self, theta, X, y, l2=0.0):
        """
        Huber gradient computation.

        Reference: Ando & Zhang (2005a). A framework for learning predictive
        structures from multiple tasks and unlabeled data. JMLR.

        Parameters
        ----------
        theta : array
            classifier parameters (D features by 1)
        X : array
            data (N samples by D features)
        y : array
            label vector (N samples by 1)
        l2 : float
            l2-regularization parameter (def= 0.0)

        Returns
        -------
        array
            Gradient with respect to classifier parameters

        """
        # Precompute terms
        Xy = (X.T*y.T).T
        Xyt = np.dot(Xy, theta)

        # Indices of discontinuity
        ix = (Xyt >= -1)

        # Gradient
        return np.sum(2*np.clip(1-Xyt[ix], 0, None).T * -Xy[ix, :].T,
                      axis=1).T + np.sum(-4*Xy[~ix, :], axis=0) + 2*l2*theta 
Example 21
Project: Financial-NLP   Author: Coldog2333   File: NLP.py    License: Apache License 2.0
def unitvec(vector, ax=1):
    v = vector * vector
    if len(vector.shape) == 1:
        sqrtv = np.sqrt(np.sum(v))
    elif len(vector.shape) == 2:
        sqrtv = np.sqrt([np.sum(v, axis=ax)])
    else:
        raise ValueError('unitvec only supports 1-D or 2-D arrays.')
    if ax == 1:
        result = np.divide(vector, sqrtv.T)
    elif ax == 0:
        result = np.divide(vector, sqrtv)
    return result
Example 22
Project: pedestrian-haar-based-detector   Author: felipecorrea   File: tracking.py    License: GNU General Public License v2.0
def chi2_distance(histA, histB, eps = 1e-10):
	# compute the chi-squared distance
	d = 0.5 * np.sum([((a - b) ** 2) / (a + b + eps)
		for (a, b) in zip(histA, histB)])

	# return the chi-squared distance
	return d 
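
For 1-D histograms the list comprehension is unnecessary; the whole distance vectorizes (a sketch assuming equal-length array inputs):

import numpy as np

def chi2_distance_vec(histA, histB, eps=1e-10):
    histA = np.asarray(histA, dtype=float)
    histB = np.asarray(histB, dtype=float)
    return 0.5 * np.sum((histA - histB) ** 2 / (histA + histB + eps))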
Example 23
Project: keras_mixnets   Author: titu1994   File: mixnets.py    License: MIT License
def _split_channels(total_filters, num_groups):
    split = [total_filters // num_groups for _ in range(num_groups)]
    split[0] += total_filters - sum(split)
    return split


# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/mixnet_model.py 
Example 24
Project: Att-ChemdNER   Author: lingluodlut   File: utils.py    License: Apache License 2.0
def shared(shape, name):
    """
    Create a shared object of a numpy array.
    """
    init = initializations.get('glorot_uniform')
    if len(shape) == 1:
        value = np.zeros(shape)  # biases are initialized with zeros
        return theano.shared(value=value.astype(theano.config.floatX), name=name)
    else:
        # np.sum(shape) = fan_in + fan_out; drange and value are computed
        # but unused -- the glorot_uniform initializer is returned instead
        drange = np.sqrt(6. / (np.sum(shape)))
        value = drange * np.random.uniform(low=-1.0, high=1.0, size=shape)
        return init(shape=shape, name=name)
Example 25
def voc_ap(rec, prec, use_07_metric=False):
  """ ap = voc_ap(rec, prec, [use_07_metric])
  Compute VOC AP given precision and recall.
  If use_07_metric is true, uses the
  VOC 07 11 point method (default:False).
  """
  if use_07_metric:
    # 11 point metric
    ap = 0.
    for t in np.arange(0., 1.1, 0.1):
      if np.sum(rec >= t) == 0:
        p = 0
      else:
        p = np.max(prec[rec >= t])
      ap = ap + p / 11.
  else:
    # correct AP calculation
    # first append sentinel values at the end
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))

    # compute the precision envelope
    for i in range(mpre.size - 1, 0, -1):
      mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # to calculate area under PR curve, look for points
    # where X axis (recall) changes value
    i = np.where(mrec[1:] != mrec[:-1])[0]

    # and sum (\Delta recall) * prec
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
  return ap 
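
A toy call (hand-picked rec/prec values): with the interpolated-precision envelope the area is 0.5*1.0 + 0.5*0.5 = 0.75.

import numpy as np

rec = np.array([0.0, 0.5, 1.0])
prec = np.array([1.0, 1.0, 0.5])
print(voc_ap(rec, prec))  # 0.75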
Example 26
Project: DDPAE-video-prediction   Author: jthsieh   File: metrics.py    License: MIT License
def get_scores(self):
    # Save positions
    if self.save_path != '':
      positions = np.array([np.concatenate(self.pred_positions, axis=0),
                            np.concatenate(self.gt_positions, axis=0)])
      np.save(os.path.join(self.save_path), positions)

    masks = np.concatenate(self.masks, axis=0)
    cosine = np.concatenate(self.cosine_similarities, axis=0)
    rel_error = np.concatenate(self.relative_errors, axis=0)

    numel = np.sum(masks == 1, axis=(0,2))
    rel_error = np.sum(rel_error * masks, axis=(0,2)) / numel
    cosine = np.sum(cosine * masks, axis=(0,2)) / numel
    return {'relative_errors': rel_error, 'cosine_similarities': cosine} 
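
The score aggregation divides a masked sum by the count of valid entries; the same pattern in miniature:

import numpy as np

vals = np.array([[2., 4.], [6., 8.]])
mask = np.array([[1., 0.], [1., 1.]])

numel = np.sum(mask == 1)                  # 3 valid entries
masked_mean = np.sum(vals * mask) / numel  # (2 + 6 + 8) / 3 = 5.333...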
Example 27
Project: gated-graph-transformer-network   Author: hexahedria   File: ggtnn_train.py    License: MIT License
def test_accuracy(m, story_buckets, bucket_sizes, num_answer_words, format_spec, batch_size, batch_auto_adjust=None, test_graph=False):
    correct = 0
    out_of = 0
    for bucket, bucket_size in zip(story_buckets, bucket_sizes):
        cur_batch_size = adj_size(m, bucket_size, batch_size, batch_auto_adjust)
        for start_idx in range(0, len(bucket), cur_batch_size):
            stories = bucket[start_idx:start_idx+cur_batch_size]
            batch = assemble_batch(stories, num_answer_words, format_spec)
            answers = batch[2]
            args = batch[:2] + ((answers.shape[1],) if format_spec == model.ModelOutputFormat.sequence else ())

            if test_graph:
                _, batch_close, _ = m.eval(*batch, with_accuracy=True)
            else:
                out_answers, out_strengths, out_ids, out_states, out_edges = m.snap_test_fn(*args)
                close = np.isclose(out_answers, answers)
                batch_close = np.all(close, (1,2))

            print(batch_close)

            batch_correct = np.sum(batch_close).tolist()
            batch_out_of = len(stories)
            correct +=  batch_correct
            out_of += batch_out_of

    return correct/out_of 
Example 28
Project: dustmaps   Author: gregreen   File: marshall.py    License: GNU General Public License v2.0
def __init__(self, map_fname=None):
        """
        Args:
            map_fname (Optional[:obj:`str`]): Filename at which the map is stored.
                Defaults to ``None``, meaning that the default filename is used.
        """
        if map_fname is None:
            map_fname = os.path.join(data_dir(), 'marshall', 'marshall.h5')

        with h5py.File(map_fname, 'r') as f:
            self._l = f['l'][:]
            self._b = f['b'][:]
            self._A = f['A'][:]
            self._sigma_A = f['sigma_A'][:]
            self._dist = f['dist'][:]
            self._sigma_dist = f['sigma_dist'][:]

        # self._l.shape = (self._l.size,)
        # self._b.shape = (self._b.size,)
        # self._A.shape = (self._A.shape[0], self._A.shape[1]*self._A.shape[2])

        # Shape of the (l,b)-grid
        self._shape = self._l.shape

        # Number of distance bins in each sightline
        self._n_dists = np.sum(np.isfinite(self._dist), axis=2)

        # idx = ~np.isfinite(self._dist)
        # if np.any(idx):
        #     self._dist[idx] = np.inf

        self._l_bounds = (-100., 100.) # min,max Galactic longitude, in deg
        self._b_bounds = (-10., 10.)    # min,max Galactic latitude, in deg
        self._inv_pix_scale = 4.        # 1 / (pixel scale, in deg) 
Example 29
Project: dustmaps   Author: gregreen   File: test_marshall.py    License: GNU General Public License v2.0
def test_bounds(self):
        """
        Test that out-of-bounds coordinates return NaN reddening, and that
        in-bounds coordinates do not return NaN reddening.
        """

        for return_sigma in [False, True]:
            # Draw random coordinates on the sphere
            n_pix = 10000
            u, v = np.random.random((2,n_pix))
            l = 360. * u - 180.
            b = 90. - np.degrees(np.arccos(2.*v - 1.))
            d = 5. * np.random.random(l.shape)
            c = coords.SkyCoord(l*units.deg, b*units.deg,
                                distance=d*units.kpc, frame='galactic')

            res = self._marshall(c, return_sigma=return_sigma)

            if return_sigma:
                self.assertTrue(len(res) == 2)
                A, sigma = res
                np.testing.assert_equal(A.shape, sigma.shape)
            else:
                self.assertFalse(isinstance(res, tuple))
                A = res

            in_bounds = (l > -99.) & (l < 99.) & (b < 9.5) & (b > -9.5)
            out_of_bounds = (l < -101.) | (l > 101.) | (b > 10.5) | (b < -10.5)

            n_nan_in_bounds = np.sum(np.isnan(A[in_bounds]))
            n_finite_out_of_bounds = np.sum(np.isfinite(A[out_of_bounds]))

            self.assertTrue(n_nan_in_bounds == 0)
            self.assertTrue(n_finite_out_of_bounds == 0) 
Example 30
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    License: BSD 3-Clause "New" or "Revised" License
def mle_single(data, x, k=10):
    data = np.asarray(data, dtype=np.float32)
    x = np.asarray(x, dtype=np.float32)
    if x.ndim == 1:
        x = x.reshape((-1, x.shape[0]))
    # dim = x.shape[1]

    k = min(k, len(data)-1)
    f = lambda v: - k / np.sum(np.log(v/v[-1]))
    a = cdist(x, data)
    a = np.apply_along_axis(np.sort, axis=1, arr=a)[:,1:k+1]
    a = np.apply_along_axis(f, axis=1, arr=a)
    return a[0]
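
A quick call on random data (mle_single estimates local intrinsic dimensionality from the k nearest neighbours; cdist comes from scipy.spatial.distance, as in the source file):

import numpy as np
from scipy.spatial.distance import cdist

np.random.seed(0)
data = np.random.randn(200, 5)    # reference batch
x = np.random.randn(5)            # single query point
print(mle_single(data, x, k=10))  # scalar LID estimate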

# lid of a batch of query points X