Python numpy.nan_to_num() Examples

The following are 30 code examples of numpy.nan_to_num(), drawn from open-source projects. Each example notes its source file, project, and license.
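As a quick baseline before the project examples, here is a minimal standalone sketch of the function itself: by default NaN becomes 0.0 and ±inf become the largest finite floats, and NumPy 1.17+ also accepts explicit nan/posinf/neginf replacement values (used by some examples below).

import numpy as np

x = np.array([np.nan, np.inf, -np.inf, 1.5])
print(np.nan_to_num(x))
# [ 0.0000000e+000  1.7976931e+308 -1.7976931e+308  1.5000000e+000]
print(np.nan_to_num(x, nan=0.0, posinf=1.0, neginf=-1.0))  # NumPy >= 1.17
# [ 0.   1.  -1.   1.5]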
Example #1
Source File: analysis.py    From metal with Apache License 2.0
def lf_overlaps(L, normalize_by_coverage=False):
    """Return the **fraction of items each LF labels that are also labeled by at
     least one other LF.**

    Note that the maximum possible overlap fraction for an LF is the LF's
    coverage, unless `normalize_by_coverage=True`, in which case it is 1.

    Args:
        L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
            jth LF to the ith candidate
        normalize_by_coverage: Normalize by coverage of the LF, so that it
            returns the percent of LF labels that have overlaps.
    """
    overlaps = (L != 0).T @ _overlapped_data_points(L) / L.shape[0]
    if normalize_by_coverage:
        overlaps /= lf_coverages(L)
    return np.nan_to_num(overlaps) 
Example #2
Source File: observation_history.py    From tensortrade with Apache License 2.0
def observe(self) -> np.ndarray:
        """Returns the rows to be observed by the agent."""
        rows = self.rows.copy()

        if len(rows) < self.window_size:
            size = self.window_size - len(rows)
            padding = np.zeros((size, rows.shape[1]))
            padding = pd.DataFrame(padding, columns=self.rows.columns)
            rows = pd.concat([padding, rows], ignore_index=True, sort=False)

        if isinstance(rows, pd.DataFrame):
            rows = rows.fillna(0, axis=1)
            rows = rows.values

        rows = np.nan_to_num(rows)

        return rows 
Example #3
Source File: metrics.py    From TCFPN-ISBA with MIT License
def macro_accuracy(P, Y, n_classes, bg_class=None, return_all=False, **kwargs):
    def macro_(P, Y, n_classes=None, bg_class=None, return_all=False):
        conf_matrix = sm.confusion_matrix(Y, P, labels=np.arange(n_classes))
        conf_matrix = conf_matrix / (conf_matrix.sum(0)[:, None] + 1e-5)
        conf_matrix = np.nan_to_num(conf_matrix)
        diag = conf_matrix.diagonal() * 100.

        # Remove background score
        if bg_class is not None:
            diag = np.array([diag[i] for i in range(n_classes) if i != bg_class])

        macro = diag.mean()
        if return_all:
            return macro, diag
        else:
            return macro

    if isinstance(P, list):
        out = [macro_(P[i], Y[i], n_classes=n_classes, bg_class=bg_class, return_all=return_all) for i in range(len(P))]
        if return_all:
            return (np.mean([o[0] for o in out]), np.mean([o[1] for o in out], 0))
        else:
            return np.mean(out)
    else:
        return macro_(P, Y, n_classes=n_classes, bg_class=bg_class, return_all=return_all) 
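A standalone sketch (not repo code) of the hazard the nan_to_num call above guards against: per-class accuracy from a confusion matrix is 0/0 = NaN for a class that never occurs.

import numpy as np

conf = np.array([[3., 0., 0.],
                 [1., 2., 0.],
                 [0., 0., 0.]])  # class 2 has no samples
with np.errstate(invalid='ignore'):
    per_class = conf.diagonal() / conf.sum(1)  # [1., 0.667, nan]
print(np.nan_to_num(per_class) * 100.)  # NaN becomes 0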
Example #4
Source File: textrank.py    From deepnlp with MIT License
def tfIdf(dtm):
  nDoc = dtm.shape[0]
  nTerm = dtm.shape[1]
  dtmNorm = dtm/dtm.sum(axis=1, keepdims=True) # Normalize tf to unit weight, tf/line word count
  dtmNorm = np.nan_to_num(dtmNorm)
  tfIdfMat = np.zeros((nDoc,nTerm))
  
  for j in range(nTerm):
    tfVect = dtmNorm[:, j]
    nExist = np.sum(tfVect > 0.0) # if tfVect is 0.0, word is not in current doc
    if nExist > 0:
      idf = np.log(nDoc/nExist)/np.log(2) # log2()
    else:
      idf = 0.0
    tfIdfMat[:,j] = tfVect * idf
  
  return tfIdfMat 
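A hypothetical toy run of tfIdf (assuming the definition above, with numpy imported as np): the empty third document makes its row normalization 0/0, which nan_to_num zeroes out; NumPy prints a RuntimeWarning for that row but the result is finite.

import numpy as np

dtm = np.array([[2., 1., 0.],
                [0., 1., 1.],
                [0., 0., 0.]])  # third document is empty
print(tfIdf(dtm))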
Example #5
Source File: textrank.py    From deepnlp with MIT License
def pagerank(nDim, adjMat, d, K):
    '''
    Args:
        d: damping factor
        K: number of iterations
    '''
    P = np.ones((nDim, 1)) * (1/nDim)
    
    # normalize adjacency Matrix
    B = adjMat/adjMat.sum(axis=1, keepdims=True)
    B = np.nan_to_num(B)
    
    U = np.ones((nDim, nDim)) * (1/nDim)
    
    M = d * B + (1-d) * U
    
    for i in range(K):
        P = np.dot(M.T, P)
    return P
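A hypothetical toy run of pagerank (same assumptions): a directed 3-node ring, whose stationary distribution is uniform.

import numpy as np

adj = np.array([[0., 1., 0.],
                [0., 0., 1.],
                [1., 0., 0.]])  # ring: 0 -> 1 -> 2 -> 0
print(pagerank(3, adj, d=0.85, K=100))  # each score converges to ~1/3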
Example #6
Source File: Normalizer.py    From FAE with GNU General Public License v3.0
def Transform(self, data_container, store_folder='', store_key=''):
        if data_container.IsEmpty():
            return data_container

        new_data_container = deepcopy(data_container)
        array = new_data_container.GetArray()
        array -= self._interception
        array /= self._slop
        array = np.nan_to_num(array)

        new_data_container.SetArray(array)
        new_data_container.UpdateFrameByData()

        if store_folder:
            assert(len(store_key) > 0)
            self.SaveNormalDataContainer(data_container, store_folder, store_key)

        return new_data_container 
Example #7
Source File: voc_detection.py    From gluon-cv with Apache License 2.0
def _average_precision(self, rec, prec):
        """
        calculate average precision, override the default one,
        special 11-point metric

        Params:
        ----------
        rec : numpy.array
            cumulated recall
        prec : numpy.array
            cumulated precision
        Returns:
        ----------
        ap as float
        """
        if rec is None or prec is None:
            return np.nan
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(np.nan_to_num(prec)[rec >= t])
            ap += p / 11.
        return ap 
Example #8
Source File: tools.py    From vnpy_crypto with MIT License
def nan_dot(A, B):
    """
    Returns np.dot(left_matrix, right_matrix) with the convention that
    nan * 0 = 0 and nan * x = nan if x != 0.

    Parameters
    ----------
    A, B : np.ndarrays
    """
    # Find out who should be nan due to nan * nonzero
    should_be_nan_1 = np.dot(np.isnan(A), (B != 0))
    should_be_nan_2 = np.dot((A != 0), np.isnan(B))
    should_be_nan = should_be_nan_1 + should_be_nan_2

    # Multiply after setting all nan to 0
    # This is what happens if there were no nan * nonzero conflicts
    C = np.dot(np.nan_to_num(A), np.nan_to_num(B))

    C[should_be_nan] = np.nan

    return C 
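A quick demonstration of the convention on 2x2 inputs (a sketch, assuming the function above):

import numpy as np

A = np.array([[1., np.nan],
              [0., 2.]])
B = np.array([[1., 0.],
              [3., 1.]])
print(nan_dot(A, B))
# nan * nonzero poisons the first row, nan * 0 does not:
# [[nan nan]
#  [ 6.  2.]]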
Example #9
Source File: env.py    From cwcf with MIT License
def __init__(self, data, hpc_p, costs):
        self.data_x = data.iloc[:, 0:-1].astype('float32').values
        self.data_n = np.isnan(self.data_x)
        self.data_x = np.nan_to_num(self.data_x)

        self.data_y = data.iloc[:,   -1].astype('int32').values

        self.data_len = len(data)

        self.hpc_p = hpc_p.values
        self.costs = costs.values

        self.mask = np.zeros( (config.AGENTS, config.FEATURE_DIM), dtype=np.float32 )
        self.x    = np.zeros( (config.AGENTS, config.FEATURE_DIM), dtype=np.float32 )
        self.y    = np.zeros( config.AGENTS, dtype=np.int64 )
        self.p    = np.zeros( config.AGENTS, dtype=np.int32 )
        self.n    = np.zeros( (config.AGENTS, config.FEATURE_DIM), dtype=bool )
Example #10
Source File: analysis.py    From metal with Apache License 2.0
def lf_conflicts(L, normalize_by_overlaps=False):
    """Return the **fraction of items each LF labels that are also given a
    different (non-abstain) label by at least one other LF.**

    Note that the maximum possible conflict fraction for an LF is the LF's
        overlaps fraction, unless `normalize_by_overlaps=True`, in which case it
        is 1.

    Args:
        L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
            jth LF to the ith candidate
        normalize_by_overlaps: Normalize by overlaps of the LF, so that it
            returns the percent of LF overlaps that have conflicts.
    """
    conflicts = (L != 0).T @ _conflicted_data_points(L) / L.shape[0]
    if normalize_by_overlaps:
        conflicts /= lf_overlaps(L)
    return np.nan_to_num(conflicts) 
Example #11
Source File: math_utils.py    From STGCN with GNU General Public License v3.0
def masked_mape_np(y_true, y_pred, null_val=0):
    '''
    Masked MAPE: entries where y_true equals null_val are excluded.
    '''
    with np.errstate(divide='ignore', invalid='ignore'):
        if np.isnan(null_val):
            mask = ~np.isnan(y_true)
        else:
            mask = np.not_equal(y_true, null_val)
        mask = mask.astype('float32')
        mask /= np.mean(mask)
        mape = np.abs(np.divide((y_pred - y_true).astype('float32'), y_true))
        mape = np.nan_to_num(mask * mape)
        return np.mean(mape) * 100 
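A toy call (a sketch, assuming the function above): entries where y_true equals null_val are masked out, so the zero target contributes neither an error term nor weight.

import numpy as np

y_true = np.array([100., 0., 200.])
y_pred = np.array([110., 5., 180.])
print(masked_mape_np(y_true, y_pred, null_val=0))  # 10.0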
Example #12
Source File: Dream.py    From PyDREAM with GNU General Public License v3.0
def estimate_gamma_level_probs(self, ndim, q0, q_new, gamma_level):
        """Adapt gamma level probabilities during burn-in

        Parameters
        ----------
        ndim : int
            number of dimensions in a draw
        q0 : numpy array
            original point in parameter space
        q_new : numpy array
            new point in parameter space
        gamma_level : int
            gamma level selected for this step"""

        current_positions = np.frombuffer(Dream_shared_vars.current_positions.get_obj())

        current_positions = current_positions.reshape((self.nchains, ndim))

        sd_by_dim = np.std(current_positions, axis=0)
        
        gamma_level_probs = Dream_shared_vars.gamma_level_probs[0:self.ngamma]
            
        gamma_loc = int(np.where(self.gamma_level_values == gamma_level)[0])
            
        Dream_shared_vars.ngamma_updates[gamma_loc] += 1
            
        Dream_shared_vars.delta_m_gamma[gamma_loc] = Dream_shared_vars.delta_m_gamma[gamma_loc] + np.nan_to_num(np.sum(((q_new - q0)/sd_by_dim)**2))
    
        delta_ms_gamma = np.array(Dream_shared_vars.delta_m_gamma[0:self.ngamma])
            
        if np.all(delta_ms_gamma != 0):
                
            for m in range(self.ngamma):
                gamma_level_probs[m] = (Dream_shared_vars.delta_m_gamma[m]/Dream_shared_vars.ngamma_updates[m])*self.nchains
                
            gamma_level_probs = gamma_level_probs/np.sum(gamma_level_probs)
            
        Dream_shared_vars.gamma_level_probs[0:self.ngamma] = gamma_level_probs 
        
        return gamma_level_probs 
Example #13
Source File: Dream.py    From PyDREAM with GNU General Public License v3.0
def snooker_update(self, n_proposed_pts, q0):
        """Generate a proposed point with snooker updating scheme.

        Parameters
        ----------
        n_proposed_pts : int
            Number of points to propose this iteration (greater than one if using multi-try update scheme)
        q0 : numpy array
            Original point in parameter space"""
        
        sampled_history_pt = [self.sample_from_history(self.nseedchains, self.DEpairs, self.total_var_dimension, snooker=True) for i in range(n_proposed_pts)]

        chains_to_be_projected = np.squeeze([np.array([self.sample_from_history(self.nseedchains, self.DEpairs, self.total_var_dimension, snooker=True) for i in range(2)]) for x in range(n_proposed_pts)])

        #Define projection vector
        proj_vec_diff = np.squeeze(q0-sampled_history_pt)

        if n_proposed_pts > 1:
            D = [np.dot(proj_vec_diff[point], proj_vec_diff[point]) for point in range(len(proj_vec_diff))]
            
            #Orthogonal projection of chains_to_projected onto projection vector
            diff_chains_to_be_projected = [(chains_to_be_projected[point][0]-chains_to_be_projected[point][1]) for point in range(n_proposed_pts)]       
            zP = np.nan_to_num(np.array([(np.sum(diff_chains_to_be_projected[point]*proj_vec_diff[point])/D[point] *proj_vec_diff[point]) for point in range(n_proposed_pts)]))
            dx = self.gamma*zP
            proposed_pts = [q0 + dx[point] for point in range(n_proposed_pts)]
            norms = [np.linalg.norm(proposed_pts[point] - sampled_history_pt[point]) for point in range(n_proposed_pts)]
            snooker_logp = [np.log(norm, where= norm != 0)*(self.total_var_dimension-1) for norm in norms]

        else:
            D = np.dot(proj_vec_diff, proj_vec_diff)

            #Orthogonal projection of chains_to_projected onto projection vector  
            diff_chains_to_be_projected = chains_to_be_projected[0]-chains_to_be_projected[1]
            zP = np.nan_to_num(np.array([np.sum(np.divide((diff_chains_to_be_projected*proj_vec_diff), D, where= D != 0))]))*proj_vec_diff
            dx = self.gamma*zP
            proposed_pts = q0 + dx
            norm = np.linalg.norm(proposed_pts-sampled_history_pt)
            snooker_logp = np.log(norm, where= norm != 0)*(self.total_var_dimension-1)
        
        return proposed_pts, snooker_logp, sampled_history_pt 
Example #14
Source File: det_metrics.py    From imgclsmob with MIT License
def _average_precision(self,
                           rec,
                           prec):
        """
        Calculate average precision.

        Params:
        ----------
        rec : np.array
            cumulated recall
        prec : np.array
            cumulated precision

        Returns:
        ----------
        float
            AP
        """
        if rec is None or prec is None:
            return np.nan

        # append sentinel values at both ends
        mrec = np.concatenate(([0.0], rec, [1.0]))
        mpre = np.concatenate(([0.0], np.nan_to_num(prec), [0.0]))

        # compute precision integration ladder
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # look for recall value changes
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # sum (\delta recall) * prec
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
        return ap 
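The same integration logic outside the class, on a toy curve (a standalone sketch, not repo code): the NaN precision entry is treated as 0 before the running-maximum envelope is built.

import numpy as np

rec  = np.array([0.25, 0.5, 0.75, 1.0])
prec = np.array([1.0, 0.5, np.nan, 0.4])

mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], np.nan_to_num(prec), [0.0]))
for i in range(mpre.size - 1, 0, -1):
    mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
i = np.where(mrec[1:] != mrec[:-1])[0]
print(np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]))  # 0.575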
Example #15
Source File: det_metrics.py    From imgclsmob with MIT License
def _average_precision(self, rec, prec):
        """
        calculate average precision, override the default one,
        special 11-point metric

        Params:
        ----------
        rec : np.array
            cumulated recall
        prec : np.array
            cumulated precision

        Returns:
        ----------
        float
            AP
        """
        if rec is None or prec is None:
            return np.nan
        ap = 0.0
        for t in np.arange(0.0, 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(np.nan_to_num(prec)[rec >= t])
            ap += p / 11.0
        return ap 
Example #16
Source File: gw_optim.py    From otalign with GNU General Public License v3.0
def compute_gamma_entropy(self, G):
        if not self.gpu:
            Prod = G * (np.log(G) - 1)
            ent = np.nan_to_num(Prod).sum()
        else:
            Prod = cm.empty(G.shape)
            Prod = G.mult(cm.log(G.copy()).subtract(1), target=Prod)
            ent = np.nan_to_num(Prod.asarray()).sum()
        return ent 
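The CPU branch in isolation (a sketch): zero entries of the transport plan G give 0 * log(0) = NaN, which nan_to_num drops from the entropy sum.

import numpy as np

G = np.array([[0.5, 0.0],
              [0.0, 0.5]])
with np.errstate(divide='ignore', invalid='ignore'):
    Prod = G * (np.log(G) - 1)
print(np.nan_to_num(Prod).sum())  # -1.693... = 2 * 0.5 * (log(0.5) - 1)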
Example #17
Source File: voc_detection.py    From gluon-cv with Apache License 2.0
def _average_precision(self, rec, prec):
        """
        calculate average precision

        Params:
        ----------
        rec : numpy.array
            cumulated recall
        prec : numpy.array
            cumulated precision
        Returns:
        ----------
        ap as float
        """
        if rec is None or prec is None:
            return np.nan

        # append sentinel values at both ends
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], np.nan_to_num(prec), [0.]))

        # compute precision integration ladder
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # look for recall value changes
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # sum (\delta recall) * prec
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
        return ap 
Example #18
Source File: obs_helpers.py    From eht-imaging with GNU General Public License v3.0
def amp_debias(amp, sigma, force_nonzero=False):
    """Return debiased visibility amplitudes
    """

    deb2 = np.abs(amp)**2 - np.abs(sigma)**2

    # puts amplitude at 0 if snr < 1
    deb2 *= (np.nan_to_num(np.abs(amp)) > np.nan_to_num(np.abs(sigma)))

    # raises amplitude to sigma to force nonzero
    if force_nonzero:
        deb2 += (np.nan_to_num(np.abs(amp)) < np.nan_to_num(np.abs(sigma))) * np.abs(sigma)**2
    out = np.sqrt(deb2)

    return out 
Example #19
Source File: waymo_ap_metric.py    From lingvo with Apache License 2.0
def value(self):
    """Returns weighted mAP over all eval classes."""
    self._EvaluateIfNecessary()
    ap = self._breakdown_metrics['waymo']._average_precisions  # pylint:disable=protected-access
    breakdown_names = config_util.get_breakdown_names_from_config(
        self._waymo_metric_config)

    num_sum = 0.0
    denom_sum = 0.0
    # Compute the average AP over all eval classes.  The first breakdown
    # is the overall mAP.
    for class_index in range(len(self.metadata.EvalClassIndices())):
      num_sum += np.nan_to_num(ap[breakdown_names[0]][class_index])
      denom_sum += 1.
    return num_sum / denom_sum 
Example #20
Source File: infotheo.py    From vnpy_crypto with MIT License
def condentropy(px, py, pxpy=None, logbase=2):
    """
    Return the conditional entropy of X given Y.

    Parameters
    ----------
    px : array-like
    py : array-like
    pxpy : array-like, optional
        If pxpy is None, the distributions are assumed to be independent
        and condentropy(px,py) = shannonentropy(px)
    logbase : int or np.e

    Returns
    -------
    sum_{kj} w_{kj} log(q_{j}/w_{kj})

    where q_{j} = Y[j]
    and w_{kj} = X[k,j]
    """
    if not _isproperdist(px) or not _isproperdist(py):
        raise ValueError("px or py is not a proper probability distribution")
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribution")
    if pxpy is None:
        pxpy = np.outer(py,px)
    condent = np.sum(pxpy * np.nan_to_num(np.log2(py/pxpy)))
    if logbase == 2:
        return condent
    else:
        return logbasechange(2, logbase) * condent 
Example #21
Source File: infotheo.py    From vnpy_crypto with MIT License
def shannonentropy(px, logbase=2):
    """
    This is Shannon's entropy

    Parameters
    ----------
    px : 1d or 2d array_like
        Can be a discrete probability distribution, a 2d joint distribution,
        or a sequence of probabilities.
    logbase : int or np.e
        The base of the log

    Returns
    -------
    For log base 2 (bits) given a discrete distribution
        H(p) = sum(px * log2(1/px)) = -sum(px * log2(px)) = E[log2(1/p(X))]

    For log base 2 (bits) given a joint distribution
        H(px,py) = -sum_{k,j} w_{kj} log2(w_{kj})

    Notes
    -----
    shannonentropy(0) is defined as 0
    """
#TODO: haven't defined the px,py case?
    px = np.asarray(px)
    if not np.all(px <= 1) or not np.all(px >= 0):
        raise ValueError("px does not define proper distribution")
    entropy = -np.sum(np.nan_to_num(px*np.log2(px)))
    if logbase != 2:
        return logbasechange(2,logbase) * entropy
    else:
        return entropy

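Two quick calls (with the default logbase=2, so logbasechange is never reached): a fair coin carries one bit, and a degenerate distribution returns zero entropy because nan_to_num maps the 0*log2(0) = NaN term to 0 (NumPy emits a RuntimeWarning for log2(0)).

print(shannonentropy([0.5, 0.5]))  # 1.0
print(shannonentropy([1.0, 0.0]))  # -0.0, i.e. zero entropy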
Example #22
Source File: terrain_correction.py    From pyeo with GNU General Public License v3.0
def build_sample_array(raster_array, slope_array, red_band_index, ir_band_index):
    """
    Returns a set of pixels in raster with slope > 18deg + ndvi > 0.5
    """

    red_band = raster_array[red_band_index, ...]
    ir_band = raster_array[ir_band_index, ...]
    ndvi_array = (ir_band - red_band)/(ir_band + red_band)
    np.nan_to_num(ndvi_array, copy=False)
    mask_array = np.logical_and(ndvi_array>0.5, slope_array > 18)
    return ras.apply_array_image_mask(raster_array, mask_array, fill_value = 0) 
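A sketch of the NDVI step on two hypothetical pixels: where ir + red == 0 the ratio is 0/0 = NaN, and copy=False makes nan_to_num repair the array in place.

import numpy as np

ir  = np.array([0.8, 0.0])
red = np.array([0.2, 0.0])
with np.errstate(invalid='ignore'):
    ndvi = (ir - red) / (ir + red)
np.nan_to_num(ndvi, copy=False)  # in-place since NumPy 1.13
print(ndvi)  # [0.6 0. ]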
Example #23
Source File: test_var.py    From D-VAE with MIT License
def test_numpy_method():
    # This type of code is used frequently by PyMC3 users
    x = tt.dmatrix('x')
    data = np.random.rand(5, 5)
    x.tag.test_value = data
    for fct in [np.arccos, np.arccosh, np.arcsin, np.arcsinh,
                np.arctan, np.arctanh, np.ceil, np.cos, np.cosh, np.deg2rad,
                np.exp, np.exp2, np.expm1, np.floor, np.log,
                np.log10, np.log1p, np.log2, np.rad2deg,
                np.sin, np.sinh, np.sqrt, np.tan, np.tanh, np.trunc]:
        y = fct(x)
        f = theano.function([x], y)
        utt.assert_allclose(np.nan_to_num(f(data)),
                            np.nan_to_num(fct(data))) 
Example #24
Source File: cameraConfig.py    From crappy with GNU General Public License v2.0
def run(self):
    """Expects a tuple of 3 args through the pipe:
        - out_size: Tuple, The dimensions of the output histogram image
        - hist_range: Tuple, The lower and upper value of the histogram
          (eg: (0,256) for full scale uint8)
        - img: A numpy array with the image,
          if not single channel, it will be converted to a single channel
    """
    while True:
      out_size,hist_range,img = self.pipe.recv()
      if not isinstance(out_size,tuple):
        break
      hist_range = hist_range[0],hist_range[1]+1
      # np.histogram returns one count per bin: one fewer than bin edges
      if len(img.shape) == 3:
        img = np.mean(img,axis=2)
      assert len(img.shape) == 2,"Invalid image: shape= "+str(img.shape)
      # The actual histogram
      h = np.histogram(img,bins=np.arange(*hist_range))[0]
      x = np.arange(out_size[1])  # the base of the image
      # We need to interpolate the histogram on the size of the output image
      l = hist_range[1]-hist_range[0]-1
      fx = np.arange(0,out_size[1],out_size[1]/l,dtype=float)
      #fx *= out_size[1]/len(fx)
      h2 = np.interp(x,fx,h)
      h2 = np.nan_to_num(h2*out_size[0]/h2.max())
      out_img = np.zeros(out_size)
      for i in range(out_size[1]):
        out_img[0:int(out_size[0]-h2[i]),i] = 255
      self.pipe.send(out_img) 
Example #25
Source File: transformers.py    From PADME with MIT License
def transform_array(self, X, y, w):
    """Transform the data in a set of (X, y, w) arrays."""
    if self.transform_X:
      if not hasattr(self, 'move_mean') or self.move_mean:
        X = np.nan_to_num((X - self.X_means) / self.X_stds)
      else:
        X = np.nan_to_num(X / self.X_stds)
    if self.transform_y:
      if not hasattr(self, 'move_mean') or self.move_mean:
        y = np.nan_to_num((y - self.y_means) / self.y_stds)
      else:
        y = np.nan_to_num(y / self.y_stds)
    return (X, y, w) 
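The hazard this transformer handles, in a standalone sketch: a feature with zero variance standardizes to 0/0 = NaN, which nan_to_num maps back to 0.

import numpy as np

X = np.array([[1., 5.],
              [1., 7.]])
X_means, X_stds = X.mean(axis=0), X.std(axis=0)
with np.errstate(invalid='ignore'):
    print(np.nan_to_num((X - X_means) / X_stds))
# [[ 0. -1.]
#  [ 0.  1.]]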
Example #26
Source File: column.py    From CHAID with Apache License 2.0
def __init__(self, arr=None, metadata=None, missing_id='<missing>',
                 weights=None):
        if not np.issubdtype(arr.dtype, np.number):
            raise ValueError('Must only pass numerical values to create continuous column')

        super(self.__class__, self).__init__(np.nan_to_num(arr), metadata, missing_id=missing_id, weights=weights) 
Example #27
Source File: obsdata.py    From eht-imaging with GNU General Public License v3.0
def add_leakage_noise(self, Dterm_amp=0.1, min_noise=0.01, debias=False):
        """Add estimated systematic noise from leakage at quadrature to thermal noise.
           Requires cross-hand visibilities.
           !! this operation is not currently tracked and should be applied with extreme caution!!

           Args:
               Dterm_amp (float): Estimated magnitude of leakage terms
               min_noise (float): Minimum fractional systematic noise to add
               debias (bool): Debias amplitudes before computing fractional noise

           Returns:
               (Obsdata): An Obsdata object with the inflated noise values.
        """

        # Extract visibility amplitudes
        # Switch to Stokes for graceful handling of circular basis products missing RR or LL
        amp = self.switch_polrep('stokes').unpack('amp', debias=debias)['amp']
        rlamp = np.nan_to_num(self.switch_polrep('circ').unpack('rlamp', debias=debias)['rlamp'])
        lramp = np.nan_to_num(self.switch_polrep('circ').unpack('lramp', debias=debias)['lramp'])

        frac_noise = (Dterm_amp * rlamp / amp)**2 + (Dterm_amp * lramp / amp)**2
        frac_noise = frac_noise * (frac_noise > min_noise) + min_noise * (frac_noise < min_noise)

        out = self.copy()
        for sigma in ['sigma1', 'sigma2', 'sigma3', 'sigma4']:
            try:
                field = self.poldict[sigma]
                out.data[field] = (self.data[field]**2 + np.abs(frac_noise * amp)**2)**0.5
            except KeyError:
                continue

        return out 
Example #28
Source File: obsdata.py    From eht-imaging with GNU General Public License v3.0
def flag_large_fractional_pol(self, max_fractional_pol=1.0, output='kept'):
        """Flag visibilities for which the fractional polarization is above a specified threshold

           Args:
               max_fractional_pol (float): Maximum fractional polarization
               output (str): returns 'kept', 'flagged', or 'both' (a dictionary)

           Returns:
               (Obsdata): an observation object with flagged data points removed
        """

        m = np.nan_to_num(self.unpack(['mamp'])['mamp'])
        mask = m < max_fractional_pol

        datatable_kept = self.data.copy()
        datatable_flagged = self.data.copy()

        datatable_kept = datatable_kept[mask]
        datatable_flagged = datatable_flagged[np.invert(mask)]
        print('Flagged %d/%d visibilities' % (len(datatable_flagged), len(self.data)))

        obs_kept = self.copy()
        obs_flagged = self.copy()
        obs_kept.data = datatable_kept
        obs_flagged.data = datatable_flagged

        if output == 'flagged':
            return obs_flagged
        elif output == 'both':
            return {'kept': obs_kept, 'flagged': obs_flagged}
        else:
            return obs_kept 
Example #29
Source File: network2.py    From WannaPark with GNU General Public License v3.0
def fn(a, y):
        """Return the cost associated with an output ``a`` and desired output
        ``y``.  Note that np.nan_to_num is used to ensure numerical
        stability.  In particular, if both ``a`` and ``y`` have a 1.0
        in the same slot, then the expression (1-y)*np.log(1-a)
        returns nan.  The np.nan_to_num ensures that that is converted
        to the correct value (0.0).

        """
        return np.sum(np.nan_to_num(-y*np.log(a)-(1-y)*np.log(1-a))) 
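A direct check of the docstring's claim (a sketch): when a == y == 1.0 the term (1-y)*np.log(1-a) is 0 * -inf = NaN, and nan_to_num restores the correct contribution of 0.0.

import numpy as np

a = np.array([1.0, 0.8])
y = np.array([1.0, 1.0])
with np.errstate(divide='ignore', invalid='ignore'):
    print(np.sum(np.nan_to_num(-y*np.log(a) - (1-y)*np.log(1-a))))
# 0.22314... = -log(0.8)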
Example #30
Source File: block.py    From gluon-cv with Apache License 2.0
def purge_model_nan(net, nan=0.0, posinf=0.0, neginf=0.0, verbose=False):
    """Purge non infinite values in model parameters. GPU trained model may
    contain nan/inf/-inf values which is hidden since CUDNN may handle nan
    implicitly. This may cause model to produce nan during CPU inference.

    Weights will be overwritten inplace.

    Parameters
    ----------
    net : mxnet.gluon.Block
        The network whose weights will be purged to remove nan/inf/-inf.
    nan : float, default is 0.0
        Value to be used to fill NaN values.
        If no value is passed then NaN values will be replaced with 0.0.
    posinf : float, default is 0.0
        Value to be used to fill +Inf values.
        If no value is passed then +Inf values will be replaced with 0.0.
    neginf : float, default is 0.0
        Value to be used to fill -Inf values.
        If no value is passed then -Inf values will be replaced with 0.0.
    verbose : bool
        If True, will print out what parameters are modified.
    """
    for k, v in net.collect_params().items():
        np_data = v.data().asnumpy()
        if not np.isfinite(np_data).all():
            if verbose:
                print(k, ': Overwritten {} values...'.format(
                    np_data.size - np.isfinite(np_data).sum()))
            new_data = np.nan_to_num(np_data, nan=nan, posinf=posinf, neginf=neginf)
            v.set_data(new_data)