Python normalize

The following are 60 Python code examples related to "normalize". You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.
Example 1
Source File: nlp_utils.py    From text-gcn-chainer with Creative Commons Zero v1.0 Universal 8 votes vote down vote up
def normalize_text(string):
    """ Text normalization from
    https://github.com/yoonkim/CNN_sentence/blob/23e0e1f735570/process_data.py
    as specified in Yao's paper.
    """
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", " \( ", string)
    string = re.sub(r"\)", " \) ", string)
    string = re.sub(r"\?", " \? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower() 
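A quick check of what this produces (assuming `re` is imported, as the source file does): clitics like "n't" become separate tokens and punctuation is padded with spaces.

print(normalize_text("Don't stop!  (Really?)"))
# -> "do n't stop ! ( really ? )"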
Example 2
Source File: cifar10_objectDetection.py    From Deep-Learning-By-Example with MIT License 8 votes vote down vote up
def normalize_images(images):
    # initial zero ndarray
    normalized_images = np.zeros_like(images.astype(float))

    # The first index is the number of images; the remaining indices are the
    # height, width and depth of the image
    num_images = images.shape[0]

    # Computing the minimum and maximum value of the input image to do the normalization based on them
    maximum_value, minimum_value = images.max(), images.min()

    # Normalize all the pixel values of the images to be from 0 to 1
    for img in range(num_images):
        normalized_images[img, ...] = (images[img, ...] - float(minimum_value)) / float(maximum_value - minimum_value)

    return normalized_images


# One-hot encoding the input images' classes: each image's label becomes a vector of zeros
# with a one at its class index. The length of this vector equals the number of classes in
# the dataset, which is 10 for CIFAR-10 
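A short usage sketch for normalize_images above (shapes and values are illustrative). Note that the minimum and maximum are computed over the whole batch, not per image, so an individual image need not span the full [0, 1] range:

import numpy as np

images = np.random.randint(0, 256, size=(5, 32, 32, 3)).astype(np.uint8)
normalized = normalize_images(images)
print(normalized.min(), normalized.max())  # close to 0.0 and 1.0 over the batch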
Example 3
Source File: resctrl_allocations.py    From workload-collocation-agent with Apache License 2.0 8 votes vote down vote up
def normalize_mb_string(mb: str, platform_sockets: int, mb_min_bandwidth: int,
                        mb_bandwidth_gran: int) -> str:
    assert mb_min_bandwidth is not None
    assert mb_bandwidth_gran is not None

    if not mb.startswith('MB:'):
        raise InvalidAllocations(
            'mb resources setting should start with "MB:" prefix (got %r)' % mb)

    domains = _parse_schemata_file_row(mb)
    _validate_domains(domains, platform_sockets)

    normalized_mb_string = 'MB:'
    for domain in domains:
        try:
            mb_value = int(domains[domain])
        except ValueError:
            raise InvalidAllocations("{} is not integer format".format(domains[domain]))

        normalized_mb_value = _normalize_mb_value(mb_value, mb_min_bandwidth, mb_bandwidth_gran)
        normalized_mb_string += '{}={};'.format(domain, normalized_mb_value)

    normalized_mb_string = normalized_mb_string[:-1]

    return normalized_mb_string 
Example 4
Source File: graphTools.py    From graph-neural-networks with GNU General Public License v3.0 7 votes vote down vote up
def normalizeAdjacency(W):
    """
    NormalizeAdjacency: Computes the degree-normalized adjacency matrix

    Input:

        W (np.array): adjacency matrix

    Output:

        A (np.array): degree-normalized adjacency matrix
    """
    # Check that the matrix is square
    assert W.shape[0] == W.shape[1]
    # Compute the degree vector
    d = np.sum(W, axis = 1)
    # Invert the square root of the degree
    d = 1/np.sqrt(d)
    # And build the square root inverse degree matrix
    D = np.diag(d)
    # Return the Normalized Adjacency
    return D @ W @ D 
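As a worked example (assuming `numpy` is imported as `np`), the returned matrix is D^{-1/2} W D^{-1/2}; note that a node with degree zero would cause a division by zero here:

import numpy as np

# 3-node path graph: 0 - 1 - 2, degrees d = [1, 2, 1]
W = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])
A = normalizeAdjacency(W)
print(A[0, 1])  # 1/sqrt(d_0 * d_1) = 1/sqrt(2) ~ 0.7071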
Example 5
Source File: electrodeGUI.py    From simnibs with GNU General Public License v3.0 7 votes vote down vote up
def normalizeAngle(self, angle):
    while angle < 0:
        angle += 360 * 16
    while angle > 360 * 16:
        angle -= 360 * 16
    return angle 
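The 360 * 16 bound suggests Qt's convention of storing rotation angles in sixteenths of a degree. Called as a plain function (passing None for the unused self), it wraps angles into that range:

print(normalizeAngle(None, -90 * 16))  # 4320, i.e. 270 degrees
print(normalizeAngle(None, 450 * 16))  # 1440, i.e. 90 degrees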
Example 6
Source File: utils.py    From EpiTator with Apache License 2.0 7 votes vote down vote up
def normalize_text(text):
    """
    Attempt to convert text to a simplified representation so it can
    be compared ignoring diacritical marks, differences in character codes
    for similar symbols, differences in whitespace, and similar issues.
    If the simplified text ends up being too short to form a
    useful representation of the text, the original text will be returned.
    That will happen when non-Latin text is used, because non-Latin characters
    are usually removed from the simplified text.
    """
    text = text.replace('\u2019', "'")
    result = unicodedata.normalize('NFKD', text)\
        .encode('ascii', 'ignore').decode()
    result = space_punct_re.sub(' ', result).strip()
    if len(result) < 3:
        result = space_punct_re.sub(' ', text).strip()
    return result 
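`space_punct_re` is defined elsewhere in EpiTator; below is a plausible stand-in (the exact pattern is an assumption) plus a demonstration of the diacritic stripping:

import re
import unicodedata

# Assumed definition; the real pattern lives elsewhere in EpiTator.
space_punct_re = re.compile(r"[\s.,;:!?()\[\]-]+")

print(normalize_text("Café — naïve"))  # -> "Cafe naive"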
Example 7
Source File: text_helpers.py    From TensorFlow-Machine-Learning-Cookbook with MIT License 7 votes vote down vote up
def normalize_text(texts, stops):
    # Lower case
    texts = [x.lower() for x in texts]

    # Remove punctuation
    texts = [''.join(c for c in x if c not in string.punctuation) for x in texts]

    # Remove numbers
    texts = [''.join(c for c in x if c not in '0123456789') for x in texts]

    # Remove stopwords
    texts = [' '.join([word for word in x.split() if word not in stops]) for x in texts]

    # Trim extra whitespace
    texts = [' '.join(x.split()) for x in texts]
    
    return texts


# Build dictionary of words 
Example 8
Source File: transforms.py    From fastMRI with MIT License 6 votes vote down vote up
def normalize_instance(data, eps=0.):
    """
    Normalize the given tensor using:
        (data - mean) / (stddev + eps)
    where mean and stddev are computed from the data itself.

    Args:
        data (torch.Tensor): Input data to be normalized
        eps (float): Added to stddev to prevent dividing by zero

    Returns:
        tuple: (normalized torch.Tensor, mean, std)
    """
    mean = data.mean()
    std = data.std()
    return normalize(data, mean, std, eps), mean, std


# Helper functions 
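The `normalize` helper called on the return line is not shown here; a minimal sketch consistent with the docstring above (treat this as an assumption, not fastMRI's exact code):

def normalize(data, mean, stddev, eps=0.):
    # Sketch of the helper assumed above: shift to zero mean and scale to
    # unit variance, with eps guarding against division by zero.
    return (data - mean) / (stddev + eps)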
Example 9
Source File: base.py    From smallrnaseq with GNU General Public License v3.0 6 votes vote down vote up
def normalize_samples(counts, norm_method='library', rename=True):
    """Normalize over a matrix of samples explicitly, this will overwrite any 'norm'
       columns created previously when pivoting the count data.

       Args:
            counts: dataframe of raw count data with samples per column
            rename: rename columns with 'norm' label and add to existing ones
       Returns: dataframe of raw /normalised read counts
    """

    x = counts
    if norm_method == 'library':
        n = total_library_normalize(x)
    elif norm_method == 'quantile':
        n = quantile_normalize(x)
    if rename:
        n.columns = [i + ' norm' for i in n.columns]
        x = x.join(n)
    else:
        x = n
    return x 
Example 10
Source File: sparse_weights.py    From nupic.torch with GNU Affero General Public License v3.0 6 votes vote down vote up
def normalize_sparse_weights(m):
    """Initialize the weights using kaiming_uniform initialization normalized
    to the number of non-zeros in the layer instead of the whole input size.

    Similar to torch.nn.Linear.reset_parameters() but applying weight
    sparsity to the input size
    """
    if isinstance(m, SparseWeightsBase):
        _, input_size = m.module.weight.shape
        fan = int(input_size * (1.0 - m.sparsity))
        gain = nn.init.calculate_gain("leaky_relu", math.sqrt(5))
        std = gain / math.sqrt(fan)
        bound = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation
        nn.init.uniform_(m.module.weight, -bound, bound)
        if m.module.bias is not None:
            bound = 1 / math.sqrt(fan)
            nn.init.uniform_(m.module.bias, -bound, bound) 
Example 11
Source File: densityTools.py    From armi with Apache License 2.0 6 votes vote down vote up
def normalizeNuclideList(nuclideVector, normalization=1.0):
    """
    Normalize the nuclide vector.

    Parameters
    ----------
    nuclideVector : dict
        dictionary of values -- e.g. floats, ints -- indexed by nuclide identifiers -- e.g. nucNames or nuclideBases

    normalization : float
        The value the normalized vector should sum to (default 1.0).

    Returns
    -------
    nuclideVector : dict
        dictionary of values indexed by nuclide identifiers -- e.g. nucNames or nuclideBases
    """

    normalizationFactor = sum(nuclideVector.values()) / normalization

    for nucName, mFrac in nuclideVector.items():
        nuclideVector[nucName] = mFrac / normalizationFactor

    return nuclideVector 
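For instance, normalizing a mass-fraction dictionary so it sums to 1.0 (note the input dict is modified in place and also returned):

composition = {"U235": 3.0, "U238": 97.0}
print(normalizeNuclideList(composition))
# -> {'U235': 0.03, 'U238': 0.97}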
Example 12
Source File: bv.py    From claripy with BSD 2-Clause "Simplified" License 6 votes vote down vote up
def normalize_types(f):
    @functools.wraps(f)
    def normalize_helper(self, o):
        if _d._DEBUG:
            if hasattr(o, '__module__') and o.__module__ == 'z3':
                raise ValueError("this should no longer happen")
        if isinstance(o, numbers.Number):
            o = BVV(o, self.bits)
        if isinstance(self, numbers.Number):
            self = BVV(self, self.bits)

        if not isinstance(self, BVV) or not isinstance(o, BVV):
            return NotImplemented
        return f(self, o)

    return normalize_helper 
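A sketch of how such a decorator is typically applied; the BVV class below is a minimal illustrative stand-in, not claripy's real implementation, and `_d` is stubbed out because the decorator consults its debug flag:

import functools
import numbers

class _d:            # stub for claripy's debug-flag module referenced above
    _DEBUG = False

class BVV:
    def __init__(self, value, bits):
        self.bits = bits
        self.value = value & ((1 << bits) - 1)   # wrap to the bit width

    @normalize_types
    def __add__(self, o):
        return BVV(self.value + o.value, self.bits)

x = BVV(250, 8)
print((x + 10).value)   # 4 -- the plain int 10 was promoted to BVV(10, 8)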
Example 13
Source File: __init__.py    From royal-chaos with MIT License 6 votes vote down vote up
def normalize_type_string(type_string):
    """
    Return a type string in a canonical format, with deterministic
    placement of modifiers and spacing.  Useful to make sure two type
    strings match regardless of small variations of representation
    that do not change the meaning.

    :param type_string: C type expression
    :returns: another string representing the same C type but in a canonical format

    >>> normalize_type_string('char *')
    'char *'
    >>> normalize_type_string('const foo::bar<const char*, zbr&>*')
    'foo::bar< char const *, zbr & > const *'
    >>> normalize_type_string('const ::bar*')
    'bar const *'
    >>> normalize_type_string('const char*const')
    'char const * const'
    >>> normalize_type_string('const char*const*const')
    'char const * const * const'
    >>> normalize_type_string('const std::map<std::string, void (*) (int, std::vector<zbr>) >')
    'std::map< std::string, void ( * ) ( int, std::vector< zbr > ) > const'
    """
    ctype = parse_type(type_string)
    return str(ctype) 
Example 14
Source File: squad_eval.py    From MatchLSTM-PyTorch with MIT License 6 votes vote down vote up
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return re.sub(r'\b(a|an|the|eos|pad)\b', ' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s)))) 
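For example (with `re` and `string` imported as in the source):

print(normalize_answer("The  Eiffel Tower!"))  # -> "eiffel tower"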
Example 15
Source File: parser.py    From aliyun-log-cli with MIT License 6 votes vote down vote up
def normalize_inputs(arguments, method_types):
    method_name = ''
    for m in method_types:
        if m in arguments and arguments[m]:
            method_name = m
            break

    if not method_name:
        raise ValueError("unknown command:" + method_name)

    real_args = dict((k.replace('--', ''), v) for k, v in arguments.items() if k.startswith('--') and v is not None)

    # convert args
    converted_args = _convert_args(real_args, method_types[method_name])

    return method_name, converted_args 
Example 16
Source File: pose_helper.py    From Blender-Metaverse-Addon with GNU General Public License v3.0 6 votes vote down vote up
def normalize_influence_for(constraints, of_type, amount=1.0):
    total_value = 0
    filtered_constraints = []
    for constraint in constraints:
        if constraint.type == of_type:
            print("Found")
            total_value += constraint.influence
            filtered_constraints.append(constraint)

    if len(filtered_constraints) <= 1:
        print("Not enough constraints to normalize")
        return False

    for constraint in filtered_constraints:
        constraint.influence = (constraint.influence / total_value) * amount 
Example 17
Source File: preprocess_ops.py    From tpu_models with Apache License 2.0 6 votes vote down vote up
def normalize_image(image):
  """Normalize the image.

  Args:
    image: a tensor of shape [height, width, 3] in dtype=tf.float32.

  Returns:
    normalized_image: a tensor which has the same shape and dtype as image,
      with pixel values normalized.
  """
  offset = tf.constant([0.485, 0.456, 0.406])
  offset = tf.expand_dims(offset, axis=0)
  offset = tf.expand_dims(offset, axis=0)
  normalized_image = image - offset
  scale = tf.constant([0.229, 0.224, 0.225])
  scale = tf.expand_dims(scale, axis=0)
  scale = tf.expand_dims(scale, axis=0)
  normalized_image /= scale
  return normalized_image 
Example 18
Source File: util.py    From python-sensor with MIT License 6 votes vote down vote up
def normalize_aws_lambda_arn(context):
    """
    Parse the AWS Lambda context object for a fully qualified AWS Lambda function ARN.

    This method will ensure that the returned value matches the following ARN pattern:
      arn:aws:lambda:${region}:${account-id}:function:${name}:${version}

    @param context:  AWS Lambda context object
    @return: the fully qualified ARN as a string, or ``None`` if the context cannot be parsed
    """
    try:
        arn = context.invoked_function_arn
        parts = arn.split(':')

        count = len(parts)
        if count == 7:
            # need to append version
            arn = arn + ':' + context.function_version
        elif count != 8:
            logger.debug("Unexpected ARN parse issue: %s", arn)

        return arn
    except:
        logger.debug("normalize_arn: ", exc_info=True) 
Example 19
Source File: encoder.py    From plugin.program.openwizard with GNU General Public License v3.0 6 votes vote down vote up
def normalize_errorlevel(error, accept_none=False):
    """\
    Returns a constant for the provided error level.

    This function returns ``None`` if the provided parameter is ``None`` and
    `accept_none` is set to ``True`` (default: ``False``). If `error` is ``None``
    and `accept_none` is ``False``, or if the provided parameter cannot be
    mapped to a valid QR Code error level, an ErrorLevelError is raised.

    :param error: String or ``None``.
    :param bool accept_none: Indicates if ``None`` is accepted as error level.
    :rtype: int
    """
    if error is None:
        if not accept_none:
            raise ErrorLevelError('The error level must be provided')
        return error
    try:
        return consts.ERROR_MAPPING[error.upper()]
    except:  # KeyError or error.upper() fails
        if error in consts.ERROR_MAPPING.values():
            return error
        raise ErrorLevelError('Illegal error correction level: "{0}". '
                              'Supported levels: {1}'
                              .format(error, ', '.join(sorted(consts.ERROR_MAPPING.keys())))) 
Example 20
Source File: encoder.py    From plugin.program.openwizard with GNU General Public License v3.0 6 votes vote down vote up
def normalize_mode(mode):
    """\
    Returns a (Micro) QR Code mode constant which is equivalent to the
    provided `mode`.

    In case the provided `mode` is ``None``, this function returns ``None``.
    Otherwise a mode constant is returned unless the provided parameter cannot
    be mapped to a valid mode. In the latter case, a ModeError is raised.

    :param mode: An integer or string or ``None``.
    :raises: ModeError: In case the provided `mode` does not represent a valid
             QR Code mode.
    :rtype: int or None
    """
    if mode is None or (isinstance(mode, int)
                        and mode in consts.MODE_MAPPING.values()):
        return mode
    try:
        return consts.MODE_MAPPING[mode.lower()]
    except:  # KeyError or mode.lower() fails
        raise ModeError('Illegal mode "{0}". Supported values: {1}'
                        .format(mode, ', '.join(sorted(consts.MODE_MAPPING.keys())))) 
Example 21
Source File: encoder.py    From plugin.program.openwizard with GNU General Public License v3.0 6 votes vote down vote up
def normalize_mask(mask, is_micro):
    """\
    Normalizes the (user specified) mask.

    :param mask: A mask constant
    :type mask: int or None
    :param bool is_micro: Indicates if the mask is meant to be used for a
            Micro QR Code.
    """
    if mask is None:
        return None
    try:
        mask = int(mask)
    except ValueError:
        raise MaskError('Invalid data mask "{0}". Must be an integer or a string which represents an integer value.'.format(mask))
    if is_micro:
        if not 0 <= mask < 4:
            raise MaskError('Invalid data mask "{0}" for Micro QR Code. Must be in range 0 .. 3'.format(mask))
    else:
        if not 0 <= mask < 8:
            raise MaskError('Invalid data mask "{0}". Must be in range 0 .. 7'.format(mask))
    return mask 
Example 22
Source File: base_provider.py    From cluster-loss-tensorflow with BSD 2-Clause "Simplified" License 6 votes vote down vote up
def normalize_images(self, images, normalization_type):
    """
    Args:
        images: numpy 4D array
        normalization_type: `str`, available choices:
            - divide_255
            - divide_256
            - by_chanels
    """
    if normalization_type == 'divide_255':
        images = images / 255
    elif normalization_type == 'divide_256':
        images = images / 256
    elif normalization_type == 'by_chanels':
        images = images.astype('float64')
        # for every channel in the image (assumed to be the last dimension)
        for i in range(images.shape[-1]):
            images[:, :, :, i] = ((images[:, :, :, i] - self.images_means[i]) /
                                  self.images_stds[i])
    else:
        raise Exception("Unknown type of normalization")
    return images 
Example 23
Source File: preprocess.py    From efficientdet-tf with GNU General Public License v3.0 6 votes vote down vote up
def normalize_image(image: tf.Tensor) -> tf.Tensor:
    """
    Normalize the image according imagenet mean and std

    Parameters
    ----------
    image: tf.Tensor of shape [H, W, C]
        Image in [0, 1] range
    
    Returns
    -------
    tf.Tensor
        Normalized image
    """
    mean = tf.constant([0.485, 0.456, 0.406])
    std = tf.constant([0.229, 0.224, 0.225])
    return (image - mean) / std 
Example 24
Source File: imgaug.py    From imgaug with MIT License 6 votes vote down vote up
def normalize_random_state(random_state):
    """Normalize various inputs to a numpy random generator.

    Parameters
    ----------
    random_state : None or int or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.bit_generator.SeedSequence or numpy.random.RandomState
        See :func:`~imgaug.random.normalize_generator`.

    Returns
    -------
    numpy.random.Generator or numpy.random.RandomState
        In numpy <=1.16 a ``RandomState``, in 1.17+ a ``Generator`` (even if
        the input was a ``RandomState``).

    """
    import imgaug.random
    return imgaug.random.normalize_generator_(random_state) 
Example 25
Source File: utils.py    From imgaug with MIT License 6 votes vote down vote up
def normalize_shape(shape):
    """Normalize a shape ``tuple`` or ``array`` to a shape ``tuple``.

    Parameters
    ----------
    shape : tuple of int or ndarray
        The input to normalize. May optionally be an array.

    Returns
    -------
    tuple of int
        Shape ``tuple``.

    """
    if isinstance(shape, tuple):
        return shape
    assert ia.is_np_array(shape), (
        "Expected tuple of ints or array, got %s." % (type(shape),))
    return shape.shape 
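Usage is symmetric for tuples and arrays (the array path needs `imgaug` imported as `ia`, as in the source module): a tuple passes through untouched, while an array contributes its `.shape`:

import numpy as np

print(normalize_shape((32, 32, 3)))               # (32, 32, 3)
print(normalize_shape(np.zeros((4, 64, 64, 3))))  # (4, 64, 64, 3)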
Example 26
Source File: __init__.py    From CityEnergyAnalyst with MIT License 6 votes vote down vote up
def normalize_data_costs(self, data_processed, normalization, analysis_fields):
    if normalization == "gross floor area":
        data = pd.read_csv(self.locator.get_total_demand())
        normalization_factor = sum(data['GFA_m2'])
        data_processed = data_processed.apply(
            lambda x: x / normalization_factor if x.name in analysis_fields else x)
    elif normalization == "net floor area":
        data = pd.read_csv(self.locator.get_total_demand())
        normalization_factor = sum(data['Aocc_m2'])
        data_processed = data_processed.apply(
            lambda x: x / normalization_factor if x.name in analysis_fields else x)
    elif normalization == "air conditioned floor area":
        data = pd.read_csv(self.locator.get_total_demand())
        normalization_factor = sum(data['Af_m2'])
        data_processed = data_processed.apply(
            lambda x: x / normalization_factor if x.name in analysis_fields else x)
    elif normalization == "building occupancy":
        data = pd.read_csv(self.locator.get_total_demand())
        normalization_factor = sum(data['people0'])
        data_processed = data_processed.apply(
            lambda x: x / normalization_factor if x.name in analysis_fields else x)
    return data_processed 
Example 27
Source File: titer_model.py    From augur with GNU Affero General Public License v3.0 6 votes vote down vote up
def normalize_titers(self):
    '''
    convert the titer measurements into the log2 difference between the average
    titer measured between test virus and reference serum and the average
    homologous titer. all measurements relative to sera without homologous titer
    are excluded
    '''
    self.determine_autologous_titers()

    self.titers_normalized = {}
    self.consensus_titers_raw = {}
    self.measurements_per_serum = defaultdict(int)
    for (test, ref), val in self.titers.items():
        if ref in self.autologous_titers:  # use only titers for which an estimate of the autologous titer exists
            self.titers_normalized[(test, ref)] = self.normalize(ref, val)
            self.consensus_titers_raw[(test, ref)] = np.median(val)
            self.measurements_per_serum[ref] += 1
        else:
            pass
            # print("no homologous titer found:", ref) 
Example 28
Source File: utils.py    From DFGN-pytorch with MIT License 6 votes vote down vote up
def normalize_answer(s):

    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b', ' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s)))) 
Example 29
Source File: extractive_qa.py    From jack with MIT License 6 votes vote down vote up
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b', ' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s)))) 
Example 30
Source File: feature_extractor.py    From Gun-Detector with Apache License 2.0 6 votes vote down vote up
def NormalizePixelValues(image,
                         pixel_value_offset=128.0,
                         pixel_value_scale=128.0):
  """Normalize image pixel values.

  Args:
    image: a uint8 tensor.
    pixel_value_offset: a Python float, offset for normalizing pixel values.
    pixel_value_scale: a Python float, scale for normalizing pixel values.

  Returns:
    image: a float32 tensor of the same shape as the input image.
  """
  image = tf.to_float(image)
  image = tf.div(tf.subtract(image, pixel_value_offset), pixel_value_scale)
  return image 
Example 31
Source File: patchcheck.py    From python3_ios with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def normalize_docs_whitespace(file_paths):
    fixed = []
    for path in file_paths:
        abspath = os.path.join(SRCDIR, path)
        try:
            with open(abspath, 'rb') as f:
                lines = f.readlines()
            new_lines = [ws_re.sub(br'\1', line) for line in lines]
            if new_lines != lines:
                shutil.copyfile(abspath, abspath + '.bak')
                with open(abspath, 'wb') as f:
                    f.writelines(new_lines)
                fixed.append(path)
        except Exception as err:
            print('Cannot fix %s: %s' % (path, err))
    return fixed 
Example 32
Source File: complex_angular_central_gaussian.py    From pb_bss with MIT License 6 votes vote down vote up
def normalize_observation(observation):
    """

    Attention: swap D and N dim

    The dimensions are swapped, because some calculations (e.g. covariance) do
    a reduction over the sample (time) dimension. Having the time dimension on
    the last axis improves the execution time.

    Args:
        observation: (..., N, D)

    Returns:
        normalized observation (..., D, N)
    """
    observation = _unit_norm(
        observation,
        axis=-1,
        eps=np.finfo(observation.dtype).tiny,
        eps_style='where',
    )
    return np.ascontiguousarray(np.swapaxes(observation, -2, -1)) 
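`_unit_norm` is defined elsewhere in pb_bss; a rough sketch of what the call site implies (scale each vector along `axis` to unit Euclidean norm, with `eps` guarding near-zero norms), so treat the details as assumptions:

import numpy as np

def _unit_norm(signal, axis=-1, eps=1e-10, eps_style='where'):
    # Sketch only: L2-normalize along `axis`; where the norm is tiny,
    # substitute eps instead of dividing by (almost) zero.
    norm = np.linalg.norm(signal, axis=axis, keepdims=True)
    if eps_style == 'where':
        norm = np.where(norm > eps, norm, eps)
    else:
        norm = norm + eps
    return signal / norm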
Example 33
Source File: squad_utils.py    From albert with Apache License 2.0 6 votes vote down vote up
def normalize_answer_v1(s):
  """Lower text and remove punctuation, articles and extra whitespace."""

  def remove_articles(text):
    return re.sub(r"\b(a|an|the)\b", " ", text)

  def white_space_fix(text):
    return " ".join(text.split())

  def remove_punc(text):
    exclude = set(string.punctuation)
    return "".join(ch for ch in text if ch not in exclude)

  def lower(text):
    return text.lower()

  return white_space_fix(remove_articles(remove_punc(lower(s)))) 
Example 34
Source File: scaling.py    From CatLearn with GNU General Public License v3.0 6 votes vote down vote up
def target_normalize(target):
    """Return a list of normalized target values.

    Parameters
    ----------
    target : list
        A list of the target values.
    """
    target = np.asarray(target)

    data = defaultdict(list)
    data['mean'] = np.mean(target, axis=0)
    data['dif'] = np.max(target, axis=0) - np.min(target, axis=0)
    data['target'] = (target - data['mean']) / data['dif']

    return data 
Example 35
Source File: _common.py    From pathvalidate with MIT License 6 votes vote down vote up
def normalize_platform(name: PlatformType) -> Platform:
    if isinstance(name, Platform):
        return name

    if name:
        name = name.strip().lower()

    if name == "posix":
        return Platform.POSIX

    if name == "auto":
        name = platform.system().lower()

    if name in ["linux"]:
        return Platform.LINUX

    if name and name.startswith("win"):
        return Platform.WINDOWS

    if name in ["mac", "macos", "darwin"]:
        return Platform.MACOS

    return Platform.UNIVERSAL 
Example 36
Source File: data_preprocesser.py    From BCNN-keras-clean with MIT License 6 votes vote down vote up
def normalize_image(x, mean=(0., 0., 0.), std=(1.0, 1.0, 1.0)):
    '''Normalization.

    Args:
        x: input image.
        mean: mean value of the input image.
        std: standard deviation value of the input image.

    Returns:
        Normalized image.
    '''

    x = np.asarray(x, dtype=np.float32)
    if len(x.shape) == 4:
        for dim in range(3):
            x[:, :, :, dim] = (x[:, :, :, dim] - mean[dim]) / std[dim]
    if len(x.shape) == 3:
        for dim in range(3):
            x[:, :, dim] = (x[:, :, dim] - mean[dim]) / std[dim]

    return x 
Example 37
Source File: setting_parsers.py    From Thrifty with GNU General Public License v3.0 6 votes vote down vote up
def normalize_freq_range(range_, bin_freq):
    """Normalize a frequency range to discrete frequency bin values.

    Parameters
    ----------
    range_: (float, float, bool)
        The result of `freq_range`, thus (start, stop, unit_hz).
    bin_freq : float
        Width of each frequency bin, in Hertz.

    Returns
    -------
    start_bin : int
    stop_bin : int
    """

    start, stop, hz_unit = range_
    if not hz_unit:
        return int(start), int(stop)
    else:
        start = int(start / bin_freq)
        stop = int(stop / bin_freq)
        return start, stop 
Example 38
Source File: utils.py    From latent_ode with MIT License 6 votes vote down vote up
def normalize_masked_data(data, mask, att_min, att_max):
	# we don't want to divide by zero
	att_max[ att_max == 0.] = 1.

	if (att_max != 0.).all():
		data_norm = (data - att_min) / att_max
	else:
		raise Exception("Zero!")

	if torch.isnan(data_norm).any():
		raise Exception("nans!")

	# set masked out elements back to zero 
	data_norm[mask == 0] = 0

	return data_norm, att_min, att_max 
Example 39
Source File: utils.py    From latent_ode with MIT License 6 votes vote down vote up
def normalize_data(data):
	reshaped = data.reshape(-1, data.size(-1))

	att_min = torch.min(reshaped, 0)[0]
	att_max = torch.max(reshaped, 0)[0]

	# we don't want to divide by zero
	att_max[ att_max == 0.] = 1.

	if (att_max != 0.).all():
		data_norm = (data - att_min) / att_max
	else:
		raise Exception("Zero!")

	if torch.isnan(data_norm).any():
		raise Exception("nans!")

	return data_norm, att_min, att_max 
Example 40
Source File: context.py    From xcube with MIT License 6 votes vote down vote up
def normalize_prefix(prefix: Optional[str]) -> str:
    if not prefix:
        return ''

    prefix = prefix.replace('${name}', 'xcube')
    prefix = prefix.replace('${version}', version)
    prefix = prefix.replace('//', '/').replace('//', '/')

    if prefix == '/':
        return ''

    if not prefix.startswith('/'):
        prefix = '/' + prefix

    if prefix.endswith('/'):
        prefix = prefix[0:-1]

    return prefix


# noinspection PyUnusedLocal 
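A few illustrative calls to normalize_prefix (the version substitution depends on the installed xcube release, so that value is only indicative):

print(normalize_prefix(None))    # ''
print(normalize_prefix('api/'))  # '/api'
print(normalize_prefix('${name}/v${version}'))  # e.g. '/xcube/v0.5.1'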
Example 41
Source File: dal_env.py    From dal with MIT License 6 votes vote down vote up
def normalize_gtl(self):
    # Both torch tensor branches converted to numpy the same way, so they are merged here.
    if type(self.gt_likelihood_high).__name__ in ('torch.CudaTensor', 'Tensor'):
        gt_high = self.gt_likelihood_high.cpu().numpy()
    else:
        gt_high = self.gt_likelihood_high
    # self.gt_likelihood_unnormalized = np.copy(self.gt_likelihood)
    if self.args.gtl_output == "softmax":
        gt_high = softmax(gt_high, self.args.temperature)
    elif self.args.gtl_output == "softermax":
        gt_high = softermax(gt_high.cpu())
    elif self.args.gtl_output == "linear":
        gt_high = np.clip(gt_high.cpu(), 1e-5, 1.0)
        gt_high = gt_high / gt_high.sum()
    self.gt_likelihood_high = torch.tensor(gt_high).float().to(self.device) 
Example 42
Source File: theano_backend.py    From GraphicDesignPatternByPython with MIT License 6 votes vote down vote up
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    if gamma is None:
        if beta is None:
            gamma = ones_like(x)
        else:
            gamma = ones_like(beta)
    if beta is None:
        if gamma is None:
            beta = zeros_like(x)
        else:
            beta = zeros_like(gamma)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2) 
Example 43
Source File: utils.py    From ReGraph with MIT License 6 votes vote down vote up
def normalize_relation(relation):
    new_relation_dict = dict()
    for key, values in relation.items():
        if type(values) == set:
            new_relation_dict[key] = values
        elif type(values) == str:
            new_relation_dict[key] = {values}
        else:
            try:
                new_set = set()
                for v in values:
                    new_set.add(v)
                new_relation_dict[key] = new_set
            except TypeError:
                new_relation_dict[key] = {values}
    return new_relation_dict 
Example 44
Source File: utils.py    From ReGraph with MIT License 6 votes vote down vote up
def normalize_typing_relation(typing_rel):
    new_typing_rel = format_typing(typing_rel)
    for g, typing_rel in new_typing_rel.items():
        for key, values in typing_rel.items():
            value_set = set()
            if type(values) == str:
                value_set.add(values)
            else:
                try:
                    for v in values:
                        value_set.add(v)
                except TypeError:
                    value_set.add(values)
            if len(value_set) > 0:
                new_typing_rel[g][key] = value_set
    return new_typing_rel 
Example 45
Source File: __init__.py    From uoft-scrapers with MIT License 6 votes vote down vote up
def normalize_text_sections(div):
    paragraph = ''
    for content in div.contents:
        text = ''
        if type(content) == NavigableString:
            text = content
        elif type(content) == Comment:
            pass
        else:
            text = content.text
        paragraph += text.strip() + ' '
    paragraph = paragraph.strip()
    paragraph = paragraph.replace('\r', '')
    paragraph = paragraph.replace('\n', ', ')
    return paragraph 
Example 46
Source File: keyfile.py    From etheno with GNU Affero General Public License v3.0 6 votes vote down vote up
def normalize_keys(keyfile_json):
    for key, value in keyfile_json.items():
        if is_string(key):
            norm_key = key.lower()
        else:
            norm_key = key

        if is_dict(value):
            norm_value = normalize_keys(value)
        else:
            norm_value = value

        yield norm_key, norm_value


#
# Version 3 creators
# 
Example 47
Source File: edvehicles.py    From edr with Apache License 2.0 6 votes vote down vote up
def normalize_module_name(name):
    normalized = name.lower()

    # the "_name" / "_name;" suffix is not used in loadout or afmurepair events
    if normalized.endswith(u"_name"):
        useless_suffix_length = len(u"_name")
        normalized = normalized[:-useless_suffix_length]
    elif normalized.endswith(u"_name;"):
        useless_suffix_length = len(u"_name;")
        normalized = normalized[:-useless_suffix_length]

    if normalized.startswith(u"$"):
        normalized = normalized[1:]

    # just get rid of prefixes because sometimes int_ becomes ext_ depending on the event
    if normalized.startswith((u"int_", u"ext_", u"hpt_")):
        normalized = normalized[4:]
    return normalized 
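For example, journal-style module identifiers reduce to a bare module name:

print(normalize_module_name("$int_engine_class2_name;"))    # -> "engine_class2"
print(normalize_module_name("Hpt_BeamLaser_Gimbal_Medium")) # -> "beamlaser_gimbal_medium"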
Example 48
Source File: grad_normalizers.py    From Hydra with MIT License 6 votes vote down vote up
def normalize_grads(grads, losses, normalization_type):
    """Grads should be a 2D tensor of flattened gradients
    """
    if normalization_type == 'l2':
        gns = (grads.pow(2).sum(1)).sqrt()
    elif normalization_type == 'loss':
        gns = losses
    elif normalization_type == 'loss+':
        gns = losses * (grads.pow(2).sum(1)).sqrt()
    else:
        gns = torch.ones(grads.shape[0], device=grads.device)

    # according to documentation, the following are in-place
    transposed = torch.transpose(grads, 1, 0)
    transposed /= (gns + 1e-8)
    grads = torch.transpose(transposed, 1, 0)
    return grads 
Example 49
Source File: utils.py    From plugin.audio.spotify with GNU General Public License v3.0 6 votes vote down vote up
def normalize_string(text):
    import unicodedata
    text = text.replace(":", "")
    text = text.replace("/", "-")
    text = text.replace("\\", "-")
    text = text.replace("<", "")
    text = text.replace(">", "")
    text = text.replace("*", "")
    text = text.replace("?", "")
    text = text.replace('|', "")
    text = text.replace('(', "")
    text = text.replace(')', "")
    text = text.replace("\"", "")
    text = text.strip()
    text = text.rstrip('.')
    text = unicodedata.normalize('NFKD', try_decode(text))
    return text 
Example 50
Source File: coqa.py    From SogouMRCToolkit with Apache License 2.0 6 votes vote down vote up
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
        return re.sub(regex, ' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s)))) 
Example 51
Source File: __init__.py    From pycon with MIT License 6 votes vote down vote up
def normalize_position(ticket: CreateOrderTicket, items: dict, questions: dict):
    item = items[ticket.ticket_id]

    data = {
        "item": ticket.ticket_id,
        "variation": ticket.variation,
        "answers": normalize_answers(ticket, questions),
    }

    if ticket.voucher:
        data["voucher"] = ticket.voucher

    if item["admission"]:
        data["attendee_name"] = ticket.attendee_name
        data["attendee_email"] = ticket.attendee_email

    return data 
Example 52
Source File: location.py    From ngsi-timeseries-api with MIT License 6 votes vote down vote up
def normalize_location(entity: Optional[dict]):
    """
    Force GeoJSON for the input entity's location attribute and add the location
    centroid attribute to the entity.
    If no entity is passed in or there's no location attribute or the location
    isn't of a known type, this function won't modify or add the location
    attribute, but will still set the centroid to ``None`` to reflect the fact
    that we're not able to handle the entity's location.

    :param entity: the entity to modify.
    """
    location = LocationAttribute(entity)
    geojson_location = location.as_geojson()

    if geojson_location:
        entity[LOCATION_ATTR_NAME] = geojson_location

        centroid = location.compute_centroid()
        if centroid:
            entity[CENTROID_ATTR_NAME] = centroid.to_ngsi_attribute()

    elif entity:
        entity.pop(CENTROID_ATTR_NAME, None) 
Example 53
Source File: dataloader.py    From training_results_v0.5 with Apache License 2.0 6 votes vote down vote up
def normalize_image(self):
    """Normalize the image to zero mean and unit variance."""
    # The image normalization is identical to Cloud TPU ResNet.
    self._image = tf.image.convert_image_dtype(self._image, dtype=tf.float32)
    offset = tf.constant([0.485, 0.456, 0.406])
    offset = tf.expand_dims(offset, axis=0)
    offset = tf.expand_dims(offset, axis=0)
    self._image -= offset

    # This is simlar to `PIXEL_MEANS` in the reference. Reference: https://github.com/ddkang/Detectron/blob/80f329530843e66d07ca39e19901d5f3e5daf009/lib/core/config.py#L909  # pylint: disable=line-too-long
    mlperf_log.maskrcnn_print(key=mlperf_log.INPUT_NORMALIZATION_STD,
                              value=[0.229, 0.224, 0.225])
    scale = tf.constant([0.229, 0.224, 0.225])
    scale = tf.expand_dims(scale, axis=0)
    scale = tf.expand_dims(scale, axis=0)
    self._image /= scale 
Example 54
Source File: generate_forcefields_from_molfile.py    From perses with MIT License 6 votes vote down vote up
def normalize_molecule(mol):
    # Assign aromaticity.
    oechem.OEAssignAromaticFlags(mol, oechem.OEAroModelOpenEye)

    # Add hydrogens.
    oechem.OEAddExplicitHydrogens(mol)

    # Check for any missing atom names, if found reassign all of them.
    if any([atom.GetName() == '' for atom in mol.GetAtoms()]):
        oechem.OETriposAtomNames(mol)

    ofs = oechem.oemolostream('out.mol2')
    ofs.SetFormat(oechem.OEFormat_MOL2H)
    oechem.OEWriteMolecule(ofs, mol)
    ofs.close()

    return mol 
Example 55
Source File: my_dnn_mitdb.py    From ecg-classification with GNU General Public License v3.0 6 votes vote down vote up
def normalize_data(train_data, eval_data):
  feature_size = len(train_data[0])
  if compute_RR_interval_feature:
    feature_size = feature_size - 4

  max_wav = np.amax(np.vstack((train_data[:, 0:feature_size], eval_data[:, 0:feature_size])))
  min_wav = np.amin(np.vstack((train_data[:, 0:feature_size], eval_data[:, 0:feature_size])))
    
  train_data[:, 0:feature_size] = ((train_data[:,0:feature_size] - min_wav) / (max_wav - min_wav))

  eval_data[:, 0:feature_size] = ((eval_data[:,0:feature_size] - min_wav) / (max_wav - min_wav))
  # Normalize the last part of the features: the RR intervals
  if compute_RR_interval_feature:

    max_rr = np.amax(np.vstack((train_data[:, feature_size:], eval_data[:, feature_size:])))
    min_rr = np.amin(np.vstack((train_data[:, feature_size:], eval_data[:, feature_size:])))

    train_data[:, feature_size:] = ((train_data[:, feature_size:] - min_rr) / (max_rr - min_rr))
    eval_data[:,  feature_size:] = ((eval_data[:, feature_size:] - min_rr) / (max_rr - min_rr))
  return (train_data, eval_data) 
Example 56
Source File: utils.py    From tensorflow-rnn-ctc with MIT License 6 votes vote down vote up
def normalize_text(text, remove_apostrophe=True):
    """
    Normalize given text.

    Args:
        text: string.
            Given text.
        remove_apostrophe: bool.
            Whether to remove apostrophe in given text.
    Returns:
        string.
            Normalized text.
    """

    # Convert unicode characters to ASCII.
    result = unicodedata.normalize("NFKD", text).encode("ascii", "ignore").decode()

    # Remove apostrophes.
    if remove_apostrophe:
        result = result.replace("'", "")

    return re.sub("[^a-zA-Z']+", ' ', result).strip().lower() 
Example 57
Source File: tools.py    From argoverse_baselinetracker with MIT License 6 votes vote down vote up
def get_pc_normalize(pc):

    pc_avg = pc.sum(axis=0) / len(pc)
    pc = pc - pc_avg

    cov = np.cov(pc.transpose(1, 0))
    evals, evecs = np.linalg.eig(cov)

    sort_indices = np.argsort(evals)[::-1]
    x_n = evecs[:, sort_indices[0]]  # Eigenvector with largest eigenvalue
    y_n = evecs[:, sort_indices[1]]
    z_n = evecs[:, sort_indices[2]]

    return get_camera_matrix(
        pc_avg[:, np.newaxis],
        x_n[:, np.newaxis],
        y_n[:, np.newaxis],
        z_n[:, np.newaxis],
    ) 
Example 58
Source File: tools.py    From argoverse_baselinetracker with MIT License 6 votes vote down vote up
def get_pc_normalize_small(pc):

    pc_avg = pc.sum(axis=0) / len(pc)
    pc = pc - pc_avg

    cov = np.cov(pc.transpose(1, 0))
    evals, evecs = np.linalg.eig(cov)

    sort_indices = np.argsort(evals)[::-1]
    y_n = evecs[:, sort_indices[0]]  # Eigenvector with largest eigenvalue
    x_n = evecs[:, sort_indices[1]]
    z_n = evecs[:, sort_indices[2]]

    return get_camera_matrix(
        pc_avg[:, np.newaxis],
        x_n[:, np.newaxis],
        y_n[:, np.newaxis],
        z_n[:, np.newaxis],
    ) 
Example 59
Source File: dataset.py    From pycopia with Apache License 2.0 6 votes vote down vote up
def normalize_time(measurements, start=None, offset=None):
    """Normalize a data array containing timestamps as the first data column
    to start at zero time.

    Args:
        measurements (array): a numpy array containing a measurement set.
        start (float): an optional absolute timestamp to take as the start time.
        offset (float): an optional relative time to offset the result.
    """
    if start is None:
        start = measurements[0][0]
    measurements = measurements.transpose()
    # Subtract the first timestamp value from all timestamp values.
    measurements[0] = measurements[0] - start
    if offset is not None:
        measurements[0] = measurements[0] + offset
    return measurements.transpose()
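A small demonstration (note that the subtraction writes through a transposed view, so the input array itself is modified):

import numpy as np

# Three samples whose first column is an absolute timestamp.
m = np.array([[1000.0, 1.5],
              [1001.0, 1.7],
              [1002.5, 1.6]])
print(normalize_time(m)[:, 0])  # [0.  1.  2.5]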