Python numpy.place() Examples

The following are 30 code examples of numpy.place(), collected from open-source projects. The source file, originating project, and license are noted above each example.
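As a quick orientation before the project examples: np.place(arr, mask, vals) modifies arr in place, writing the elements of vals wherever mask is True and cycling vals when the mask selects more elements than vals provides. A minimal sketch:

import numpy as np

arr = np.arange(6).reshape(2, 3)
np.place(arr, arr > 2, [44, 55])  # three True entries, two vals: 44, 55, 44
print(arr)
# [[ 0  1  2]
#  [44 55 44]]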
Example #1
Source File: cifar10_query_based.py    From blackbox-attacks with MIT License
def one_shot_method(prediction, x, curr_sample, curr_target, p_t):
    grad_est = np.zeros((BATCH_SIZE, IMAGE_ROWS, IMAGE_COLS, NUM_CHANNELS))
    DELTA = np.random.randint(2, size=(BATCH_SIZE, IMAGE_ROWS, IMAGE_COLS, NUM_CHANNELS))
    np.place(DELTA, DELTA==0, -1)

    y_plus = np.clip(curr_sample + args.delta * DELTA, CLIP_MIN, CLIP_MAX)
    y_minus = np.clip(curr_sample - args.delta * DELTA, CLIP_MIN, CLIP_MAX)

    if args.CW_loss == 0:
        pred_plus = K.get_session().run([prediction], feed_dict={x: y_plus, K.learning_phase(): 0})[0]
        pred_plus_t = pred_plus[np.arange(BATCH_SIZE), list(curr_target)]

        pred_minus = K.get_session().run([prediction], feed_dict={x: y_minus, K.learning_phase(): 0})[0]
        pred_minus_t = pred_minus[np.arange(BATCH_SIZE), list(curr_target)]

        num_est = (pred_plus_t - pred_minus_t)

    grad_est = num_est[:, None, None, None]/(args.delta * DELTA)

    # Getting gradient of the loss
    if args.CW_loss == 0:
        loss_grad = -1.0 * grad_est/p_t[:, None, None, None]

    return loss_grad 
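The np.place call above converts a random 0/1 array into a random ±1 sign array for the two-sided finite-difference perturbation (note the excerpt only shows the args.CW_loss == 0 branch). The sign-flip step in isolation, with an arbitrary shape for illustration:

import numpy as np

delta = np.random.randint(2, size=(4, 32, 32, 3))  # entries in {0, 1}
np.place(delta, delta == 0, -1)                    # entries now in {-1, +1}
assert set(np.unique(delta)) <= {-1, 1}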
Example #2
Source File: normalizer.py    From astroNN with MIT License
def denormalize(self, data):
        data_array, dict_flag = self.mode_checker(data)
        for name in data_array.keys():  # denormalize data for each named input
            magic_mask = [data_array[name] == MAGIC_NUMBER]

            if self._custom_denorm_func is not None:
                data_array[name] = self._custom_denorm_func(data_array[name])
            data_array[name] *= self.std_labels[name]
            data_array[name] += self.mean_labels[name]

            np.place(data_array[name], magic_mask, MAGIC_NUMBER)

        if not dict_flag:
            data_array = data_array["Temp"]
            self.mean_labels = self.mean_labels['Temp']
            self.std_labels = self.std_labels['Temp']

        return data_array 
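np.place accepts any array_like mask of the same size as the array, so the list-wrapped boolean array above works. The pattern records where the sentinel MAGIC_NUMBER sits, applies the denormalization arithmetic, then restores the sentinel. A minimal sketch of the round trip, with MAGIC_NUMBER assumed to be a float sentinel for missing entries:

import numpy as np

MAGIC_NUMBER = -9999.0  # assumed sentinel for missing entries
data = np.array([1.0, MAGIC_NUMBER, 3.0])

magic_mask = data == MAGIC_NUMBER          # remember sentinel positions
data = data * 2.0 + 1.0                    # stand-in for the std/mean arithmetic
np.place(data, magic_mask, MAGIC_NUMBER)   # restore sentinels
print(data)  # [    3. -9999.     7.]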
Example #3
Source File: mstats_basic.py    From Computable with MIT License
def kurtosis(a, axis=0, fisher=True, bias=True):
    a, axis = _chk_asarray(a, axis)
    m2 = moment(a,2,axis)
    m4 = moment(a,4,axis)
    olderr = np.seterr(all='ignore')
    try:
        vals = ma.where(m2 == 0, 0, m4 / m2**2.0)
    finally:
        np.seterr(**olderr)

    if not bias:
        n = a.count(axis)
        can_correct = (n > 3) & (m2 is not ma.masked and m2 > 0)
        if can_correct.any():
            n = np.extract(can_correct, n)
            m2 = np.extract(can_correct, m2)
            m4 = np.extract(can_correct, m4)
            nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0)
            np.place(vals, can_correct, nval+3.0)
    if fisher:
        return vals - 3
    else:
        return vals 
Example #4
Source File: grid.py    From pysheds with GNU General Public License v3.0
def set_nodata(self, data_name, new_nodata, old_nodata=None):
        """
        Change nodata value of a dataset.
 
        Parameters
        ----------
        data_name : string
                    Attribute name of dataset to change.
        new_nodata : int or float
                     New nodata value to use.
        old_nodata : int or float (optional)
                     If none provided, defaults to
                     self.<data_name>.<nodata>
        """
        if old_nodata is None:
            old_nodata = getattr(self, data_name).nodata
        data = getattr(self, data_name)
        if np.isnan(old_nodata):
            np.place(data, np.isnan(data), new_nodata)
        else:
            np.place(data, data == old_nodata, new_nodata)
        data.nodata = new_nodata 
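The NaN branch matters because data == old_nodata is never True when old_nodata is NaN (equality comparisons with NaN always fail), so np.isnan must supply the mask. Both branches in isolation:

import numpy as np

grid = np.array([1.0, np.nan, 3.0])
np.place(grid, np.isnan(grid), -32768)    # NaN nodata needs isnan
print(grid)  # [     1. -32768.      3.]

grid = np.array([1.0, -32768.0, 3.0])
np.place(grid, grid == -32768, np.nan)    # finite nodata compares directly
print(grid)  # [ 1. nan  3.]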
Example #5
Source File: svhn_cls_dataset.py    From imgclsmob with MIT License
def _get_data(self):
        if any(not os.path.exists(path) or not check_sha1(path, sha1) for path, sha1 in
               ((os.path.join(self._root, name), sha1) for _, name, sha1 in self._train_data + self._test_data)):
            for url, _, sha1 in self._train_data + self._test_data:
                download(url=url, path=self._root, sha1_hash=sha1)

        if self._mode == "train":
            data_files = self._train_data[0]
        else:
            data_files = self._test_data[0]

        import scipy.io as sio

        loaded_mat = sio.loadmat(os.path.join(self._root, data_files[1]))

        data = loaded_mat["X"]
        data = np.transpose(data, (3, 0, 1, 2))
        self._data = mx.nd.array(data, dtype=data.dtype)

        self._label = loaded_mat["y"].astype(np.int32).squeeze()
        np.place(self._label, self._label == 10, 0) 
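SVHN stores the digit 0 under class label 10; the np.place call rewrites it into the 0-9 range most loss functions expect. In isolation:

import numpy as np

label = np.array([10, 2, 10, 7], dtype=np.int32)
np.place(label, label == 10, 0)
print(label)  # [0 2 0 7]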
Example #6
Source File: grid.py    From pysheds with GNU General Public License v3.0
def _flatten_fdir(self, fdir, flat_idx, dirmap, copy=False):
        # WARNING: This modifies fdir in place if copy is set to False!
        if copy:
            fdir = fdir.copy()
        shape = fdir.shape
        go_to = (
             0 - shape[1],
             1 - shape[1],
             1 + 0,
             1 + shape[1],
             0 + shape[1],
            -1 + shape[1],
            -1 + 0,
            -1 - shape[1]
            )
        gotomap = dict(zip(dirmap, go_to))
        for k, v in gotomap.items():
            fdir[fdir == k] = v
        fdir.flat[flat_idx] += flat_idx 
Example #7
Source File: _continuous_distns.py    From GraphicDesignPatternByPython with MIT License
def _cdf(self, x, c):
        output = np.zeros(x.shape, dtype=x.dtype)
        val = (1.0+c)/(1.0-c)
        c1 = x < np.pi
        c2 = 1-c1
        xp = np.extract(c1, x)
        xn = np.extract(c2, x)
        if np.any(xn):
            valn = np.extract(c2, np.ones_like(x)*val)
            xn = 2*np.pi - xn
            yn = np.tan(xn/2.0)
            on = 1.0-1.0/np.pi*np.arctan(valn*yn)
            np.place(output, c2, on)
        if np.any(xp):
            valp = np.extract(c1, np.ones_like(x)*val)
            yp = np.tan(xp/2.0)
            op = 1.0/np.pi*np.arctan(valp*yp)
            np.place(output, c1, op)
        return output 
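The np.extract/np.place pair above is the standard scatter-gather idiom for piecewise functions: extract the elements matching each condition, evaluate that branch on the subset, and place the results back into the output array. A toy sketch of the same pattern with a made-up piecewise rule:

import numpy as np

x = np.linspace(-2.0, 2.0, 9)
out = np.zeros_like(x)

neg = x < 0
np.place(out, neg, np.extract(neg, x) ** 2)   # x**2 on the negative branch
np.place(out, ~neg, np.extract(~neg, x) + 1)  # x + 1 elsewhere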
Example #8
Source File: scaling.py    From CatLearn with GNU General Public License v3.0
def unit_length(train_matrix, test_matrix=None, local=True):
    """Normalize each feature vector relative to the Euclidean length.

    Parameters
    ----------
    train_matrix : list
        Feature matrix for the training dataset.
    test_matrix : list
        Feature matrix for the test dataset.
    local : boolean
        Define whether to scale locally or globally.
    """
    train_matrix = np.transpose(train_matrix)
    if test_matrix is not None:
        test_matrix = np.transpose(test_matrix)

    scale = defaultdict(list)

    scale['length_train'] = np.linalg.norm(train_matrix, axis=0)
    np.place(scale['length_train'], scale['length_train'] == 0., [1.])
    scale['train'] = np.transpose(train_matrix / scale['length_train'])

    if test_matrix is not None:
        scale['length_test'] = np.linalg.norm(test_matrix, axis=0)
        np.place(scale['length_test'], scale['length_test'] == 0., [1.])
        test_matrix = np.transpose(test_matrix / scale['length_test'])
    scale['test'] = test_matrix

    return scale 
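Replacing zero norms with 1 before dividing is a standard guard: an all-zero feature column would otherwise produce NaN from 0/0, while dividing it by 1 just leaves it at zero. The guard in isolation:

import numpy as np

lengths = np.array([5.0, 0.0, 2.0])       # per-feature norms
np.place(lengths, lengths == 0., [1.])    # avoid division by zero
print(np.array([[10.0, 0.0, 4.0]]) / lengths)  # [[2. 0. 2.]]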
Example #9
Source File: scaling.py    From CatLearn with GNU General Public License v3.0
def standardize(train_matrix, test_matrix=None, mean=None, std=None,
                local=True):
    """Standardize each feature relative to the mean and standard deviation.

    Parameters
    ----------
    train_matrix : array
        Feature matrix for the training dataset.
    test_matrix : array
        Feature matrix for the test dataset.
    mean : list
        List of mean values for each feature.
    std : list
        List of standard deviation values for each feature.
    local : boolean
        Define whether to scale locally or globally.
    """
    scale = defaultdict(list)
    if test_matrix is not None and not local:
        data = np.concatenate((train_matrix, test_matrix), axis=0)
    else:
        data = np.array(train_matrix)

    if mean is None:
        mean = np.mean(data, axis=0)
    scale['mean'] = mean
    if std is None:
        std = np.std(data, axis=0)
    scale['std'] = std
    np.place(scale['std'], scale['std'] == 0., [1.])  # Replace 0 with 1.

    scale['train'] = (train_matrix - scale['mean']) / scale['std']

    if test_matrix is not None:
        test_matrix = (test_matrix - scale['mean']) / scale['std']
    scale['test'] = test_matrix

    return scale 
Example #10
Source File: ReplaceNulls.py    From raster-functions with Apache License 2.0
def updatePixels(self, tlc, shape, props, **pixelBlocks):

        pix_array = np.asarray(pixelBlocks['raster_pixels'])
        np.place(pix_array, pix_array==0, [self.fill_val])

        mask      = np.ones(pix_array.shape)
        pixelBlocks['output_mask'] = mask.astype('u1', copy = False)
        pixelBlocks['output_pixels'] = pix_array.astype(props['pixelType'], copy=True)


        return pixelBlocks 
Example #11
Source File: timetables.py    From metrics-mvp with MIT License
def match_actual_times_to_schedule(actual_times, scheduled_times) -> pd.DataFrame:

    scheduled_headways = np.r_[np.nan, metrics.compute_headway_minutes(scheduled_times)]

    next_scheduled_time_indices = np.searchsorted(scheduled_times, actual_times)
    scheduled_times_padded = np.r_[scheduled_times, np.nan]
    scheduled_headways_padded = np.r_[scheduled_headways, np.nan]

    next_scheduled_times = scheduled_times_padded[next_scheduled_time_indices]
    next_scheduled_headways = scheduled_headways_padded[next_scheduled_time_indices]

    prev_scheduled_times = np.r_[np.nan, scheduled_times][next_scheduled_time_indices]
    prev_scheduled_headways = np.r_[np.nan, scheduled_headways][next_scheduled_time_indices]

    if len(actual_times):
        next_scheduled_time_deltas = actual_times - next_scheduled_times
        prev_scheduled_time_deltas = actual_times - prev_scheduled_times

        np.place(prev_scheduled_time_deltas, np.isnan(prev_scheduled_time_deltas), np.inf)
        np.place(next_scheduled_time_deltas, np.isnan(next_scheduled_time_deltas), -np.inf)

        is_next_closer = (prev_scheduled_time_deltas >= -next_scheduled_time_deltas)
    else:
        is_next_closer = False

    closest_scheduled_times = np.where(is_next_closer, next_scheduled_times, prev_scheduled_times)
    closest_scheduled_headways = np.where(is_next_closer, next_scheduled_headways, prev_scheduled_headways)

    return pd.DataFrame({
        'next_scheduled_time': next_scheduled_times,
        'prev_scheduled_time': prev_scheduled_times,
        'closest_scheduled_time': closest_scheduled_times,
        'closest_scheduled_delta': actual_times - closest_scheduled_times,
        'closest_scheduled_headway': closest_scheduled_headways,
    }) 
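Placing inf over the NaN deltas makes the closest-time comparison come out right at the boundaries: a missing previous scheduled time compares as infinitely far in the past, a missing next one as infinitely far in the future. The trick in isolation:

import numpy as np

prev_deltas = np.array([np.nan, 3.0])    # first arrival has no previous time
next_deltas = np.array([-2.0, np.nan])   # last arrival has no next time

np.place(prev_deltas, np.isnan(prev_deltas), np.inf)
np.place(next_deltas, np.isnan(next_deltas), -np.inf)

print(prev_deltas >= -next_deltas)  # [ True False]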
Example #12
Source File: input_matrix.py    From CityEnergyAnalyst with MIT License
def get_array_internal_loads_variables(schedules, tsd, building):
    '''
    this function collects the internal loads
    :param schedules: schedules profile
    :param tsd: building properties struct
    :param building: the intended building dataset
    :return: array of all internal gains (array_int_load)
    '''
    #   electricity gains (appliances, datacenter, lighting, process and refrigeration)
    array_electricity = tsd['Eaf'] + tsd['Edataf'] + tsd['Elf'] + tsd['Eprof'] + tsd['Eref']
    #   sensible gains
    np.place(tsd['Qhprof'], np.isnan(tsd['Qhprof']), 0)
    array_sensible_gain = tsd['Qs'] + tsd['Qhprof']
    #   latent gains
    array_latent_gain = tsd['w_int']
    #   solar gains
    for t in range(HOURS_IN_YEAR):
        tsd['I_sol_and_I_rad'][t], tsd['I_rad'][t], tsd['I_sol'][t] = calc_I_sol(t, building, tsd)
    array_solar_gain = tsd['I_sol_and_I_rad']
    #   ventilation loss
    array_ve = tsd['ve']
    #   DHW gain
    array_Vww = schedules['Vww']
    #   concatenate internal loads arrays
    array_int_load = np.column_stack((array_electricity, array_sensible_gain, array_latent_gain, array_solar_gain, array_ve, array_Vww))

    return array_int_load 
Example #13
Source File: input_matrix.py    From CityEnergyAnalyst with MIT License
def get_array_comfort_variables(building, date, schedules_dict, weather_data,use_stochastic_occupancy):
    '''
    this function collects comfort/setpoint characteristics
    :param building: the intended building dataset
    :param date: date file
    :param schedules_dict: schedules profile
    :param weather_data: weather data
    :return: array of setpoint properties for each hour of the year (array_cmfrts, schedules, tsd)
    '''
    #   collect schedules
    schedules, tsd = initialize_inputs(building, schedules_dict, weather_data,use_stochastic_occupancy)
    #   calculate seasonal setpoint
    tsd = control_heating_cooling_systems.get_temperature_setpoints_incl_seasonality(tsd, building, date.dayofweek)
    #   replace NaN values with -100 for the heating set point and 100 for the cooling set point (implying no setpoint)
    np.place(tsd['ta_hs_set'], np.isnan(tsd['ta_hs_set']), -100)
    np.place(tsd['ta_cs_set'], np.isnan(tsd['ta_cs_set']), 100)
    array_Thset = tsd['ta_hs_set']
    array_Tcset = tsd['ta_cs_set']
    #   create a single vector of setpoint temperatures
    array_cmfrt = np.empty((1, HOURS_IN_YEAR))
    seasonhours = [3216, 6192]
    array_cmfrt[0, :] = array_Thset
    array_cmfrt[0, seasonhours[0] + 1:seasonhours[1]] = array_Tcset[seasonhours[0] + 1:seasonhours[1]]
    array_cmfrt[:, :] = array_Tcset
    # todo: change the comfort array to match other than singapore
    array_HVAC_status = np.where(array_cmfrt > 99, 0,
                                 np.where(array_cmfrt < -99, 0, 1))
    #   an array of HVAC availability during winter
    array_HVAC_heating = np.empty((1, HOURS_IN_YEAR))
    array_HVAC_heating[0, :] = np.where(array_Thset < -99, 0, 1)
    #   an array of HVAC availability during summer
    array_HVAC_cooling = np.empty((1, HOURS_IN_YEAR))
    array_HVAC_cooling[0, :] = np.where(array_Tcset > 99, 0, 1)
    #   concatenate comfort arrays
    array_cmfrts = np.concatenate((array_cmfrt, array_HVAC_status), axis=0)
    array_cmfrts = np.concatenate((array_cmfrts, array_HVAC_heating), axis=0)
    array_cmfrts = np.concatenate((array_cmfrts, array_HVAC_cooling), axis=0)

    return array_cmfrts, schedules, tsd 
Example #14
Source File: function_base.py    From Mastering-Elasticsearch-7.0 with MIT License
def _update_dim_sizes(dim_sizes, arg, core_dims):
    """
    Incrementally check and update core dimension sizes for a single argument.

    Arguments
    ---------
    dim_sizes : Dict[str, int]
        Sizes of existing core dimensions. Will be updated in-place.
    arg : ndarray
        Argument to examine.
    core_dims : Tuple[str, ...]
        Core dimensions for this argument.
    """
    if not core_dims:
        return

    num_core_dims = len(core_dims)
    if arg.ndim < num_core_dims:
        raise ValueError(
            '%d-dimensional argument does not have enough '
            'dimensions for all core dimensions %r'
            % (arg.ndim, core_dims))

    core_shape = arg.shape[-num_core_dims:]
    for dim, size in zip(core_dims, core_shape):
        if dim in dim_sizes:
            if size != dim_sizes[dim]:
                raise ValueError(
                    'inconsistent size for core dimension %r: %r vs %r'
                    % (dim, size, dim_sizes[dim]))
        else:
            dim_sizes[dim] = size 
Example #15
Source File: scaling.py    From CatLearn with GNU General Public License v3.0
def min_max(train_matrix, test_matrix=None, local=True):
    """Normalize each feature relative to the min and max.

    Parameters
    ----------
    train_matrix : list
        Feature matrix for the training dataset.
    test_matrix : list
        Feature matrix for the test dataset.
    local : boolean
        Define whether to scale locally or globally.
    """
    scale = defaultdict(list)
    if test_matrix is not None and not local:
        data = np.concatenate((train_matrix, test_matrix), axis=0)
    else:
        data = train_matrix
    scale['min'] = np.min(data, axis=0)
    scale['dif'] = np.max(data, axis=0) - scale['min']
    np.place(scale['dif'], scale['dif'] == 0., [1.])  # Replace 0 with 1.

    scale['train'] = (train_matrix - scale['min']) / scale['dif']

    if test_matrix is not None:
        test_matrix = (test_matrix - scale['min']) / scale['dif']
    scale['test'] = test_matrix

    return scale 
Example #16
Source File: scaling.py    From CatLearn with GNU General Public License v3.0
def normalize(train_matrix, test_matrix=None, mean=None, dif=None, local=True):
    """Normalize each feature relative to mean and min/max variance.

    Parameters
    ----------
    train_matrix : list
        Feature matrix for the training dataset.
    test_matrix : list
        Feature matrix for the test dataset.
    local : boolean
        Define whether to scale locally or globally.
    mean : list
        List of mean values for each feature.
    dif : list
        List of max-min values for each feature.
    """
    scale = defaultdict(list)
    if test_matrix is not None and not local:
        data = np.concatenate((train_matrix, test_matrix), axis=0)
    else:
        data = train_matrix

    if mean is None:
        mean = np.mean(data, axis=0)
    scale['mean'] = mean
    if dif is None:
        dif = np.max(data, axis=0) - np.min(data, axis=0)
    scale['dif'] = dif
    np.place(scale['dif'], scale['dif'] == 0., [1.])  # Replace 0 with 1.

    scale['train'] = (train_matrix - scale['mean']) / scale['dif']

    if test_matrix is not None:
        test_matrix = (test_matrix - scale['mean']) / scale['dif']
    scale['test'] = test_matrix

    return scale 
Example #17
Source File: function_base.py    From recruit with Apache License 2.0
def place(arr, mask, vals):
    """
    Change elements of an array based on conditional and input values.

    Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
    `place` uses the first N elements of `vals`, where N is the number of
    True values in `mask`, while `copyto` uses the elements where `mask`
    is True.

    Note that `extract` does the exact opposite of `place`.

    Parameters
    ----------
    arr : ndarray
        Array to put data into.
    mask : array_like
        Boolean mask array. Must have the same size as `arr`.
    vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
        N is the number of True values in `mask`. If `vals` is smaller
        than N, it will be repeated, and if elements of `arr` are to be masked,
        this sequence must be non-empty.

    See Also
    --------
    copyto, put, take, extract

    Examples
    --------
    >>> arr = np.arange(6).reshape(2, 3)
    >>> np.place(arr, arr>2, [44, 55])
    >>> arr
    array([[ 0,  1,  2],
           [44, 55, 44]])

    """
    if not isinstance(arr, np.ndarray):
        raise TypeError("argument 1 must be numpy.ndarray, "
                        "not {name}".format(name=type(arr).__name__))

    return _insert(arr, mask, vals) 
Example #18
Source File: function_base.py    From GraphicDesignPatternByPython with MIT License
def _update_dim_sizes(dim_sizes, arg, core_dims):
    """
    Incrementally check and update core dimension sizes for a single argument.

    Arguments
    ---------
    dim_sizes : Dict[str, int]
        Sizes of existing core dimensions. Will be updated in-place.
    arg : ndarray
        Argument to examine.
    core_dims : Tuple[str, ...]
        Core dimensions for this argument.
    """
    if not core_dims:
        return

    num_core_dims = len(core_dims)
    if arg.ndim < num_core_dims:
        raise ValueError(
            '%d-dimensional argument does not have enough '
            'dimensions for all core dimensions %r'
            % (arg.ndim, core_dims))

    core_shape = arg.shape[-num_core_dims:]
    for dim, size in zip(core_dims, core_shape):
        if dim in dim_sizes:
            if size != dim_sizes[dim]:
                raise ValueError(
                    'inconsistent size for core dimension %r: %r vs %r'
                    % (dim, size, dim_sizes[dim]))
        else:
            dim_sizes[dim] = size 
Example #19
Source File: function_base.py    From GraphicDesignPatternByPython with MIT License
def add_newdoc(place, obj, doc):
    """
    Adds documentation to obj which is in module place.

    If doc is a string add it to obj as a docstring

    If doc is a tuple, then the first element is interpreted as
       an attribute of obj and the second as the docstring
          (method, docstring)

    If doc is a list, then each element of the list should be a
       sequence of length two --> [(method1, docstring1),
       (method2, docstring2), ...]

    This routine never raises an error.

    This routine cannot modify read-only docstrings, as appear
    in new-style classes or built-in functions. Because this
    routine never raises an error the caller must check manually
    that the docstrings were changed.
    """
    try:
        new = getattr(__import__(place, globals(), {}, [obj]), obj)
        if isinstance(doc, str):
            add_docstring(new, doc.strip())
        elif isinstance(doc, tuple):
            add_docstring(getattr(new, doc[0]), doc[1].strip())
        elif isinstance(doc, list):
            for val in doc:
                add_docstring(getattr(new, val[0]), val[1].strip())
    except Exception:
        pass


# Based on scitools meshgrid 
Example #20
Source File: base.py    From pyiron with BSD 3-Clause "New" or "Revised" License
def remap_indices(self, lammps_indices):
        """
        Given the Lammps-dumped indices, re-maps these back onto the structure's indices to preserve the species.

        The issue is that for an N-element potential, Lammps dumps the chemical index from 1 to N based on the order
        that these species are written in the Lammps input file. But the indices for a given structure are based on the
        order in which chemical species were added to that structure, and run from 0 up to the number of species
        currently in that structure. Therefore we need to be a little careful with mapping.

        Args:
            lammps_indices (numpy.ndarray/list): The Lammps-dumped integers.

        Returns:
            numpy.ndarray: Those integers mapped onto the structure.
        """
        lammps_symbol_order = np.array(self.input.potential.get_element_lst())

        # If new Lammps indices are present for which we have no species, extend the species list
        unique_lammps_indices = np.unique(lammps_indices)
        if len(unique_lammps_indices) > len(np.unique(self.structure.indices)):
            unique_lammps_indices -= 1  # Convert from Lammps start counting at 1 to python start counting at 0
            new_lammps_symbols = lammps_symbol_order[unique_lammps_indices]
            self.structure.set_species([self.structure.convert_element(el) for el in new_lammps_symbols])

        # Create a map between the lammps indices and structure indices to preserve species
        structure_symbol_order = np.array([el.Abbreviation for el in self.structure.species])
        map_ = np.array([int(np.argwhere(lammps_symbol_order == symbol)[0]) + 1 for symbol in structure_symbol_order])

        structure_indices = np.array(lammps_indices)
        for i_struct, i_lammps in enumerate(map_):
            np.place(structure_indices, lammps_indices == i_lammps, i_struct)
        # TODO: Vectorize this for-loop for computational efficiency

        return structure_indices 
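The TODO could be met by inverting map_ into a lookup table and fancy-indexing, which removes the Python loop. A hedged sketch under the same assumptions as remap_indices above (map_ holds the 1-based Lammps index per structure species), not a tested drop-in replacement for the class:

import numpy as np

map_ = np.array([2, 1, 3])                 # structure species i -> Lammps index map_[i]
lammps_indices = np.array([1, 2, 2, 3, 1])

lookup = np.empty(map_.max(), dtype=int)   # invert: Lammps index - 1 -> structure species
lookup[map_ - 1] = np.arange(len(map_))
structure_indices = lookup[lammps_indices - 1]
print(structure_indices)  # [1 0 0 2 1]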
Example #21
Source File: __init__.py    From kite with GNU General Public License v3.0
def derampGMatrix(displ):
    """ Deramp through lsq a bilinear plane
    Data is also de-meaned
    """
    if displ.ndim != 2:
        raise TypeError('Displacement has to be 2-dim array')

    # form a relative coordinate grid
    c_grid = num.mgrid[0:displ.shape[0], 0:displ.shape[1]]

    # separate and flatten coordinate grid into x and y vectors for each point
    ix = c_grid[0].flat
    iy = c_grid[1].flat
    displ_f = displ.flat

    # reduce vectors, dropping all NaNs
    displ_nonan = displ_f[num.isfinite(displ_f)]
    ix = ix[num.isfinite(displ_f)]
    iy = iy[num.isfinite(displ_f)]

    # form kernel/design derampMatrix (c, x, y)
    GT = num.matrix([num.ones(len(ix)), ix, iy])
    G = GT.T

    # generalized kernel matrix (quadratic)
    GTG = GT * G
    # generalized inverse
    GTGinv = GTG.I

    # lsq estimates of ramp parameter
    ramp_paras = displ_nonan * (GTGinv * GT).T

    # ramp values
    ramp_nonan = ramp_paras * GT
    ramp_f = num.multiply(displ_f, 0.)

    # insert ramp values in full vectors
    num.place(ramp_f, num.isfinite(displ_f), num.array(ramp_nonan).flatten())
    ramp_f = ramp_f.reshape(displ.shape[0], displ.shape[1])

    return displ - ramp_f 
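num.place does the scatter-back step here: the fitted ramp values, computed only for the finite pixels, are written into just the finite positions of the zero-initialized flat vector. The same step in isolation:

import numpy as np

displ = np.array([[1.0, np.nan], [3.0, 4.0]])
ramp_f = np.zeros(displ.size)
fitted = np.array([0.1, 0.3, 0.4])                      # one value per finite pixel
np.place(ramp_f, np.isfinite(displ.flatten()), fitted)
print(ramp_f.reshape(displ.shape))
# [[0.1 0. ]
#  [0.3 0.4]]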
Example #22
Source File: source.py    From python-musical with MIT License
def _square(t, duty=0.5):
    ''' Generate square wave from wave input array with specific 'duty'.
    '''
    y = numpy.zeros(t.shape)
    tmod = numpy.mod(t, 2 * numpy.pi)
    mask = tmod < duty * 2 * numpy.pi
    numpy.place(y, mask, 1)
    numpy.place(y, (1 - mask), -1)
    return y 
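A quick usage check of the duty-cycle logic, assuming the _square helper above is in scope:

import numpy

t = numpy.linspace(0, 2 * numpy.pi, 1000, endpoint=False)
y = _square(t, duty=0.25)
print(numpy.mean(y == 1))  # ~0.25: high for a quarter of each period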
Example #23
Source File: function_base.py    From GraphicDesignPatternByPython with MIT License
def place(arr, mask, vals):
    """
    Change elements of an array based on conditional and input values.

    Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
    `place` uses the first N elements of `vals`, where N is the number of
    True values in `mask`, while `copyto` uses the elements where `mask`
    is True.

    Note that `extract` does the exact opposite of `place`.

    Parameters
    ----------
    arr : ndarray
        Array to put data into.
    mask : array_like
        Boolean mask array. Must have the same size as `arr`.
    vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
        N is the number of True values in `mask`. If `vals` is smaller
        than N, it will be repeated, and if elements of `arr` are to be masked,
        this sequence must be non-empty.

    See Also
    --------
    copyto, put, take, extract

    Examples
    --------
    >>> arr = np.arange(6).reshape(2, 3)
    >>> np.place(arr, arr>2, [44, 55])
    >>> arr
    array([[ 0,  1,  2],
           [44, 55, 44]])

    """
    if not isinstance(arr, np.ndarray):
        raise TypeError("argument 1 must be numpy.ndarray, "
                        "not {name}".format(name=type(arr).__name__))

    return _insert(arr, mask, vals) 
Example #24
Source File: function_base.py    From Mastering-Elasticsearch-7.0 with MIT License
def place(arr, mask, vals):
    """
    Change elements of an array based on conditional and input values.

    Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
    `place` uses the first N elements of `vals`, where N is the number of
    True values in `mask`, while `copyto` uses the elements where `mask`
    is True.

    Note that `extract` does the exact opposite of `place`.

    Parameters
    ----------
    arr : ndarray
        Array to put data into.
    mask : array_like
        Boolean mask array. Must have the same size as `arr`.
    vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
        N is the number of True values in `mask`. If `vals` is smaller
        than N, it will be repeated, and if elements of `arr` are to be masked,
        this sequence must be non-empty.

    See Also
    --------
    copyto, put, take, extract

    Examples
    --------
    >>> arr = np.arange(6).reshape(2, 3)
    >>> np.place(arr, arr>2, [44, 55])
    >>> arr
    array([[ 0,  1,  2],
           [44, 55, 44]])

    """
    if not isinstance(arr, np.ndarray):
        raise TypeError("argument 1 must be numpy.ndarray, "
                        "not {name}".format(name=type(arr).__name__))

    return _insert(arr, mask, vals) 
Example #25
Source File: svhn_cls_dataset.py    From imgclsmob with MIT License
def get_svhn_data(root,
                  mode):
    """
    SVHN image classification dataset from http://ufldl.stanford.edu/housenumbers/.
    Each sample is an image (in 3D NDArray) with shape (32, 32, 3).
    Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset,
    we assign the label `0` to the digit `0`.

    Parameters
    ----------
    root : str
        Path to temp folder for storing data.
    mode : str
        'train', 'val', or 'test'.
    """
    _train_data = [("http://ufldl.stanford.edu/housenumbers/train_32x32.mat", "train_32x32.mat",
                    "e6588cae42a1a5ab5efe608cc5cd3fb9aaffd674")]
    _test_data = [("http://ufldl.stanford.edu/housenumbers/test_32x32.mat", "test_32x32.mat",
                   "29b312382ca6b9fba48d41a7b5c19ad9a5462b20")]

    if any(not os.path.exists(path) or not _check_sha1(path, sha1) for path, sha1 in
           ((os.path.join(root, name), sha1) for _, name, sha1 in _train_data + _test_data)):
        for url, _, sha1 in _train_data + _test_data:
            _download(url=url, path=root, sha1_hash=sha1)

    if mode == "train":
        data_files = _train_data[0]
    else:
        data_files = _test_data[0]

    import scipy.io as sio
    loaded_mat = sio.loadmat(os.path.join(root, data_files[1]))

    data = loaded_mat["X"]
    data = np.transpose(data, (3, 0, 1, 2))
    label = loaded_mat["y"].astype(np.int32).squeeze()
    np.place(label, label == 10, 0)

    return data, label 
Example #26
Source File: function_base.py    From Computable with MIT License
def add_newdoc(place, obj, doc):
    """Adds documentation to obj which is in module place.

    If doc is a string add it to obj as a docstring

    If doc is a tuple, then the first element is interpreted as
       an attribute of obj and the second as the docstring
          (method, docstring)

    If doc is a list, then each element of the list should be a
       sequence of length two --> [(method1, docstring1),
       (method2, docstring2), ...]

    This routine never raises an error.

    This routine cannot modify read-only docstrings, as appear
    in new-style classes or built-in functions. Because this
    routine never raises an error the caller must check manually
    that the docstrings were changed.
       """
    try:
        new = {}
        exec('from %s import %s' % (place, obj), new)
        if isinstance(doc, str):
            add_docstring(new[obj], doc.strip())
        elif isinstance(doc, tuple):
            add_docstring(getattr(new[obj], doc[0]), doc[1].strip())
        elif isinstance(doc, list):
            for val in doc:
                add_docstring(getattr(new[obj], val[0]), val[1].strip())
    except:
        pass


# Based on scitools meshgrid 
Example #27
Source File: function_base.py    From Computable with MIT License
def place(arr, mask, vals):
    """
    Change elements of an array based on conditional and input values.

    Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
    `place` uses the first N elements of `vals`, where N is the number of
    True values in `mask`, while `copyto` uses the elements where `mask`
    is True.

    Note that `extract` does the exact opposite of `place`.

    Parameters
    ----------
    arr : array_like
        Array to put data into.
    mask : array_like
        Boolean mask array. Must have the same size as `arr`.
    vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
        N is the number of True values in `mask`. If `vals` is smaller
        than N it will be repeated.

    See Also
    --------
    copyto, put, take, extract

    Examples
    --------
    >>> arr = np.arange(6).reshape(2, 3)
    >>> np.place(arr, arr>2, [44, 55])
    >>> arr
    array([[ 0,  1,  2],
           [44, 55, 44]])

    """
    return _insert(arr, mask, vals) 
Example #28
Source File: svhn.py    From Global-Second-order-Pooling-Convolutional-Networks with MIT License
def __init__(self, root, split='train',
                 transform=None, target_transform=None, download=False):
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.split = split  # training set or test set or extra set

        if self.split not in self.split_list:
            raise ValueError('Wrong split entered! Please use split="train" '
                             'or split="extra" or split="test"')

        self.url = self.split_list[split][0]
        self.filename = self.split_list[split][1]
        self.file_md5 = self.split_list[split][2]

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')

        # import here rather than at top of file because this is
        # an optional dependency for torchvision
        import scipy.io as sio

        # reading(loading) mat file as array
        loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))

        self.data = loaded_mat['X']
        # loading from the .mat file gives an np array of type np.uint8
        # converting to np.int64, so that we have a LongTensor after
        # the conversion from the numpy array
        # the squeeze is needed to obtain a 1D tensor
        self.labels = loaded_mat['y'].astype(np.int64).squeeze()

        # the svhn dataset assigns the class label "10" to the digit 0
        # this makes it inconsistent with several loss functions
        # which expect the class labels to be in the range [0, C-1]
        np.place(self.labels, self.labels == 10, 0)
        self.data = np.transpose(self.data, (3, 2, 0, 1)) 
Example #29
Source File: function_base.py    From vnpy_crypto with MIT License
def add_newdoc(place, obj, doc):
    """
    Adds documentation to obj which is in module place.

    If doc is a string add it to obj as a docstring

    If doc is a tuple, then the first element is interpreted as
       an attribute of obj and the second as the docstring
          (method, docstring)

    If doc is a list, then each element of the list should be a
       sequence of length two --> [(method1, docstring1),
       (method2, docstring2), ...]

    This routine never raises an error.

    This routine cannot modify read-only docstrings, as appear
    in new-style classes or built-in functions. Because this
    routine never raises an error the caller must check manually
    that the docstrings were changed.
    """
    try:
        new = getattr(__import__(place, globals(), {}, [obj]), obj)
        if isinstance(doc, str):
            add_docstring(new, doc.strip())
        elif isinstance(doc, tuple):
            add_docstring(getattr(new, doc[0]), doc[1].strip())
        elif isinstance(doc, list):
            for val in doc:
                add_docstring(getattr(new, val[0]), val[1].strip())
    except Exception:
        pass


# Based on scitools meshgrid 
Example #30
Source File: function_base.py    From vnpy_crypto with MIT License
def _update_dim_sizes(dim_sizes, arg, core_dims):
    """
    Incrementally check and update core dimension sizes for a single argument.

    Arguments
    ---------
    dim_sizes : Dict[str, int]
        Sizes of existing core dimensions. Will be updated in-place.
    arg : ndarray
        Argument to examine.
    core_dims : Tuple[str, ...]
        Core dimensions for this argument.
    """
    if not core_dims:
        return

    num_core_dims = len(core_dims)
    if arg.ndim < num_core_dims:
        raise ValueError(
            '%d-dimensional argument does not have enough '
            'dimensions for all core dimensions %r'
            % (arg.ndim, core_dims))

    core_shape = arg.shape[-num_core_dims:]
    for dim, size in zip(core_dims, core_shape):
        if dim in dim_sizes:
            if size != dim_sizes[dim]:
                raise ValueError(
                    'inconsistent size for core dimension %r: %r vs %r'
                    % (dim, size, dim_sizes[dim]))
        else:
            dim_sizes[dim] = size