Python numpy.ufunc() Examples

The following are 30 code examples of numpy.ufunc(). You can vote up the
examples you like or vote down the ones you don't like, and you can go to the
original project or source file by following the links above each example. You
may also want to check out all available functions and classes of the numpy
module, or try the search function.
Example #1
Source File: series.py From Splunking-Crime with GNU Affero General Public License v3.0 | 6 votes |
def __array_prepare__(self, result, context=None):
    """Hook numpy invokes before a ufunc runs on this Series.

    Produces a readable TypeError when the underlying values are not a
    plain ndarray (and therefore cannot take part in the numpy op);
    otherwise passes ``result`` through untouched.
    """
    values_are_ndarray = isinstance(self._values, np.ndarray)
    if context is None or values_are_ndarray:
        return result
    # context is (ufunc, inputs, domain); report the first input.
    offender = context[1][0]
    raise TypeError("{obj} with dtype {dtype} cannot perform "
                    "the numpy op {op}".format(
                        obj=type(offender).__name__,
                        dtype=getattr(offender, 'dtype', None),
                        op=context[0].__name__))
# complex
Example #2
Source File: grads.py From tangent with Apache License 2.0 | 6 votes |
def get_module_functions(modules):
    """Finds functions that do not have implemented derivatives.

    Args:
      modules: A list of Python modules. Functions contained in these modules
        will be checked for membership in 'implemented', and if not found,
        will be added to an 'unimplemented' set
      implemented: A Python object containing implemented derivatives. A
        function should be checkable for membership using the
        `fn in implemented` syntax.

    Returns:
      module_fns: A set of functions, builtins or ufuncs in `modules`.
    """
    callable_types = (types.BuiltinFunctionType, types.FunctionType,
                      numpy.ufunc)
    return {
        getattr(module, attr_name)
        for module in modules
        for attr_name in dir(module)
        if isinstance(getattr(module, attr_name), callable_types)
    }
Example #3
Source File: xnd_backend.py From unumpy with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __ua_convert__(value, dispatch_type, coerce):
    """uarray conversion hook mapping unumpy types onto xnd equivalents."""
    # ufuncs dispatch to the gumath function with the same name, if any;
    # otherwise fall through to the generic handling below.
    if dispatch_type is ufunc:
        if hasattr(fn, value.name):
            return getattr(fn, value.name)
    if value is None:
        return None
    if dispatch_type is ndarray:
        already_xnd = isinstance(value, xnd.xnd)
        if coerce or already_xnd:
            return convert(value, coerce=coerce)
        return NotImplemented
    if dispatch_type is dtype:
        # dtypes are re-expressed as ndtypes via their string form.
        return ndt(str(value))
    return NotImplemented
Example #4
Source File: sparse_backend.py From unumpy with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __ua_convert__(value, dispatch_type, coerce):
    """uarray conversion hook for the sparse backend."""
    if dispatch_type is ufunc:
        # ufuncs are resolved by name on numpy itself.
        return getattr(np, value.name)
    if value is None:
        return None
    if dispatch_type is not ndarray:
        return value
    # ndarray dispatch: keep sparse arrays as-is, coerce dense data only
    # when explicitly allowed.
    if isinstance(value, sparse.SparseArray):
        return value
    if not coerce:
        return NotImplemented
    return sparse.as_coo(np.asarray(value))
Example #5
Source File: core.py From sparse with BSD 3-Clause "New" or "Revised" License | 6 votes |
def astype(self, dtype, copy=True):
    """ Copy of the array, cast to a specified type.

    See also
    --------
    scipy.sparse.coo_matrix.astype : SciPy sparse equivalent function
    numpy.ndarray.astype : NumPy equivalent ufunc.
    :obj:`COO.elemwise`: Apply an arbitrary element-wise function to one or two
        arguments.
    """
    # Mirror numpy: only return self when the dtype already matches AND the
    # caller explicitly declined a copy.
    dtype_unchanged = self.dtype == dtype
    if dtype_unchanged and not copy:
        return self
    # Delegate to the ufunc machinery so sparsity is preserved.
    return self.__array_ufunc__(
        np.ndarray.astype, "__call__", self, dtype=dtype, copy=copy
    )
Example #6
Source File: test_quantity_ufuncs.py From Carnets with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_coverage(self):
    """Test that we cover all ufunc's"""
    numpy_ufuncs = {obj for obj in np.core.umath.__dict__.values()
                    if isinstance(obj, np.ufunc)}
    covered_ufuncs = (qh.UNSUPPORTED_UFUNCS
                      | set(qh.UFUNC_HELPERS.keys()))
    # Every numpy ufunc must be covered.
    assert numpy_ufuncs - covered_ufuncs == set()
    # Everything we cover must come from numpy or erfa.  (Coverage for
    # erfa is incomplete, so the reverse containment is not checked.)
    erfa_ufuncs = {obj for obj in erfa_ufunc.__dict__.values()
                   if isinstance(obj, np.ufunc)}
    assert covered_ufuncs - numpy_ufuncs - erfa_ufuncs == set()
Example #7
Source File: elemwise.py From D-VAE with MIT License | 6 votes |
def __init__(self, scalar_op, inplace_pattern=None, name=None,
             nfunc_spec=None, openmp=None):
    """Build an Elemwise op around *scalar_op*.

    ``inplace_pattern`` maps output index -> input index destroyed by that
    output; ``nfunc_spec`` optionally names a numpy function implementing
    the op (defaults to the scalar op's own spec, when present).
    """
    self.name = name
    self.scalar_op = scalar_op
    self.inplace_pattern = {} if inplace_pattern is None else inplace_pattern
    # destroy_map records which input each in-place output overwrites.
    self.destroy_map = {out: [inp]
                        for out, inp in self.inplace_pattern.items()}
    self.ufunc = None
    self.nfunc = None
    if nfunc_spec is None:
        nfunc_spec = getattr(scalar_op, 'nfunc_spec', None)
    self.nfunc_spec = nfunc_spec
    if nfunc_spec:
        self.nfunc = getattr(numpy, nfunc_spec[0])
    # precompute the hash of this node
    self._rehash()
    super(Elemwise, self).__init__(openmp=openmp)
Example #8
Source File: elemwise.py From attention-lvcsr with MIT License | 6 votes |
def __init__(self, scalar_op, inplace_pattern=None, name=None,
             nfunc_spec=None, openmp=None):
    """Construct an Elemwise op wrapping *scalar_op*.

    Parameters
    ----------
    scalar_op : object
        The scalar operation applied element-wise.
    inplace_pattern : dict, optional
        Maps output index to the input index that output destroys.
    name : str, optional
        Display name for this op.
    nfunc_spec : tuple, optional
        Spec naming a numpy function implementing the op; falls back to
        ``scalar_op.nfunc_spec`` when not given.
    openmp : optional
        Forwarded to the parent constructor.
    """
    if inplace_pattern is None:
        inplace_pattern = {}
    self.name = name
    self.scalar_op = scalar_op
    self.inplace_pattern = inplace_pattern
    # destroy_map records, for each in-place output, which input it clobbers.
    self.destroy_map = dict((o, [i]) for o, i in inplace_pattern.items())
    # ufunc/nfunc are resolved lazily elsewhere; start unset.
    self.ufunc = None
    self.nfunc = None
    if nfunc_spec is None:
        nfunc_spec = getattr(scalar_op, 'nfunc_spec', None)
    self.nfunc_spec = nfunc_spec
    if nfunc_spec:
        # First element of the spec is the numpy function name.
        self.nfunc = getattr(numpy, nfunc_spec[0])
    # precompute the hash of this node
    self._rehash()
    super(Elemwise, self).__init__(openmp=openmp)
Example #9
Source File: accessors.py From koalas with Apache License 2.0 | 6 votes |
def _transform_batch(self, func, return_schema):
    """Apply *func* batch-wise to the underlying series via a pandas UDF.

    When ``return_schema`` is None the Spark return type is inferred by
    running *func* on a limited pandas head of the series.
    """
    from databricks.koalas.series import Series
    from databricks import koalas as ks
    if isinstance(func, np.ufunc):
        # NOTE(review): ufuncs are wrapped in a plain lambda, presumably
        # because pandas_udf cannot accept a raw numpy.ufunc — confirm.
        f = func
        func = lambda *args, **kwargs: f(*args, **kwargs)
    if return_schema is None:
        # TODO: In this case, it avoids the shortcut for now (but only infers schema)
        # because it returns a series from a different DataFrame and it has a different
        # anchor. We should fix this to allow the shortcut or only allow to infer
        # schema.
        limit = ks.get_option("compute.shortcut_limit")
        pser = self._kser.head(limit)._to_internal_pandas()
        transformed = pser.transform(func)
        kser = Series(transformed)
        spark_return_type = kser.spark.data_type
    else:
        spark_return_type = return_schema
    pudf = pandas_udf(func, returnType=spark_return_type,
                      functionType=PandasUDFType.SCALAR)
    # Re-anchor the transformed column on the original series' name.
    return self._kser._with_new_scol(
        scol=pudf(self._kser.spark.column)).rename(self._kser.name)
Example #10
Source File: test_nan_inputs.py From GraphicDesignPatternByPython with MIT License | 6 votes |
def _get_ufuncs():
    """Collect all scipy.special ufuncs (with names), marking known
    failures as pytest xfail parameters."""
    params = []
    names = []
    for name in sorted(sc.__dict__):
        candidate = sc.__dict__[name]
        if not isinstance(candidate, np.ufunc):
            continue
        reason = KNOWNFAILURES.get(candidate)
        if reason is not None:
            # Known-bad ufuncs are still listed, but flagged xfail.
            marker = pytest.mark.xfail(run=False, reason=reason)
            candidate = pytest.param(candidate, marks=marker)
        params.append(candidate)
        names.append(name)
    return params, names
Example #11
Source File: elemwise.py From D-VAE with MIT License | 6 votes |
def set_ufunc(self, scalar_op):
    """Bind a numpy ufunc matching *scalar_op* (a speed optimisation).

    Known theano scalar ops map directly onto numpy ufuncs; anything else
    is wrapped with numpy.frompyfunc as a binary, single-output ufunc.
    """
    # Checked in order with isinstance, exactly mirroring the original
    # if/elif chain (so subclass matches resolve to the first entry).
    dispatch = (
        (theano.scalar.basic.Add, numpy.add),
        (theano.scalar.basic.Mul, numpy.multiply),
        (theano.scalar.basic.Maximum, numpy.maximum),
        (theano.scalar.basic.Minimum, numpy.minimum),
        (theano.scalar.basic.AND, numpy.bitwise_and),
        (theano.scalar.basic.OR, numpy.bitwise_or),
        (theano.scalar.basic.XOR, numpy.bitwise_xor),
    )
    for op_class, np_ufunc in dispatch:
        if isinstance(scalar_op, op_class):
            self.ufunc = np_ufunc
            return
    # Fallback: assumes the scalar impl takes 2 inputs and yields 1 output.
    self.ufunc = numpy.frompyfunc(scalar_op.impl, 2, 1)
Example #12
Source File: elemwise.py From attention-lvcsr with MIT License | 6 votes |
def set_ufunc(self, scalar_op):
    """Cache the numpy ufunc corresponding to *scalar_op* on ``self.ufunc``.

    Common theano scalar ops are mapped straight onto numpy ufuncs; any
    other op is wrapped via ``numpy.frompyfunc``.
    """
    # This is probably a speed up of the implementation
    if isinstance(scalar_op, theano.scalar.basic.Add):
        self.ufunc = numpy.add
    elif isinstance(scalar_op, theano.scalar.basic.Mul):
        self.ufunc = numpy.multiply
    elif isinstance(scalar_op, theano.scalar.basic.Maximum):
        self.ufunc = numpy.maximum
    elif isinstance(scalar_op, theano.scalar.basic.Minimum):
        self.ufunc = numpy.minimum
    elif isinstance(scalar_op, theano.scalar.basic.AND):
        self.ufunc = numpy.bitwise_and
    elif isinstance(scalar_op, theano.scalar.basic.OR):
        self.ufunc = numpy.bitwise_or
    elif isinstance(scalar_op, theano.scalar.basic.XOR):
        self.ufunc = numpy.bitwise_xor
    else:
        # Fallback assumes a binary scalar op (2 inputs, 1 output).
        self.ufunc = numpy.frompyfunc(scalar_op.impl, 2, 1)
Example #13
Source File: series.py From elasticintel with GNU General Public License v3.0 | 6 votes |
def __array_prepare__(self, result, context=None): """ Gets called prior to a ufunc """ # nice error message for non-ufunc types if context is not None and not isinstance(self._values, np.ndarray): obj = context[1][0] raise TypeError("{obj} with dtype {dtype} cannot perform " "the numpy op {op}".format( obj=type(obj).__name__, dtype=getattr(obj, 'dtype', None), op=context[0].__name__)) return result # complex
Example #14
Source File: helpers.py From Carnets with BSD 3-Clause "New" or "Revised" License | 5 votes |
def helper_frexp(f, unit):
    """Unit helper for frexp-like ufuncs.

    Accepts only unscaled dimensionless input; returns no input converter
    and unitless outputs (mantissa and exponent carry no unit).
    """
    if unit.is_unity():
        return [None], (None, None)
    raise UnitTypeError("Can only apply '{}' function to "
                        "unscaled dimensionless quantities"
                        .format(f.__name__))


# TWO ARGUMENT UFUNC HELPERS
#
# The functions below take two arguments. The output of the helper function
# should be two values: a tuple of two converters to be used to scale the
# inputs before being passed to the ufunc (None if no conversion is needed),
# and the unit the output will be in.
Example #15
Source File: frame.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def apply(self, func, axis=0, broadcast=False, reduce=False):
    """
    Analogous to DataFrame.apply, for SparseDataFrame

    Parameters
    ----------
    func : function
        Function to apply to each column
    axis : {0, 1, 'index', 'columns'}
    broadcast : bool, default False
        For aggregation functions, return object of same size with values
        propagated

    Returns
    -------
    applied : Series or SparseDataFrame
    """
    # Nothing to apply over an empty frame.
    if not len(self.columns):
        return self
    axis = self._get_axis_number(axis)
    if isinstance(func, np.ufunc):
        # Fast path: apply the ufunc column-by-column, also transforming
        # each column's fill_value so sparsity is preserved consistently.
        new_series = {}
        for k, v in compat.iteritems(self):
            applied = func(v)
            applied.fill_value = func(v.fill_value)
            new_series[k] = applied
        return self._constructor(
            new_series, index=self.index, columns=self.columns,
            default_fill_value=self._default_fill_value,
            default_kind=self._default_kind).__finalize__(self)
    else:
        # Generic callables go through the standard/broadcast machinery.
        if not broadcast:
            return self._apply_standard(func, axis, reduce=reduce)
        else:
            return self._apply_broadcast(func, axis)
Example #16
Source File: test_quantity_ufuncs.py From Carnets with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_invariant_twoarg_scalar(self, ufunc):
    """A two-argument invariant ufunc keeps the unit of its first input."""
    first = 4.7 * u.m
    second = 9.4 * u.km
    result = ufunc(first, second)
    assert isinstance(result, u.Quantity)
    assert result.unit == first.unit
    # Value must match the plain-float computation with the second
    # argument converted to the first argument's unit.
    expected = ufunc(first.value, second.to_value(first.unit))
    assert_allclose(result.value, expected)
Example #17
Source File: parameters.py From Carnets with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _create_value_wrapper(self, wrapper, model):
    """Wraps a getter/setter function to support optionally passing in a
    reference to the model object as the second argument.

    If a model is tied to this parameter and its getter/setter supports a
    second argument then this creates a partial function using the model
    instance as the second argument.
    """
    if isinstance(wrapper, np.ufunc):
        # ufuncs are accepted as-is, but only unary ones make sense as a
        # value getter/setter.
        if wrapper.nin != 1:
            raise TypeError("A numpy.ufunc used for Parameter "
                            "getter/setter may only take one input "
                            "argument")
    elif wrapper is None:
        # Just allow non-wrappers to fall through silently, for convenience
        return None
    else:
        inputs, params = get_inputs_and_params(wrapper)
        nargs = len(inputs)
        if nargs == 1:
            # Plain single-argument wrapper: usable unchanged.
            pass
        elif nargs == 2:
            # Two-argument wrappers need the model supplied as well.
            self._model_required = True
            if model is not None:
                # Don't make a partial function unless we're tied to a
                # specific model instance
                model_arg = inputs[1].name
                wrapper = functools.partial(wrapper, **{model_arg: model})
        else:
            raise TypeError("Parameter getter/setter must be a function "
                            "of either one or two arguments")
    return wrapper
Example #18
Source File: core.py From Carnets with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __array_wrap__(self, array, context=None):
    """
    Wrapper for multiplication with numpy arrays.

    Refuses ufunc results by returning NotImplemented (letting numpy fall
    back to this object's own operator implementations); any other result
    is passed through unchanged.

    Parameters
    ----------
    array : numpy.ndarray
        The result array produced by numpy.
    context : tuple, optional
        ``(ufunc, inputs, domain)`` as supplied by numpy, or None when
        invoked without a ufunc context.
    """
    # Bug fix: the original unconditionally subscripted ``context``, so a
    # call with the default context=None raised
    # "'NoneType' object is not subscriptable".  Also use isinstance
    # rather than the ``type(x) == T`` anti-pattern.
    if context is not None and isinstance(context[0], np.ufunc):
        return NotImplemented
    return array
Example #19
Source File: test_quantity_ufuncs.py From Carnets with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_invariant_twoarg_one_arbitrary(self, ufunc, arbitrary):
    """Invariant two-arg ufuncs keep the Quantity's unit when the second
    operand is an arbitrary unitless value."""
    quantity_in = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
    result = ufunc(quantity_in, arbitrary)
    assert isinstance(result, u.Quantity)
    assert result.unit == quantity_in.unit
    assert_allclose(result.value, ufunc(quantity_in.value, arbitrary))
Example #20
Source File: test_quantity_ufuncs.py From Carnets with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_invariant_array(self, ufunc):
    """A unit-invariant ufunc applied to an array Quantity preserves the
    unit and matches the plain ndarray computation."""
    quantity_in = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
    result = ufunc(quantity_in)
    assert isinstance(result, u.Quantity)
    assert result.unit == quantity_in.unit
    assert np.all(result.value == ufunc(quantity_in.value))
Example #21
Source File: series.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def __array_wrap__(self, result, context=None):
    """ Gets called after a ufunc """
    # Re-wrap the raw ndarray result as a Series sharing this index, then
    # propagate metadata from the original object.
    wrapped = self._constructor(result, index=self.index, copy=False)
    return wrapped.__finalize__(self)
Example #22
Source File: array.py From holoviews with BSD 3-Clause "New" or "Revised" License | 5 votes |
def aggregate(cls, dataset, dimensions, function, **kwargs):
    """Aggregate *dataset* over the supplied key *dimensions* with
    *function*, returning the aggregated 2-D array (and an empty list of
    dropped values)."""
    reindexed = dataset.reindex(dimensions)
    # With no key dimensions, treat the whole data as a single group.
    grouped = (cls.groupby(reindexed, dimensions, list, 'raw')
               if len(dimensions) else [((), reindexed.data)])
    rows = []
    for k, group in grouped:
        if isinstance(function, np.ufunc):
            # ufuncs expose a C-level reduce along the row axis.
            reduced = function.reduce(group, axis=0, **kwargs)
        else:
            reduced = function(group, axis=0, **kwargs)
        # Each output row is the group key followed by the reduced values.
        rows.append(np.concatenate([k, (reduced,) if np.isscalar(reduced)
                                    else reduced]))
    return np.atleast_2d(rows), []
Example #23
Source File: transform.py From holoviews with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _resolve_op(self, op, dataset, data, flat, expanded, ranges, all_values,
                keep_index, compute, strict):
    """Resolve one deferred operation dict into a concrete callable plus
    its fully-evaluated positional and keyword arguments.

    Nested ``dim`` expressions in args/kwargs are recursively applied to
    the dataset before being passed on.
    """
    args = op['args']
    fn = op['fn']
    kwargs = dict(op['kwargs'])
    fn_name = self._numpy_funcs.get(fn)
    if fn_name and hasattr(data, fn_name):
        # Prefer the data object's own method of the same name; plain
        # (non-ufunc) numpy reductions default to axis=None.
        if 'axis' not in kwargs and not isinstance(fn, np.ufunc):
            kwargs['axis'] = None
        fn = fn_name
    if isinstance(fn, basestring):
        # String fn means a method/accessor lookup on the data itself, so
        # the data is not passed as a positional argument.
        accessor = kwargs.pop('accessor', None)
        fn_args = []
    else:
        accessor = False
        fn_args = [data]
    for arg in args:
        if isinstance(arg, dim):
            # Recursively evaluate nested dim expressions.
            arg = arg.apply(
                dataset, flat, expanded, ranges, all_values,
                keep_index, compute, strict
            )
        arg = resolve_dependent_value(arg)
        fn_args.append(arg)
    fn_kwargs = {}
    for k, v in kwargs.items():
        if isinstance(v, dim):
            v = v.apply(
                dataset, flat, expanded, ranges, all_values,
                keep_index, compute, strict
            )
        fn_kwargs[k] = resolve_dependent_value(v)
    # 'reverse' flips operand order (for reflected operators like __radd__).
    args = tuple(fn_args[::-1] if op['reverse'] else fn_args)
    kwargs = dict(fn_kwargs)
    return fn, fn_name, args, kwargs, accessor
Example #24
Source File: transform.py From holoviews with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __array_ufunc__(self, *args, **kwargs):
    """Intercept numpy ufunc calls, building a new deferred expression of
    the same type instead of evaluating eagerly."""
    ufunc = args[0]
    # Drop unset (None) keyword arguments before re-wrapping.
    cleaned = {key: val for key, val in kwargs.items() if val is not None}
    return type(self)(self, ufunc, **cleaned)
Example #25
Source File: raster.py From holoviews with BSD 3-Clause "New" or "Revised" License | 5 votes |
def reduce(self, dimensions=None, function=None, **reduce_map):
    """
    Reduces the Raster using functions provided via the
    kwargs, where the keyword is the dimension to be reduced.
    Optionally a label_prefix can be provided to prepend to
    the result Element label.
    """
    function, dims = self._reduce_map(dimensions, function, reduce_map)
    if len(dims) == self.ndims:
        # Reducing over every dimension collapses to a scalar.
        if isinstance(function, np.ufunc):
            return function.reduce(self.data, axis=None)
        else:
            return function(self.data)
    else:
        # Partial reduction: collapse along the *other* dimension's axis
        # and return a Table keyed by the remaining dimension.
        dimension = dims[0]
        other_dimension = [d for d in self.kdims if d.name != dimension]
        oidx = self.get_dimension_index(other_dimension[0])
        x_vals = self.dimension_values(other_dimension[0].name, False)
        reduced = function(self._zdata, axis=oidx)
        # NOTE(review): the flip appears to compensate for the inverted
        # y-axis of bounded rasters — confirm against Raster orientation.
        if oidx and hasattr(self, 'bounds'):
            reduced = reduced[::-1]
        data = zip(x_vals, reduced)
        params = dict(dict(self.param.get_param_values(onlychanged=True)),
                      kdims=other_dimension, vdims=self.vdims)
        # bounds/extents no longer apply to the tabular result.
        params.pop('bounds', None)
        params.pop('extents', None)
        return Table(data, **params)
Example #26
Source File: raster.py From holoviews with BSD 3-Clause "New" or "Revised" License | 5 votes |
def collapse_data(cls, data_list, function, kdims=None, **kwargs):
    """Deprecated: collapse a list of arrays into one using *function*."""
    param.main.param.warning(
        'Raster.collapse_data is deprecated, collapsing '
        'may now be performed through concatenation '
        'and aggregation.')
    # ufuncs can reduce the list directly; other callables operate on a
    # depth-stacked array along the last axis.
    if isinstance(function, np.ufunc):
        return function.reduce(data_list)
    return function(np.dstack(data_list), axis=-1, **kwargs)
Example #27
Source File: test_map.py From marvin with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_np_ufunc_notimplemented(self, maps_release_only, ufunc):
    """Unsupported numpy ufuncs applied to a Map must raise
    NotImplementedError with a specific message."""
    ha = maps_release_only.emline_gflux_ha_6564
    nii = maps_release_only.emline_gflux_nii_6585
    np_func = getattr(np, ufunc)
    with pytest.raises(NotImplementedError) as ee:
        # Unary ufuncs get one map; binary ones get two.
        if getattr(np_func, 'nargs') <= 2:
            np_func(ha)
        else:
            np_func(nii, ha)
    expected = 'np.{0} is not implemented for Map.'.format(np_func.__name__)
    assert str(ee.value) == expected
Example #28
Source File: utils.py From scprep with GNU General Public License v3.0 | 5 votes |
def matrix_transform(data, fun, *args, **kwargs):
    """Perform a numerical transformation to data

    Parameters
    ----------
    data : array-like, shape=[n_samples, n_features]
        Input data
    fun : callable
        Numerical transformation function, `np.ufunc` or similar.
    args, kwargs : additional arguments, optional
        arguments for `fun`. `data` is always passed as the first argument

    Returns
    -------
    data : array-like, shape=[n_samples, n_features]
        Transformed output data
    """
    if is_sparse_dataframe(data) or is_SparseDataFrame(data):
        # Sparse dataframes: transform column by column on a copy so the
        # sparse column storage is preserved.
        data = data.copy()
        for col in data.columns:
            data[col] = fun(data[col], *args, **kwargs)
        return data
    if sparse.issparse(data):
        # scipy sparse: operate on the stored values only.  lil/dok have
        # no flat .data, so convert them to CSR first.
        if isinstance(data, (sparse.lil_matrix, sparse.dok_matrix)):
            data = data.tocsr()
        else:
            # avoid modifying in place
            data = data.copy()
        data.data = fun(data.data, *args, **kwargs)
        return data
    # Dense input: apply directly.
    return fun(data, *args, **kwargs)
Example #29
Source File: core.py From sparse with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _grouped_reduce(x, groups, method, **kwargs):
    """Reduce *x* group-wise with the ufunc *method*.

    Parameters
    ----------
    x : np.ndarray
        The data to reduce.
    groups : np.ndarray
        Group labels for each element; groups must be contiguous.
    method : np.ufunc
        The ufunc whose ``reduceat`` performs the reduction.
    kwargs : dict
        Extra keyword arguments forwarded to ``reduceat``.

    Returns
    -------
    result : np.ndarray
        The per-group reduction results.
    inv_idx : np.ndarray
        Index of the first element of each group.
    counts : np.ndarray
        Number of elements in each group.
    """
    # Partial credit to @shoyer
    # Ref: https://gist.github.com/shoyer/f538ac78ae904c936844
    inv_idx, counts = _calc_counts_invidx(groups)
    # reduceat reduces each contiguous [inv_idx[i], inv_idx[i+1]) slice.
    result = method.reduceat(x, inv_idx, **kwargs)
    return result, inv_idx, counts
Example #30
Source File: core.py From sparse with BSD 3-Clause "New" or "Revised" License | 5 votes |
def round(self, decimals=0, out=None):
    """ Evenly round to the given number of decimals.

    See also
    --------
    :obj:`numpy.round` : NumPy equivalent ufunc.
    :obj:`COO.elemwise`: Apply an arbitrary element-wise function to one or two
        arguments.
    """
    # Normalise a single out array into the tuple form the ufunc
    # machinery expects.
    if out is not None:
        if not isinstance(out, tuple):
            out = (out,)
    return self.__array_ufunc__(
        np.round, "__call__", self, decimals=decimals, out=out
    )