Python operator.add() Examples

The following are 27 code examples of operator.add(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module operator.
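As a quick refresher before the examples: operator.add(a, b) is simply the function form of the + operator, which makes it convenient to pass around as a callback (to reduce(), reduceByKey(), and similar APIs). A minimal sketch:

import operator

assert operator.add(1, 2) == 3
assert operator.add("foo", "bar") == "foobar"  # any type supporting + works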
Example #1
Source File: test_series.py    From recruit with Apache License 2.0
def test_binary_operators(self):

        # skipping for now
        import pytest
        pytest.skip("skipping sparse binary operators test")

        def _check_inplace_op(iop, op):
            tmp = self.bseries.copy()

            expected = op(tmp, self.bseries)
            iop(tmp, self.bseries)
            tm.assert_sp_series_equal(tmp, expected)

        inplace_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow']
        for op in inplace_ops:
            _check_inplace_op(getattr(operator, "i%s" % op),
                              getattr(operator, op)) 
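This test pairs each binary operator function with its in-place counterpart (operator.iadd, operator.isub, and so on). The distinction only matters for mutable operands; a minimal sketch, independent of the test above:

import operator

a = [1, 2]
b = operator.add(a, [3])   # returns a new list; a is unchanged
c = operator.iadd(a, [3])  # calls a.__iadd__, mutating a in place
assert a == [1, 2, 3] and c is a and b == [1, 2, 3]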
Example #2
Source File: test_extint128.py    From recruit with Apache License 2.0
def test_safe_binop():
    # Test checked arithmetic routines

    ops = [
        (operator.add, 1),
        (operator.sub, 2),
        (operator.mul, 3)
    ]

    with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it:
        for xop, a, b in it:
            pyop, op = xop
            c = pyop(a, b)

            if not (INT64_MIN <= c <= INT64_MAX):
                assert_raises(OverflowError, mt.extint_safe_binop, a, b, op)
            else:
                d = mt.extint_safe_binop(a, b, op)
                if c != d:
                    # assert_equal is slow
                    assert_equal(d, c) 
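mt.extint_safe_binop is a compiled helper from NumPy's test suite. A rough pure-Python sketch of the behaviour being verified (safe_binop below is illustrative, not NumPy API) might look like:

import operator

INT64_MIN, INT64_MAX = -2**63, 2**63 - 1

def safe_binop(a, b, op):
    # Compute with unbounded Python ints, then range-check the result.
    c = op(a, b)
    if not (INT64_MIN <= c <= INT64_MAX):
        raise OverflowError("result does not fit in int64")
    return c

safe_binop(INT64_MAX, 1, operator.sub)  # fine
# safe_binop(INT64_MAX, 1, operator.add) would raise OverflowError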
Example #3
Source File: test_classes.py    From recruit with Apache License 2.0
def test_add(Poly):
    # This checks commutation, not numerical correctness
    c1 = list(random((4,)) + .5)
    c2 = list(random((3,)) + .5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    p3 = p1 + p2
    assert_poly_almost_equal(p2 + p1, p3)
    assert_poly_almost_equal(p1 + c2, p3)
    assert_poly_almost_equal(c2 + p1, p3)
    assert_poly_almost_equal(p1 + tuple(c2), p3)
    assert_poly_almost_equal(tuple(c2) + p1, p3)
    assert_poly_almost_equal(p1 + np.array(c2), p3)
    assert_poly_almost_equal(np.array(c2) + p1, p3)
    assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1))
    assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1))
    if Poly is Polynomial:
        assert_raises(TypeError, op.add, p1, Chebyshev([0]))
    else:
        assert_raises(TypeError, op.add, p1, Polynomial([0])) 
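For a standalone flavour of what this test asserts, assuming NumPy's polynomial package: addition commutes between polynomial and raw-coefficient operands, while mixing incompatible bases raises TypeError.

import operator
from numpy.polynomial import Polynomial, Chebyshev

p = Polynomial([1, 2])
assert (p + [3, 0]) == ([3, 0] + p)  # Polynomial([4., 2.]) either way
# operator.add(p, Chebyshev([0]))   # would raise TypeError: mixed bases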
Example #4
Source File: helpers.py    From tensortrade with Apache License 2.0
def create_internal_feed(portfolio: 'Portfolio'):

    base_symbol = portfolio.base_instrument.symbol
    sources = []

    for wallet in portfolio.wallets:
        symbol = wallet.instrument.symbol
        sources += [wallet.exchange]
        sources += [create_wallet_source(wallet, include_worth=(symbol != base_symbol))]

    worth_nodes = Condition(
        lambda node: node.name.endswith(base_symbol + ":/total") or node.name.endswith("worth")
    )(*sources)

    net_worth = Reduce(func=operator.add)(worth_nodes).rename("net_worth")

    sources += [net_worth]

    feed = DataFeed(sources).attach(portfolio)

    return feed 
Example #5
Source File: offsets.py    From recruit with Apache License 2.0
def apply_index(self, i):
        time = i.to_perioddelta('D')
        # to_period rolls forward to the next BDay; track where it does
        # and reduce n accordingly when rolling forward
        asper = i.to_period('B')
        if not isinstance(asper._data, np.ndarray):
            # unwrap PeriodIndex --> PeriodArray
            asper = asper._data

        if self.n > 0:
            shifted = (i.to_perioddelta('B') - time).asi8 != 0

            # Integer-array addition is deprecated, so we use
            # _time_shift directly
            roll = np.where(shifted, self.n - 1, self.n)
            shifted = asper._addsub_int_array(roll, operator.add)
        else:
            # Integer addition is deprecated, so we use _time_shift directly
            roll = self.n
            shifted = asper._time_shift(roll)

        result = shifted.to_timestamp() + time
        return result 
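_addsub_int_array is a private pandas helper that receives the operation itself as an argument. The same function-as-parameter idiom in miniature (shift below is illustrative, not pandas API):

import operator

def shift(values, amounts, op):
    # Apply op elementwise; callers choose operator.add or operator.sub.
    return [op(v, n) for v, n in zip(values, amounts)]

shift([10, 20, 30], [1, 2, 3], operator.add)  # [11, 22, 33]
shift([10, 20, 30], [1, 2, 3], operator.sub)  # [9, 18, 27]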
Example #6
Source File: test_panel.py    From recruit with Apache License 2.0
def test_arith(self):
        self._test_op(self.panel, operator.add)
        self._test_op(self.panel, operator.sub)
        self._test_op(self.panel, operator.mul)
        self._test_op(self.panel, operator.truediv)
        self._test_op(self.panel, operator.floordiv)
        self._test_op(self.panel, operator.pow)

        self._test_op(self.panel, lambda x, y: y + x)
        self._test_op(self.panel, lambda x, y: y - x)
        self._test_op(self.panel, lambda x, y: y * x)
        self._test_op(self.panel, lambda x, y: y / x)
        self._test_op(self.panel, lambda x, y: y ** x)

        self._test_op(self.panel, lambda x, y: x + y)  # panel + 1
        self._test_op(self.panel, lambda x, y: x - y)  # panel - 1
        self._test_op(self.panel, lambda x, y: x * y)  # panel * 1
        self._test_op(self.panel, lambda x, y: x / y)  # panel / 1
        self._test_op(self.panel, lambda x, y: x ** y)  # panel ** 1

        pytest.raises(Exception, self.panel.__add__,
                      self.panel['ItemA']) 
Example #7
Source File: ndarray.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def attach_grad(self, grad_req='write', stype=None):
        """Attach a gradient buffer to this NDArray, so that `backward`
        can compute gradient with respect to it.

        Parameters
        ----------
        grad_req : {'write', 'add', 'null'}
            How gradient will be accumulated.
            - 'write': gradient will be overwritten on every backward.
            - 'add': gradient will be added to existing value on every backward.
            - 'null': do not compute gradient for this NDArray.
        stype : str, optional
            The storage type of the gradient array. Defaults to the same stype of this NDArray.
        """
        from . import zeros as _zeros
        if stype is not None:
            grad = _zeros(self.shape, stype=stype)
        else:
            grad = op.zeros_like(self)  # pylint: disable=undefined-variable
        grad_req = _GRAD_REQ_MAP[grad_req]
        check_call(_LIB.MXAutogradMarkVariables(
            1, ctypes.pointer(self.handle),
            ctypes.pointer(mx_uint(grad_req)),
            ctypes.pointer(grad.handle))) 
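A typical call site, sketched under the assumption of a standard MXNet setup: grad_req='add' makes gradients accumulate across backward passes instead of being overwritten.

import mxnet as mx
from mxnet import autograd

x = mx.nd.array([1.0, 2.0, 3.0])
x.attach_grad(grad_req='add')
with autograd.record():
    y = (x * x).sum()
y.backward()
print(x.grad)  # dy/dx = 2x -> [2. 4. 6.]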
Example #8
Source File: test_panel.py    From recruit with Apache License 2.0
def test_arith_flex_panel(self):
        ops = ['add', 'sub', 'mul', 'div',
               'truediv', 'pow', 'floordiv', 'mod']
        if not compat.PY3:
            aliases = {}
        else:
            aliases = {'div': 'truediv'}
        self.panel = self.panel.to_panel()

        for n in [np.random.randint(-50, -1), np.random.randint(1, 50), 0]:
            for op in ops:
                alias = aliases.get(op, op)
                f = getattr(operator, alias)
                exp = f(self.panel, n)
                result = getattr(self.panel, op)(n)
                assert_panel_equal(result, exp, check_panel_type=True)

                # rops
                r_f = lambda x, y: f(y, x)
                exp = r_f(self.panel, n)
                result = getattr(self.panel, 'r' + op)(n)
                assert_panel_equal(result, exp) 
Example #9
Source File: movie_recommender.py    From Data_Analytics_with_Hadoop with MIT License
def compute_rmse(model, data, n):
    """
    Compute RMSE (Root Mean Squared Error): the square root of the average
    value of (actual rating - predicted rating)^2.
    """
    predictions = model.predictAll(data.map(lambda x: (x[0], x[1])))
    predictions_ratings = predictions.map(lambda x: ((x[0], x[1]), x[2])) \
      .join(data.map(lambda x: ((x[0], x[1]), x[2]))) \
      .values()
    return sqrt(predictions_ratings.map(lambda x: (x[0] - x[1]) ** 2).reduce(add) / float(n))


##########################################################################
# Main
########################################################################## 
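The reduce(add) pattern works just as well without Spark. A minimal local sketch of the same formula (rmse here is a hypothetical helper, not part of the project):

import operator
from functools import reduce
from math import sqrt

def rmse(actual, predicted):
    sq_errors = [(a - p) ** 2 for a, p in zip(actual, predicted)]
    return sqrt(reduce(operator.add, sq_errors) / float(len(sq_errors)))

rmse([3.0, 4.0], [2.5, 4.5])  # 0.5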
Example #10
Source File: test_panel.py    From recruit with Apache License 2.0
def test_combine_series(self):
        s = self.panel['ItemA'][:10]
        result = self.panel.add(s, axis=0)
        expected = DataFrame.add(self.panel, s, axis=0)
        assert_frame_equal(result, expected)

        s = self.panel.iloc[5]
        result = self.panel + s
        expected = DataFrame.add(self.panel, s, axis=1)
        assert_frame_equal(result, expected) 
Example #11
Source File: test_panel.py    From recruit with Apache License 2.0
def test_combinePanel(self):
        wp = self.panel.to_panel()
        result = self.panel.add(self.panel)
        wide_result = result.to_panel()
        assert_frame_equal(wp['ItemA'] * 2, wide_result['ItemA'])

        # one item
        result = self.panel.add(self.panel.filter(['ItemA'])) 
Example #12
Source File: test_decimal.py    From recruit with Apache License 2.0
def test_combine_from_sequence_raises():
    # https://github.com/pandas-dev/pandas/issues/22850
    ser = pd.Series(DecimalArrayWithoutFromSequence([
        decimal.Decimal("1.0"),
        decimal.Decimal("2.0")
    ]))
    result = ser.combine(ser, operator.add)

    # note: object dtype
    expected = pd.Series([decimal.Decimal("2.0"),
                          decimal.Decimal("4.0")], dtype="object")
    tm.assert_series_equal(result, expected) 
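Outside the extension-array edge case being tested, Series.combine with operator.add is an elementwise fold. A minimal sketch:

import operator
import pandas as pd

s = pd.Series([1, 2, 3])
print(s.combine(s, operator.add))  # 2, 4, 6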
Example #13
Source File: offsets.py    From recruit with Apache License 2.0
def __add__(self, other):
        if isinstance(other, Tick):
            if type(self) == type(other):
                return type(self)(self.n + other.n)
            else:
                return _delta_to_tick(self.delta + other.delta)
        elif isinstance(other, ABCPeriod):
            return other + self
        try:
            return self.apply(other)
        except ApplyTypeError:
            return NotImplemented
        except OverflowError:
            raise OverflowError("the add operation between {self} and {other} "
                                "will overflow".format(self=self, other=other)) 
Example #14
Source File: offsets.py    From recruit with Apache License 2.0
def _end_apply_index(self, dtindex):
        """
        Add self to the given DatetimeIndex, specialized for case where
        self.weekday is non-null.

        Parameters
        ----------
        dtindex : DatetimeIndex

        Returns
        -------
        result : DatetimeIndex
        """
        off = dtindex.to_perioddelta('D')

        base, mult = libfrequencies.get_freq_code(self.freqstr)
        base_period = dtindex.to_period(base)
        if not isinstance(base_period._data, np.ndarray):
            # unwrap PeriodIndex --> PeriodArray
            base_period = base_period._data

        if self.n > 0:
            # when adding, dates on end roll to next
            normed = dtindex - off + Timedelta(1, 'D') - Timedelta(1, 'ns')
            roll = np.where(base_period.to_timestamp(how='end') == normed,
                            self.n, self.n - 1)
            # integer-array addition on PeriodIndex is deprecated,
            #  so we use _addsub_int_array directly
            shifted = base_period._addsub_int_array(roll, operator.add)
            base = shifted.to_timestamp(how='end')
        else:
            # integer addition on PeriodIndex is deprecated,
            #  so we use _time_shift directly
            roll = self.n
            base = base_period._time_shift(roll).to_timestamp(how='end')

        return base + off + Timedelta(1, 'ns') - Timedelta(1, 'D') 
Example #15
Source File: test_panel.py    From recruit with Apache License 2.0
def test_raise_when_not_implemented(self):
        p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
                  items=['ItemA', 'ItemB', 'ItemC'],
                  major_axis=date_range('20130101', periods=4),
                  minor_axis=list('ABCDE'))
        d = p.sum(axis=1).iloc[0]
        ops = ['add', 'sub', 'mul', 'truediv',
               'floordiv', 'div', 'mod', 'pow']
        for op in ops:
            with pytest.raises(NotImplementedError):
                getattr(p, op)(d, axis=0) 
Example #16
Source File: segment_tree.py    From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0
def __init__(self, capacity):
        super(SumSegmentTree, self).__init__(
            capacity=capacity,
            operation=operator.add,
            neutral_element=0.0
        ) 
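The neutral_element must be the identity of the chosen operation: 0.0 for operator.add, so that reducing an empty range returns it. A quick illustration with functools.reduce:

import operator
from functools import reduce

assert reduce(operator.add, [], 0.0) == 0.0          # empty range -> identity
assert reduce(operator.add, [1.0, 2.5], 0.0) == 3.5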
Example #17
Source File: spark-app.py    From Data_Analytics_with_Hadoop with MIT License
def main(sc):
    """
    Primary analysis mechanism for Spark application
    """

    # Load stopwords from the dataset
    with open('stopwords.txt', 'r') as words:
        stopwords = frozenset([
            word.strip() for word in words.read().split("\n")
        ])

    # Broadcast the stopwords across the cluster
    stopwords = sc.broadcast(stopwords)

    # Create an accumulator to count total number of trigrams and words.
    words     = sc.accumulator(0)
    trigrams  = sc.accumulator(0)

    # Load and parse the corpus from HDFS and insert into an RDD
    tokens = sc.textFile("textcorpus").flatMap(tokenize)

    # Perform the word count
    tokens.foreach(lambda w: words.add(1))

    # Filter stopwords and extract trigrams
    tokens = tokens.filter(partial(include, stopwords=stopwords))
    tokens = tokens.mapPartitions(partial(ngrams, n=3))

    # Perform the trigram count
    tokens.foreach(lambda ng: trigrams.add(1))

    # Count per-trigram frequency
    tokens = tokens.map(lambda t: (t, 1)).reduceByKey(add)

    # Write output to disk
    tokens.saveAsTextFile("trigrams")

    print "Number of trigrams: {} in {} words.".format(trigrams.value, words.value)
    for trigram, frequency in tokens.sortBy(lambda (t,c): c).take(100):
        print "{}: {}".format(frequency, trigram) 
Example #18
Source File: spark-app.py    From Data_Analytics_with_Hadoop with MIT License
def main(sc):
    """
    Primary analysis mechanism for Spark application
    """

    # Load stopwords from the dataset
    with open('stopwords.txt', 'r') as words:
        stopwords = frozenset([
            word.strip() for word in words.read().split("\n")
        ])

    # Broadcast the stopwords across the cluster
    stopwords = sc.broadcast(stopwords)

    # Load the corpus as whole text files and chunk them.
    corpus  = sc.wholeTextFiles('reuters.txt').flatMap(chunk)

    # Phase one: tokenize and sum (word, docid), count pairs (document frequency).
    docfreq = corpus.flatMap(partial(tokenize, stopwords=stopwords))
    docfreq = docfreq.reduceByKey(add)

    # Phase two: compute term frequency then perform keyspace change.
    # kv = (key, tf) where key = (word, docid); Python 3 lambdas cannot unpack tuples
    trmfreq = docfreq.map(lambda kv: (kv[0][1], (kv[0][0], kv[1], 1)))
    trmfreq = trmfreq.reduceByKey(term_frequency)
    # kv = (word, (docid, tf, n))
    trmfreq = trmfreq.map(lambda kv: ((kv[0], kv[1][0]), (kv[1][1], kv[1][2])))

    # Phase three: compute the tf-idf of each word, document pair.
    tfidfs  = trmfreq.map(tfidf)

    # Write the results out to disk
    tfidfs.saveAsTextFile("reuters-tfidfs") 
Example #19
Source File: sortedlist.py    From misp42splunk with GNU Lesser General Public License v3.0
def recursive_repr(fillvalue='...'):
    "Decorator to make a repr function return fillvalue for a recursive call."
    # pylint: disable=missing-docstring
    # Copied from reprlib in Python 3
    # https://hg.python.org/cpython/file/3.6/Lib/reprlib.py

    def decorating_function(user_function):
        repr_running = set()

        @wraps(user_function)
        def wrapper(self):
            key = id(self), get_ident()
            if key in repr_running:
                return fillvalue
            repr_running.add(key)
            try:
                result = user_function(self)
            finally:
                repr_running.discard(key)
            return result

        return wrapper

    return decorating_function

###############################################################################
# END Python 2/3 Shims
############################################################################### 
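A quick usage sketch, assuming the decorator above with its functools.wraps and get_ident imports in scope: it short-circuits self-referential structures that would otherwise recurse forever (Node is a hypothetical example class).

class Node:
    def __init__(self):
        self.child = self  # self-reference: a plain repr would never terminate

    @recursive_repr()
    def __repr__(self):
        return 'Node({!r})'.format(self.child)

repr(Node())  # "Node('...')" instead of RecursionError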
Example #20
Source File: matchmaker.py    From Data_Analytics_with_Hadoop with MIT License
def compute_rmse(model, data, n):
    """
    Compute RMSE (Root Mean Squared Error): the square root of the average
    value of (actual rating - predicted rating)^2.
    """
    predictions = model.predictAll(data.map(lambda x: (x[0], x[1])))
    predictions_ratings = predictions.map(lambda x: ((x[0], x[1]), x[2])) \
      .join(data.map(lambda x: ((x[0], x[1]), x[2]))) \
      .values()
    return sqrt(predictions_ratings.map(lambda x: (x[0] - x[1]) ** 2).reduce(add) / float(n))


##########################################################################
# Main
########################################################################## 
Example #21
Source File: sparse.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def __add__(self, other):
        return add(self, other) 
Example #22
Source File: sortedlist.py    From misp42splunk with GNU Lesser General Public License v3.0
def update(self, iterable):
        """Update sorted-key list by adding all values from `iterable`.

        Runtime complexity: `O(k*log(n))` -- approximate.

        >>> from operator import neg
        >>> skl = SortedKeyList(key=neg)
        >>> skl.update([3, 1, 2])
        >>> skl
        SortedKeyList([3, 2, 1], key=<built-in function neg>)

        :param iterable: iterable of values to add

        """
        _lists = self._lists
        _keys = self._keys
        _maxes = self._maxes
        values = sorted(iterable, key=self._key)

        if _maxes:
            if len(values) * 4 >= self._len:
                values.extend(chain.from_iterable(_lists))
                values.sort(key=self._key)
                self._clear()
            else:
                _add = self.add
                for val in values:
                    _add(val)
                return

        _load = self._load
        _lists.extend(values[pos:(pos + _load)]
                      for pos in range(0, len(values), _load))
        _keys.extend(list(map(self._key, _list)) for _list in _lists)
        _maxes.extend(sublist[-1] for sublist in _keys)
        self._len = len(values)
        del self._index[:] 
Example #23
Source File: sortedlist.py    From misp42splunk with GNU Lesser General Public License v3.0
def append(self, value):
        """Raise not-implemented error.

        Implemented to override `MutableSequence.append` which provides an
        erroneous default implementation.

        :raises NotImplementedError: use ``sl.add(value)`` instead

        """
        raise NotImplementedError('use ``sl.add(value)`` instead') 
Example #24
Source File: sortedlist.py    From misp42splunk with GNU Lesser General Public License v3.0
def __setitem__(self, index, value):
        """Raise not-implemented error.

        ``sl.__setitem__(index, value)`` <==> ``sl[index] = value``

        :raises NotImplementedError: use ``del sl[index]`` and
            ``sl.add(value)`` instead

        """
        message = 'use ``del sl[index]`` and ``sl.add(value)`` instead'
        raise NotImplementedError(message) 
Example #25
Source File: sortedlist.py    From misp42splunk with GNU Lesser General Public License v3.0
def update(self, iterable):
        """Update sorted list by adding all values from `iterable`.

        Runtime complexity: `O(k*log(n))` -- approximate.

        >>> sl = SortedList()
        >>> sl.update([3, 1, 2])
        >>> sl
        SortedList([1, 2, 3])

        :param iterable: iterable of values to add

        """
        _lists = self._lists
        _maxes = self._maxes
        values = sorted(iterable)

        if _maxes:
            if len(values) * 4 >= self._len:
                values.extend(chain.from_iterable(_lists))
                values.sort()
                self._clear()
            else:
                _add = self.add
                for val in values:
                    _add(val)
                return

        _load = self._load
        _lists.extend(values[pos:(pos + _load)]
                      for pos in range(0, len(values), _load))
        _maxes.extend(sublist[-1] for sublist in _lists)
        self._len = len(values)
        del self._index[:] 
Example #26
Source File: sortedlist.py    From misp42splunk with GNU Lesser General Public License v3.0
def insert(self, index, value):
        """Raise not-implemented error.

        :raises NotImplementedError: use ``sl.add(value)`` instead

        """
        raise NotImplementedError('use ``sl.add(value)`` instead') 
Example #27
Source File: sortedlist.py    From misp42splunk with GNU Lesser General Public License v3.0
def add(self, value):
        """Add `value` to sorted list.

        Runtime complexity: `O(log(n))` -- approximate.

        >>> sl = SortedList()
        >>> sl.add(3)
        >>> sl.add(1)
        >>> sl.add(2)
        >>> sl
        SortedList([1, 2, 3])

        :param value: value to add to sorted list

        """
        _lists = self._lists
        _maxes = self._maxes

        if _maxes:
            pos = bisect_right(_maxes, value)

            if pos == len(_maxes):
                pos -= 1
                _lists[pos].append(value)
                _maxes[pos] = value
            else:
                insort(_lists[pos], value)

            self._expand(pos)
        else:
            _lists.append([value])
            _maxes.append(value)

        self._len += 1