Python functools.reduce() Examples

The following code examples show how to use functools.reduce(). They are drawn from open source Python projects.
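
As a quick refresher before the examples: functools.reduce(function, iterable[, initializer]) folds an iterable into a single value by applying a two-argument function cumulatively from left to right. A minimal sketch:

from functools import reduce
import operator

total = reduce(operator.add, [1, 2, 3, 4], 0)  # ((((0 + 1) + 2) + 3) + 4) == 10
product = reduce(operator.mul, [1, 2, 3, 4])   # no initializer: the first element seeds the fold, giving 24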

Example 1
Project: fine-lm   Author: akzaidi   File: common_attention.py    MIT License
def gather_indices_2d(x, block_shape, block_stride):
  """Getting gather indices."""
  # making an identity matrix kernel
  kernel = tf.eye(block_shape[0] * block_shape[1])
  kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1])
  # making indices [1, h, w, 1] to apply convs
  x_shape = common_layers.shape_list(x)
  indices = tf.range(x_shape[2] * x_shape[3])
  indices = tf.reshape(indices, [1, x_shape[2], x_shape[3], 1])
  indices = tf.nn.conv2d(
      tf.cast(indices, tf.float32),
      kernel,
      strides=[1, block_stride[0], block_stride[1], 1],
      padding="VALID")
  # making indices [num_blocks, dim] to gather
  dims = common_layers.shape_list(indices)[:3]
  if all([isinstance(dim, int) for dim in dims]):
    num_blocks = functools.reduce(operator.mul, dims, 1)
  else:
    num_blocks = tf.reduce_prod(dims)
  indices = tf.reshape(indices, [num_blocks, -1])
  return tf.cast(indices, tf.int32) 
Example 2
Project: pyblish-win   Author: pyblish   File: difflib.py    GNU Lesser General Public License v3.0
def ratio(self):
        """Return a measure of the sequences' similarity (float in [0,1]).

        Where T is the total number of elements in both sequences, and
        M is the number of matches, this is 2.0*M / T.
        Note that this is 1 if the sequences are identical, and 0 if
        they have nothing in common.

        .ratio() is expensive to compute if you haven't already computed
        .get_matching_blocks() or .get_opcodes(), in which case you may
        want to try .quick_ratio() or .real_quick_ratio() first to get an
        upper bound.

        >>> s = SequenceMatcher(None, "abcd", "bcde")
        >>> s.ratio()
        0.75
        >>> s.quick_ratio()
        0.75
        >>> s.real_quick_ratio()
        1.0
        """

        matches = reduce(lambda sum, triple: sum + triple[-1],
                         self.get_matching_blocks(), 0)
        return _calculate_ratio(matches, len(self.a) + len(self.b)) 
Example 3
Project: pyblish-win   Author: pyblish   File: test_random.py    GNU Lesser General Public License v3.0
def test_sample_distribution(self):
        # For the entire allowable range of 0 <= k <= N, validate that
        # sample generates all possible permutations
        n = 5
        pop = range(n)
        trials = 10000  # large num prevents false negatives without slowing normal case
        def factorial(n):
            return reduce(int.__mul__, xrange(1, n), 1)
        for k in xrange(n):
            expected = factorial(n) // factorial(n-k)
            perms = {}
            for i in xrange(trials):
                perms[tuple(self.gen.sample(pop, k))] = None
                if len(perms) == expected:
                    break
            else:
                self.fail() 
Example 4
Project: xrft   Author: xgcm   File: xrft.py    MIT License
def _apply_window(da, dims, window_type='hanning'):
    """Creating windows in dimensions dims."""

    if window_type not in ['hanning']:
        raise NotImplementedError("Only hanning window is supported for now.")

    numpy_win_func = getattr(np, window_type)

    if da.chunks:
        def dask_win_func(n):
            return dsar.from_delayed(
                delayed(numpy_win_func, pure=True)(n),
                (n,), float)
        win_func = dask_win_func
    else:
        win_func = numpy_win_func

    windows = [xr.DataArray(win_func(len(da[d])),
               dims=da[d].dims, coords=da[d].coords) for d in dims]

    return da * reduce(operator.mul, windows[::-1]) 
Example 5
Project: models   Author: kipoi   File: convert_Basset_to_pytorch.py    MIT License
def simplify_source(s):
    s = map(lambda x: x.replace(',(1, 1),(0, 0),1,1,bias=True),#Conv2d',')'),s)
    s = map(lambda x: x.replace(',(0, 0),1,1,bias=True),#Conv2d',')'),s)
    s = map(lambda x: x.replace(',1,1,bias=True),#Conv2d',')'),s)
    s = map(lambda x: x.replace(',bias=True),#Conv2d',')'),s)
    s = map(lambda x: x.replace('),#Conv2d',')'),s)
    s = map(lambda x: x.replace(',1e-05,0.1,True),#BatchNorm2d',')'),s)
    s = map(lambda x: x.replace('),#BatchNorm2d',')'),s)
    s = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#MaxPool2d',')'),s)
    s = map(lambda x: x.replace(',ceil_mode=False),#MaxPool2d',')'),s)
    s = map(lambda x: x.replace('),#MaxPool2d',')'),s)
    s = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#AvgPool2d',')'),s)
    s = map(lambda x: x.replace(',ceil_mode=False),#AvgPool2d',')'),s)
    s = map(lambda x: x.replace(',bias=True)),#Linear',')), # Linear'),s)
    s = map(lambda x: x.replace(')),#Linear',')), # Linear'),s)

    s = map(lambda x: '{},\n'.format(x),s)
    s = map(lambda x: x[1:],s)
    s = reduce(lambda x,y: x+y, s)
    return s 
Example 6
Project: molecule-hetznercloud   Author: pycontribs   File: conftest.py    MIT License
def molecule_data(
    _molecule_dependency_galaxy_section_data,
    _molecule_driver_section_data,
    _molecule_lint_section_data,
    _molecule_platforms_section_data,
    _molecule_provisioner_section_data,
    _molecule_scenario_section_data,
    _molecule_verifier_section_data,
):

    fixtures = [
        _molecule_dependency_galaxy_section_data,
        _molecule_driver_section_data,
        _molecule_lint_section_data,
        _molecule_platforms_section_data,
        _molecule_provisioner_section_data,
        _molecule_scenario_section_data,
        _molecule_verifier_section_data,
    ]

    return functools.reduce(lambda x, y: util.merge_dicts(x, y), fixtures) 
Example 7
Project: cs294-112_hws   Author: xuwd11   File: tabulate.py    MIT License
def _column_type(strings, has_invisible=True):
    """The least generic type all column values are convertible to.

    >>> _column_type(["1", "2"]) is _int_type
    True
    >>> _column_type(["1", "2.3"]) is _float_type
    True
    >>> _column_type(["1", "2.3", "four"]) is _text_type
    True
    >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
    True
    >>> _column_type([None, "brux"]) is _text_type
    True
    >>> _column_type([1, 2, None]) is _int_type
    True
    >>> import datetime as dt
    >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
    True

    """
    types = [_type(s, has_invisible) for s in strings ]
    return reduce(_more_generic, types, int) 
Example 8
Project: mpu   Author: MartinThoma   File: math.py    MIT License
def product(iterable, start=1):
    """
    Calculate the product of the iterable's elements.

    Parameters
    ----------
    iterable : iterable
        List, tuple or similar which contains numbers
    start : number, optional (default: 1)

    Returns
    -------
    product : number

    Examples
    --------
    >>> product([1, 2, 3, 4, 5])
    120
    >>> product([])
    1
    """
    return reduce(operator.mul, iterable, start) 
Example 9
Project: fine-lm   Author: akzaidi   File: rl.py    MIT License
def feed_forward_categorical_fun(action_space, config, observations):
  """Feed-forward categorical."""
  if not isinstance(action_space, gym.spaces.Discrete):
    raise ValueError("Expecting discrete action space.")
  flat_observations = tf.reshape(observations, [
      tf.shape(observations)[0], tf.shape(observations)[1],
      functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
  with tf.variable_scope("network_parameters"):
    with tf.variable_scope("policy"):
      x = flat_observations
      for size in config.policy_layers:
        x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
      logits = tf.contrib.layers.fully_connected(x, action_space.n,
                                                 activation_fn=None)
    with tf.variable_scope("value"):
      x = flat_observations
      for size in config.value_layers:
        x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
      value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
  policy = tf.contrib.distributions.Categorical(logits=logits)
  return NetworkOutput(policy, value, lambda a: a) 
Example 10
Project: fine-lm   Author: akzaidi   File: rl.py    MIT License
def dense_bitwise_categorical_fun(action_space, config, observations):
  """Dense network with bitwise input and categorical output."""
  del config
  obs_shape = common_layers.shape_list(observations)
  x = tf.reshape(observations, [-1] + obs_shape[2:])

  with tf.variable_scope("network_parameters"):
    with tf.variable_scope("dense_bitwise"):
      x = discretization.int_to_bit_embed(x, 8, 32)
      flat_x = tf.reshape(
          x, [obs_shape[0], obs_shape[1],
              functools.reduce(operator.mul, x.shape.as_list()[1:], 1)])

      x = tf.contrib.layers.fully_connected(flat_x, 256, tf.nn.relu)
      x = tf.contrib.layers.fully_connected(x, 128, tf.nn.relu)

      logits = tf.contrib.layers.fully_connected(x, action_space.n,
                                                 activation_fn=None)

      value = tf.contrib.layers.fully_connected(
          x, 1, activation_fn=None)[..., 0]
      policy = tf.contrib.distributions.Categorical(logits=logits)

  return NetworkOutput(policy, value, lambda a: a) 
Example 11
Project: relay-bench   Author: uwsampl   File: dashboard.py    Apache License 2.0
def get_timing_info(info, exp_name):
    '''
        Get the timing information of an experiment
        recorded in `run.json`.
    '''
    run_status = validate_json(info.exp_status_dir(exp_name),
                               'success',
                               'start_time',
                               'end_time',
                               'time_delta',
                               filename='run.json')
    # validate run.json data
    keys = run_status.keys()
    if keys and functools.reduce(lambda x, y: x and y,
                                 map(lambda x: x in keys,
                                     ('start_time', 'end_time', 'time_delta'))):
        rs_get = run_status.get
        return {
            'start_time' : rs_get('start_time'),
            'end_time'   : rs_get('end_time'),
            'time_delta' : rs_get('time_delta')
        }
    return {} 
Example 12
Project: lirpg   Author: Hwhitetooth   File: ddpg.py    MIT License
def setup_critic_optimizer(self):
        logger.info('setting up critic optimizer')
        normalized_critic_target_tf = tf.clip_by_value(normalize(self.critic_target, self.ret_rms), self.return_range[0], self.return_range[1])
        self.critic_loss = tf.reduce_mean(tf.square(self.normalized_critic_tf - normalized_critic_target_tf))
        if self.critic_l2_reg > 0.:
            critic_reg_vars = [var for var in self.critic.trainable_vars if 'kernel' in var.name and 'output' not in var.name]
            for var in critic_reg_vars:
                logger.info('  regularizing: {}'.format(var.name))
            logger.info('  applying l2 regularization with {}'.format(self.critic_l2_reg))
            critic_reg = tc.layers.apply_regularization(
                tc.layers.l2_regularizer(self.critic_l2_reg),
                weights_list=critic_reg_vars
            )
            self.critic_loss += critic_reg
        critic_shapes = [var.get_shape().as_list() for var in self.critic.trainable_vars]
        critic_nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in critic_shapes])
        logger.info('  critic shapes: {}'.format(critic_shapes))
        logger.info('  critic params: {}'.format(critic_nb_params))
        self.critic_grads = U.flatgrad(self.critic_loss, self.critic.trainable_vars, clip_norm=self.clip_norm)
        self.critic_optimizer = MpiAdam(var_list=self.critic.trainable_vars,
            beta1=0.9, beta2=0.999, epsilon=1e-08) 
Example 13
Project: mindustry-modding   Author: SimonWoodburyForget   File: to_wiki.py    GNU General Public License v3.0
def normalize(md):
    '''Normalize anchors.'''
    def on_match(link):
        desc = link.group(1)
        old = link.group(2)
        href = (link.group(2)
                .lower()
                .replace('%20', '-')
                .replace(" ", "-")
                .replace("~", "")
                .replace(".", ""))
        old, new = f'[{desc}]({old})', f'[{desc}]({href})'
        print(old, new)
        return old, new

    replacers = set((on_match(x) for x in re.finditer(r'\[([^\]\[]*)\]\((#[^\)]*)\)', md)))
    return ft.reduce(lambda md, x: md.replace(x[0], x[1]), replacers, md) 
Example 14
Project: pyblish-win   Author: pyblish   File: test_functools.py    GNU Lesser General Public License v3.0
def test_reduce(self):
        class Squares:

            def __init__(self, max):
                self.max = max
                self.sofar = []

            def __len__(self): return len(self.sofar)

            def __getitem__(self, i):
                if not 0 <= i < self.max: raise IndexError
                n = len(self.sofar)
                while n <= i:
                    self.sofar.append(n*n)
                    n += 1
                return self.sofar[i]

        reduce = functools.reduce
        self.assertEqual(reduce(lambda x, y: x+y, ['a', 'b', 'c'], ''), 'abc')
        self.assertEqual(
            reduce(lambda x, y: x+y, [['a', 'c'], [], ['d', 'w']], []),
            ['a','c','d','w']
        )
        self.assertEqual(reduce(lambda x, y: x*y, range(2,8), 1), 5040)
        self.assertEqual(
            reduce(lambda x, y: x*y, range(2,21), 1L),
            2432902008176640000L
        )
        self.assertEqual(reduce(lambda x, y: x+y, Squares(10)), 285)
        self.assertEqual(reduce(lambda x, y: x+y, Squares(10), 0), 285)
        self.assertEqual(reduce(lambda x, y: x+y, Squares(0), 0), 0)
        self.assertRaises(TypeError, reduce)
        self.assertRaises(TypeError, reduce, 42, 42)
        self.assertRaises(TypeError, reduce, 42, 42, 42)
        self.assertEqual(reduce(42, "1"), "1") # func is never called with one item
        self.assertEqual(reduce(42, "", "1"), "1") # func is never called with one item
        self.assertRaises(TypeError, reduce, 42, (42, 42)) 
Example 15
Project: pyblish-win   Author: pyblish   File: test_itertools.py    GNU Lesser General Public License v3.0
def prod(iterable):
    return reduce(operator.mul, iterable, 1) 
Example 16
Project: Flask-Python-GAE-Login-Registration   Author: orymeyer   File: environment.py    Apache License 2.0
def preprocess(self, source, name=None, filename=None):
        """Preprocesses the source with all extensions.  This is automatically
        called for all parsing and compiling methods but *not* for :meth:`lex`
        because there you usually only want the actual source tokenized.
        """
        return reduce(lambda s, e: e.preprocess(s, name, filename),
                      self.iter_extensions(), text_type(source)) 
Example 17
Project: Flask-Python-GAE-Login-Registration   Author: orymeyer   File: itsdangerous.py    Apache License 2.0
def bytes_to_int(bytestr):
    return reduce(lambda a, b: a << 8 | b, bytearray(bytestr), 0) 
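
The fold above assembles an integer from bytes in big-endian order: each step shifts the accumulator left one byte and ORs in the next byte. A quick sanity check (a sketch, not from the project's test suite):

assert bytes_to_int(b'\x01\x00') == 256  # ((0 << 8) | 1) << 8 | 0
assert bytes_to_int(b'\xff') == 255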
Example 18
Project: ieml   Author: IEMLdev   File: ieml_database.py    GNU General Public License v3.0
def get_values_partial(self, ieml, language=None, descriptor=None):
        ieml, language, descriptor = _normalize_key(ieml, language, descriptor,
                                                    parse_ieml=False, partial=True)

        key = {'ieml': ieml, 'language': language, 'descriptor': descriptor}
        key = reduce(operator.and_,
                     [self.df.index.get_level_values(k) == v for k, v in key.items() if v is not None],
                     True)

        res = defaultdict(list)
        for k, (v,) in self.df[key].iterrows():
            res[k].append(v)

        return dict(res) 
Example 19
Project: ieml   Author: IEMLdev   File: ieml_database.py    GNU General Public License v3.0
def get_values_partial(self, ieml, key=None):
        ieml, key, _ = _normalize_key(ieml, key, None, parse_ieml=False, partial=True, structure=True)

        key = {'ieml': ieml, 'key': key}
        key = reduce(operator.and_,
                     [self.df.index.get_level_values(k) == v for k, v in key.items() if v is not None],
                     True)

        res = defaultdict(list)
        for k, (v,) in self.df[key].iterrows():
            res[k].append(v)

        return dict(res) 
Example 20
Project: ieml   Author: IEMLdev   File: character.py    GNU General Public License v3.0
def _get_cardinal(self):
        return reduce(mul, [w.cardinal for w in self._semes], 1)
Example 21
Project: ieml   Author: IEMLdev   File: word.py    GNU General Public License v3.0
def _get_cardinal(self):
        return reduce(mul, [w.cardinal for char in [self.substance, self.attribute, self.mode] for w in char], 1) 
Example 22
Project: ieml   Author: IEMLdev   File: paths.py    GNU General Public License v3.0
def __init__(self, children):
        if not children:
            raise ValueError('Must be a non empty children.')

        super().__init__(children)

        if len(self.children) < 2:
            raise ValueError("A context path must have at least two children.")

        self.cardinal = reduce(mul, [c.cardinal for c in self.children])
        self._resolve_context() 
Example 23
Project: pytuber   Author: tefra   File: storage.py    MIT License
def exists(cls, *keys):
        try:
            reduce(dict.__getitem__, keys, cls())
            return True
        except KeyError:
            return False 
Example 24
Project: pytuber   Author: tefra   File: storage.py    MIT License
def get(cls, *keys, default=NOTHING):
        try:
            return reduce(dict.__getitem__, keys, cls())
        except KeyError:
            if default == NOTHING:
                raise
            return default 
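
Both storage helpers above walk a nested dictionary one key at a time via reduce(dict.__getitem__, keys, cls()), where cls() is presumably the dict-backed registry instance. A minimal illustration of the idiom with a hypothetical plain dict:

from functools import reduce

data = {'a': {'b': {'c': 1}}}
assert reduce(dict.__getitem__, ('a', 'b', 'c'), data) == 1  # i.e. data['a']['b']['c']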
Example 25
Project: elasticsearch-orm   Author: mayankchutani   File: http_request_util.py    GNU General Public License v3.0
def join_urls(*args):
    uri = functools.reduce(lambda x, y: urljoin(x, y), args)
    return uri 
Example 26
Project: speedrun   Author: inferno-pytorch   File: yaml_utils.py    Apache License 2.0
def mul(loader, node):
    return reduce(operator.mul, loader.construct_sequence(node)) 
Example 27
Project: speedrun   Author: inferno-pytorch   File: yaml_utils.py    Apache License 2.0
def sub(loader, node):
    return reduce(operator.sub, loader.construct_sequence(node)) 
Example 28
Project: speedrun   Author: inferno-pytorch   File: yaml_utils.py    Apache License 2.0
def div(loader, node):
    return reduce(operator.truediv, loader.construct_sequence(node)) 
Example 29
Project: neural-fingerprinting   Author: StephanZheng   File: utils_mnist.py    BSD 3-Clause "New" or "Revised" License
def download_and_parse_mnist_file(file_name, datadir=None, force=False):
    file_name = maybe_download_mnist_file(file_name, datadir=datadir,
                                          force=force)

    # Open the file and unzip it if necessary
    if os.path.splitext(file_name)[1] == '.gz':
        open_fn = gzip.open
    else:
        open_fn = open

    # Parse the file
    with open_fn(file_name, 'rb') as file_descriptor:
        header = file_descriptor.read(4)
        assert len(header) == 4

        zeros, data_type, n_dims = struct.unpack('>HBB', header)
        assert zeros == 0

        hex_to_data_type = {
            0x08: 'B',
            0x09: 'b',
            0x0b: 'h',
            0x0c: 'i',
            0x0d: 'f',
            0x0e: 'd'}
        data_type = hex_to_data_type[data_type]

        dim_sizes = struct.unpack(
            '>' + 'I' * n_dims,
            file_descriptor.read(4 * n_dims))

        data = array.array(data_type, file_descriptor.read())
        data.byteswap()

        desired_items = functools.reduce(operator.mul, dim_sizes)
        assert len(data) == desired_items
        return np.array(data).reshape(dim_sizes) 
Example 30
Project: IsThatWho   Author: justanr   File: tmdb.py    MIT License
def common(crews):
    return reduce(op.and_, [set(c.keys()) for c in crews]) 
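
Folding the per-crew key sets with operator.and_ intersects them, leaving only the keys present in every crew. A small sketch with hypothetical data:

from functools import reduce
import operator as op

crews = [{'director': 1, 'writer': 2}, {'writer': 3, 'editor': 4}]
assert reduce(op.and_, [set(c.keys()) for c in crews]) == {'writer'}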
Example 31
Project: ticket_universe   Author: lotify   File: universe.py    MIT License
def __len__(self) -> int:
        """ Calculates total size of the universe as a product of the size of each position """
        if len(self.positions) == 0:
            return 0
        return functools.reduce(operator.mul, map(len, self.positions)) 
Example 32
Project: models   Author: kipoi   File: pretrained_model_reloaded_th.py    MIT License
def forward(self, input):
        return reduce(self.lambda_func,self.forward_prepare(input)) 
Example 33
Project: models   Author: kipoi   File: convert_Basset_to_pytorch.py    MIT License
def forward(self, input):
        # result is a Variable
        return reduce(self.lambda_func,self.forward_prepare(input)) 
Example 34
Project: models   Author: kipoi   File: model_architecture.py    MIT License
def forward(self, input):
        return reduce(self.lambda_func,self.forward_prepare(input)) 
Example 35
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: environment.py    MIT License
def preprocess(self, source, name=None, filename=None):
        """Preprocesses the source with all extensions.  This is automatically
        called for all parsing and compiling methods but *not* for :meth:`lex`
        because there you usually only want the actual source tokenized.
        """
        return reduce(lambda s, e: e.preprocess(s, name, filename),
                      self.iter_extensions(), text_type(source)) 
Example 36
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: __init__.py    MIT License
def resolve(self):
        """
        Resolve the entry point from its module and attrs.
        """
        module = __import__(self.module_name, fromlist=['__name__'], level=0)
        try:
            return functools.reduce(getattr, self.attrs, module)
        except AttributeError as exc:
            raise ImportError(str(exc)) 
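
Here reduce(getattr, self.attrs, module) walks a dotted attribute path starting from the imported module, so attrs of ['sub', 'func'] resolve to module.sub.func. A sketch against the standard library:

import functools

module = __import__('os.path', fromlist=['__name__'], level=0)
join = functools.reduce(getattr, ['join'], module)  # the same object as os.path.join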
Example 37
Project: VSE-C   Author: ExplorerFreda   File: numeral.py    MIT License
def valid(word):
    # check number
    try:
        numbers = [w2n.word_to_num(it) for it in word.split()]
        if len(numbers) > 1:
            return False
    except ValueError:
        return False
    # check English
    chars = map(lambda c: 'a' <= c <= 'z', word)
    return reduce(lambda a, b: a or b, chars, False) 
Example 38
Project: Graphlib   Author: HamletWantToCode   File: parse_config.py    MIT License
def _get_by_path(tree, keys):
    """Access a nested object in tree by sequence of keys."""
    return reduce(getitem, keys, tree) 
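
Because operator.getitem subscripts dicts and lists alike, the key sequence can mix mapping keys and list indices. A quick sketch using the helper above:

tree = {'layers': [{'units': 64}, {'units': 32}]}
assert _get_by_path(tree, ['layers', 1, 'units']) == 32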
Example 39
Project: sdpqpy   Author: cgogolin   File: ed.py    GNU General Public License v3.0
def expressionToMatrix(expr, variables=None, matrices=None):
    """Converts sympy expression expr formulated in terms of the variables
    to a numpy array, by replacing every occurance of a variable in variables
    with the corresponding numpy matrix/array in matrices.
    """

    def evalmonomial(expr, dictionary):
        if expr.func == Operator:
            return dictionary[expr]
        elif expr.func == Integer or expr.func == NegativeOne:
            return int(expr)
        elif expr.func == Float:
            return float(expr)
        elif expr.func == Dagger:
            return evalmonomial(expr.args[0], dictionary).conj().T
        elif expr.func == Pow:
            return np.linalg.matrix_power(evalmonomial(expr.args[0], dictionary),
                                          int(expr.args[1]))
        elif expr.func == Mul:
            return ft.reduce(np.dot, (evalmonomial(arg, dictionary)
                                      for arg in expr.args))
        elif expr.func == Add:
            return ft.reduce(np.add, (evalmonomial(arg, dictionary)
                                      for arg in expr.args))
        else:
            raise ValueError("unknown sympy func: " + str(expr.func))

    dictionary = dict(zip(variables, matrices))
    try:
        matrix = evalmonomial(expr, dictionary)
        return matrix
    except:
        print("\nproblem while processing expr=" +
              str(expr) + "wich consists of:")

        def printtree(expr, level):
            print("level", level, ":", expr, "of type ", expr.func)
            for arg in expr.args:
                printtree(arg, level + 1)
        printtree(expr, 0)
        raise 
Example 40
Project: python-stream   Author: fm100   File: stream.py    MIT License
def reduce(self, binary_operator):
        """
        Apply a function of two arguments cumulatively to the items of
        this stream from left to right, so as to reduce this stream to
        a single value.
        """
        return functools.reduce(binary_operator, self) 
Example 41
Project: flasky   Author: RoseOu   File: environment.py    MIT License
def preprocess(self, source, name=None, filename=None):
        """Preprocesses the source with all extensions.  This is automatically
        called for all parsing and compiling methods but *not* for :meth:`lex`
        because there you usually only want the actual source tokenized.
        """
        return reduce(lambda s, e: e.preprocess(s, name, filename),
                      self.iter_extensions(), text_type(source)) 
Example 42
Project: flasky   Author: RoseOu   File: pkg_resources.py    MIT License
def and_test(cls, nodelist):
        # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
        return functools.reduce(operator.and_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)]) 
Example 43
Project: flasky   Author: RoseOu   File: pkg_resources.py    MIT License
def test(cls, nodelist):
        # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
        return functools.reduce(operator.or_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)]) 
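
The comment in both methods is the reason reduce is used at all: the list comprehension forces cls.interpret() to run on every node before the fold, while chaining the results with `and`/`or` would short-circuit and let invalid syntax in later nodes slip through. A sketch of the difference, with a stand-in interpret():

import functools
import operator

seen = []
def interpret(node):
    seen.append(node)
    return bool(node)

functools.reduce(operator.and_, [interpret(0), interpret(1)])
assert seen == [0, 1]            # every node was evaluated
seen.clear()
interpret(0) and interpret(1)    # plain `and` stops at the first falsy value
assert seen == [0]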
Example 44
Project: parasweep   Author: eviatarbach   File: sweepers.py    MIT License
def __len__(self):
        return reduce(operator.mul, self.lengths, 1) 
Example 45
Project: django-route   Author: vinayinvicible   File: utils.py    MIT License
def gcd_of_list(l):
    return reduce(gcd, l, 0) 
Example 46
Project: Logo-Retrieval-in-Commercial-Plaza   Author: zhang-rongchen   File: utils.py    MIT License
def compose(*funcs):
    """Compose arbitrarily many functions, evaluated left to right.

    Reference: https://mathieularose.com/function-composition-in-python/
    """
    # return lambda x: reduce(lambda v, f: f(v), funcs, x)
    if funcs:
        return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
    else:
        raise ValueError('Composition of empty sequence not supported.') 
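
compose chains the calls left to right, so compose(f, g)(x) evaluates g(f(x)). A quick usage sketch:

inc = lambda x: x + 1
dbl = lambda x: x * 2
assert compose(inc, dbl)(3) == 8  # dbl(inc(3))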
Example 47
Project: aurora   Author: carnby   File: gensimtools.py    MIT License
def lda_topic_descriptors(lda, n_words=25, max_words=10000, word_filter=lambda x: x):
    """
    Given a LDA model, return a list of topics with word descriptors.

    :param lda:
    :param n_words:
    :param max_words:
    :param word_filter:
    :return:
    """
    exponent = 1.0 / float(max_words)
    quotient_cache = {}

    dictionary = lda.id2word

    def quotient(word):
        if word in quotient_cache:
            return quotient_cache[word]
        value = reduce(lambda x, y: x * y, [x[1] for x in lda[dictionary.doc2bow([word])]], 1.0)
        quotient_cache[word] = np.power(value, exponent)
        return quotient_cache[word]

    def re_rank(topic):
        re_ranked = sorted(topic, key=lambda x: x[0] * np.log(x[0] / quotient(x[1])), reverse=True)
        re_ranked = [w for w in re_ranked if word_filter(w[1])]
        return re_ranked

    ranked_topics = []

    for i in range(0, lda.num_topics):
        topic = lda.show_topic(i, topn=max_words)
        words = re_rank(topic)
        ranked_topics.append(words[:n_words])

    return ranked_topics 
Example 48
Project: froide-campaign   Author: okfde   File: models.py    MIT License
def get_search_vector(self):
        fields = [
            ('title', 'A'),
            ('search_text', 'A'),
        ]
        return functools.reduce(lambda a, b: a + b, [
            SearchVector(
                f, weight=w, config=self.SEARCH_LANG) for f, w in fields]) 
Example 49
Project: sic   Author: Yanixos   File: __init__.py    GNU General Public License v3.0
def resolve(self):
        """
        Resolve the entry point from its module and attrs.
        """
        module = __import__(self.module_name, fromlist=['__name__'], level=0)
        try:
            return functools.reduce(getattr, self.attrs, module)
        except AttributeError as exc:
            raise ImportError(str(exc)) 
Example 50
Project: sic   Author: Yanixos   File: enum.py    GNU General Public License v3.0
def __invert__(self):
        members, uncovered = _decompose(self.__class__, self._value_)
        inverted_members = [
                m for m in self.__class__
                if m not in members and not m._value_ & self._value_
                ]
        inverted = reduce(_or_, inverted_members, self.__class__(0))
        return self.__class__(inverted) 
Example 51
Project: bigquerylayers   Author: smandaric   File: __init__.py    GNU General Public License v3.0
def resolve(self):
        """
        Resolve the entry point from its module and attrs.
        """
        module = __import__(self.module_name, fromlist=['__name__'], level=0)
        try:
            return functools.reduce(getattr, self.attrs, module)
        except AttributeError as exc:
            raise ImportError(str(exc)) 
Example 52
Project: rbnfrbnf   Author: thautwarm   File: graph_ir3.py    MIT License
def merge(lst) -> tuple:
    lst = process_optional(lst)
    if len(lst) == 1:
        return reduce(Chain, lst[0]),
    groups = _group_by(lst)

    def _():
        for k, v in groups.items():
            if v:
                yield MultiParents(k, merge(v))
            else:
                yield k

    return tuple(_()) 
Example 53
Project: face_rekognition   Author: cnidus   File: ImageStat.py    GNU General Public License v3.0
def _getcount(self):
        "Get total number of pixels in each layer"

        v = []
        for i in range(0, len(self.h), 256):
            v.append(functools.reduce(operator.add, self.h[i:i+256]))
        return v 
Example 54
Project: face_rekognition   Author: cnidus   File: ImageOps.py    GNU General Public License v3.0
def equalize(image, mask=None):
    """
    Equalize the image histogram. This function applies a non-linear
    mapping to the input image, in order to create a uniform
    distribution of grayscale values in the output image.

    :param image: The image to equalize.
    :param mask: An optional mask.  If given, only the pixels selected by
                 the mask are included in the analysis.
    :return: An image.
    """
    if image.mode == "P":
        image = image.convert("RGB")
    h = image.histogram(mask)
    lut = []
    for b in range(0, len(h), 256):
        histo = [_f for _f in h[b:b+256] if _f]
        if len(histo) <= 1:
            lut.extend(list(range(256)))
        else:
            step = (functools.reduce(operator.add, histo) - histo[-1]) // 255
            if not step:
                lut.extend(list(range(256)))
            else:
                n = step // 2
                for i in range(256):
                    lut.append(n // step)
                    n = n + h[i+b]
    return _lut(image, lut) 
Example 55
Project: face_rekognition   Author: cnidus   File: ImageFilter.py    GNU General Public License v3.0
def __init__(self, size, kernel, scale=None, offset=0):
        if scale is None:
            # default scale is sum of kernel
            scale = functools.reduce(lambda a, b: a+b, kernel)
        if size[0] * size[1] != len(kernel):
            raise ValueError("not enough coefficients in kernel")
        self.filterargs = size, scale, offset, kernel 
Example 56
Project: AshsSDK   Author: thehappydinoa   File: __init__.py    MIT License
def resolve(self):
        """
        Resolve the entry point from its module and attrs.
        """
        module = __import__(self.module_name, fromlist=['__name__'], level=0)
        try:
            return functools.reduce(getattr, self.attrs, module)
        except AttributeError as exc:
            raise ImportError(str(exc)) 
Example 57
Project: facebook-wda   Author: openatx   File: __init__.py    MIT License
def urljoin(*urls):
    """
    The default urlparse.urljoin behavior looks strange:
    Standard urlparse.urljoin('http://a.com/foo', '/bar')
    Expect: http://a.com/foo/bar
    Actually: http://a.com/bar

    This function fixes that.
    """
    return reduce(_urljoin, [u.strip('/') + '/' for u in urls if u.strip('/')],
                  '').rstrip('/') 
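
A sanity check of the repaired join behavior, assuming _urljoin is the standard library's urljoin:

assert urljoin('http://a.com/foo', '/bar') == 'http://a.com/foo/bar'
assert urljoin('http://a.com/', 'foo/', '/bar/') == 'http://a.com/foo/bar'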
Example 58
Project: RLBotPack   Author: RLBot   File: vector_math.py    MIT License
def to_rotation_matrix(pitch, yaw, roll):
    # Note: Unreal engine coordinate system
    y=pitch
    cosy = math.cos(y)
    siny = math.sin(y)
    mat_pitch = np.array(
            [[cosy, 0, -siny],
             [0, 1, 0],
             [siny, 0, cosy]])

    z=yaw
    cosz = math.cos(z)
    sinz = math.sin(z)
    mat_yaw = np.array(
            [[cosz, -sinz, 0],
             [sinz, cosz, 0],
             [0, 0, 1]])

    x=roll
    cosx = math.cos(x)
    sinx = math.sin(x)
    mat_roll = np.array(
            [[1, 0, 0],
             [0, cosx, sinx],
             [0, -sinx, cosx]])

    return reduce(np.dot, [mat_yaw, mat_pitch, mat_roll]) 
Example 59
Project: deep-verify   Author: deepmind   File: standard_layer_calcs.py    Apache License 2.0
def _prod(lst):
  return functools.reduce(operator.mul, lst, 1) 
Example 60
Project: deep-verify   Author: deepmind   File: common.py    Apache License 2.0
def _prod(lst):
  return functools.reduce(operator.mul, lst, 1) 
Example 61
Project: fine-lm   Author: akzaidi   File: rl.py    MIT License
def feed_forward_gaussian_fun(action_space, config, observations):
  """Feed-forward Gaussian."""
  if not isinstance(action_space, gym.spaces.box.Box):
    raise ValueError("Expecting continuous action space.")

  mean_weights_initializer = tf.contrib.layers.variance_scaling_initializer(
      factor=config.init_mean_factor)
  logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10)

  flat_observations = tf.reshape(observations, [
      tf.shape(observations)[0], tf.shape(observations)[1],
      functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])

  with tf.variable_scope("network_parameters"):
    with tf.variable_scope("policy"):
      x = flat_observations
      for size in config.policy_layers:
        x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
      mean = tf.contrib.layers.fully_connected(
          x, action_space.shape[0], tf.tanh,
          weights_initializer=mean_weights_initializer)
      logstd = tf.get_variable(
          "logstd", mean.shape[2:], tf.float32, logstd_initializer)
      logstd = tf.tile(
          logstd[None, None],
          [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
    with tf.variable_scope("value"):
      x = flat_observations
      for size in config.value_layers:
        x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
      value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
  mean = tf.check_numerics(mean, "mean")
  logstd = tf.check_numerics(logstd, "logstd")
  value = tf.check_numerics(value, "value")

  policy = tf.contrib.distributions.MultivariateNormalDiag(mean,
                                                           tf.exp(logstd))

  return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2)) 
Example 62
Project: fine-lm   Author: akzaidi   File: rl.py    MIT License
def feed_forward_cnn_small_categorical_fun(action_space, config, observations):
  """Small cnn network with categorical output."""
  del config
  obs_shape = common_layers.shape_list(observations)
  x = tf.reshape(observations, [-1] + obs_shape[2:])

  with tf.variable_scope("network_parameters"):
    with tf.variable_scope("feed_forward_cnn_small"):
      x = tf.to_float(x) / 255.0
      x = tf.contrib.layers.conv2d(x, 32, [5, 5], [2, 2],
                                   activation_fn=tf.nn.relu, padding="SAME")
      x = tf.contrib.layers.conv2d(x, 32, [5, 5], [2, 2],
                                   activation_fn=tf.nn.relu, padding="SAME")

      flat_x = tf.reshape(
          x, [obs_shape[0], obs_shape[1],
              functools.reduce(operator.mul, x.shape.as_list()[1:], 1)])

      x = tf.contrib.layers.fully_connected(flat_x, 128, tf.nn.relu)

      logits = tf.contrib.layers.fully_connected(x, action_space.n,
                                                 activation_fn=None)

      value = tf.contrib.layers.fully_connected(
          x, 1, activation_fn=None)[..., 0]
      policy = tf.contrib.distributions.Categorical(logits=logits)

  return NetworkOutput(policy, value, lambda a: a) 
Example 63
Project: timeseries-mock   Author: ruivieira   File: app.py    Apache License 2.0
def _parse_structure(conf):
    structures = []
    m0 = []
    anomalies = []

    for structure in conf:
        _structure, _anomalies, _m0 = _parse_component(structure)
        m0.extend(_m0)
        anomalies.extend(_anomalies)
        structures.append(_structure)

    m0 = np.array(m0)
    C0 = np.eye(len(m0))

    return reduce((lambda x, y: x + y), structures), m0, C0, anomalies 
Example 64
Project: lirpg   Author: Hwhitetooth   File: ddpg.py    MIT License
def setup_actor_optimizer(self):
        logger.info('setting up actor optimizer')
        self.actor_loss = -tf.reduce_mean(self.critic_with_actor_tf)
        actor_shapes = [var.get_shape().as_list() for var in self.actor.trainable_vars]
        actor_nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in actor_shapes])
        logger.info('  actor shapes: {}'.format(actor_shapes))
        logger.info('  actor params: {}'.format(actor_nb_params))
        self.actor_grads = U.flatgrad(self.actor_loss, self.actor.trainable_vars, clip_norm=self.clip_norm)
        self.actor_optimizer = MpiAdam(var_list=self.actor.trainable_vars,
            beta1=0.9, beta2=0.999, epsilon=1e-08) 
Example 65
Project: StyleGAN   Author: mgmk2   File: wscale_conv.py    Apache License 2.0
def build(self, input_shape):
        super(ScaledConv, self).build(input_shape)
        if self.use_wscale:
            if self.data_format == 'channels_first':
                fan_in = reduce(mul, self.kernel_size) * int(input_shape[1])
            else:
                fan_in = reduce(mul, self.kernel_size) * int(input_shape[-1])
            self.coeff = np.sqrt(2 / fan_in)
        else:
            self.coeff = 1.0 
Example 66
Project: StyleGAN   Author: mgmk2   File: spectral_normalization_conv.py    Apache License 2.0
def build(self, input_shape):
        super(SNConv, self).build(input_shape)
        if self.use_wscale:
            fan_in = reduce(mul, self.kernel_size) * int(input_shape[-1])
            self.coeff = np.sqrt(2 / fan_in)
        else:
            self.coeff = 1.0

        input_shape = tensor_shape.TensorShape(input_shape)
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape.dims[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = int(input_shape[channel_axis])
        kernel_shape = self.kernel_size + (input_dim, self.filters)
        singular_vector_shape = (1, reduce(mul, self.kernel_size) * input_dim)

        self.u = self.add_weight(
            name='singular_vector',
            shape=singular_vector_shape,
            initializer=self.singular_vector_initializer,
            trainable=False,
            aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA,
            dtype=self.dtype) 
Example 67
Project: fs_image   Author: facebookincubator   File: test_extent.py    MIT License
def test_leaf_commutativity(self):
        clone = (Extent.empty()
            .write(offset=0, length=2)
            .write(offset=3, length=1))
        self.assertEqual('d2h1d1', repr(clone))
        # If we treat as identical holes of different provenance but the
        # same length, these operations should commute since they write data
        # to nonoverlapping regions.
        ops = [
            lambda e: e.write(offset=3, length=5),
            lambda e: e.truncate(length=25),
            lambda e: e.write(offset=1, length=1),
            lambda e: e.write(offset=17, length=2),
            lambda e: e.clone(
                to_offset=10,
                from_extent=clone, from_offset=0, length=clone.length,
            ),
        ]

        def compose_ops(ops_it):
            return functools.reduce(
                lambda extent, op: op(extent), ops_it, Extent.empty(),
            )

        # All permutations make distinct nestings with the same leaf structure
        all_extents = {compose_ops(p) for p in itertools.permutations(ops)}
        self.assertEqual(math.factorial(len(ops)), len(all_extents))
        self.assertEqual(
            {(
                1,
                (0, 1, Extent(content=Extent.Kind.DATA, offset=0, length=1)),
                1,
                (0, 5, Extent(content=Extent.Kind.DATA, offset=0, length=5)),
                2,
                (0, 2, Extent(content=Extent.Kind.DATA, offset=0, length=2)),
                1,
                (0, 1, Extent(content=Extent.Kind.DATA, offset=0, length=1)),
                3,
                (0, 2, Extent(content=Extent.Kind.DATA, offset=0, length=2)),
                6,
            )},
            {
                tuple(
                    # Different permutations produce the same-length holes
                    # differently, so let's only compare lengths.
                    l if se.content == Extent.Kind.HOLE else (o, l, se)
                        for o, l, se in e.gen_trimmed_leaves()
                ) for e in all_extents
            },
        )

Example 68
Project: models   Author: kipoi   File: convert_Basset_to_pytorch.py    MIT License
def torch_to_pytorch(t7_filename,outputname=None, save_output_to_file = True):
    model = load_lua(t7_filename,unknown_classes=True)
    if type(model).__name__=='hashable_uniq_dict': model=model.model
    model.gradInput = None
    slist = lua_recursive_source(lnn.Sequential().add(model))
    s = simplify_source(slist)
    header = '''
import torch
import torch.nn as nn
import torch.legacy.nn as lnn

from functools import reduce
from torch.autograd import Variable

class LambdaBase(nn.Sequential):
    def __init__(self, fn, *args):
        super(LambdaBase, self).__init__(*args)
        self.lambda_func = fn

    def forward_prepare(self, input):
        output = []
        for module in self._modules.values():
            output.append(module(input))
        return output if output else input

class Lambda(LambdaBase):
    def forward(self, input):
        return self.lambda_func(self.forward_prepare(input))

class LambdaMap(LambdaBase):
    def forward(self, input):
        return list(map(self.lambda_func,self.forward_prepare(input)))

class LambdaReduce(LambdaBase):
    def forward(self, input):
        return reduce(self.lambda_func,self.forward_prepare(input))
'''
    varname = t7_filename.replace('.t7','').replace('.','_').replace('-','_')
    s = '{}\n\n{} = {}'.format(header,varname,s[:-2])
    if save_output_to_file:
        if outputname is None: outputname=varname
        with open(outputname+'.py', "w") as pyfile:
            pyfile.write(s)

    n = nn.Sequential()
    lua_recursive_model(model,n)
    if save_output_to_file:
        torch.save(n.state_dict(),outputname+'.pth')
    return n 
Example 69
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_module.py    Apache License 2.0
def test_monitor():
    # data iter
    data = mx.nd.array([[0.05, .10]]);
    label = mx.nd.array([[.01, 0.99]]);
    train_data = mx.io.NDArrayIter(data, label, batch_size=1)

    # symbols
    x = mx.symbol.Variable('data')
    x = mx.symbol.FullyConnected(name='fc_0', data=x, num_hidden=2)
    x = mx.symbol.Activation(name="act_0", data=x, act_type='sigmoid')
    x = mx.symbol.FullyConnected(name='fc_1', data=x, num_hidden=2)
    x = mx.symbol.Activation(name="act_1", data=x, act_type='sigmoid')
    x = mx.symbol.LinearRegressionOutput(data=x, name='softmax', grad_scale=2)

    # create monitor
    def mean_abs(x):
        sum_abs = mx.ndarray.sum(mx.ndarray.abs(x))
        return mx.ndarray.divide(sum_abs, reduce(lambda x, y: x * y, x.shape))
    mon = mx.mon.Monitor(1, stat_func=mean_abs, pattern='.*', sort=True)

    # create module
    mod = mx.mod.Module(x, context=[mx.cpu()]);
    mod.bind(train_data.provide_data, label_shapes=train_data.provide_label,
                    for_training=True)
    mod.install_monitor(mon)
    arg_params = {'fc_0_weight': mx.nd.array([[.15, .20], [.25, .30]]),
                  'fc_0_bias'  : mx.nd.array([.35, .35]),
                  'fc_1_weight': mx.nd.array([[.40, .45], [.50, .55]]),
                  'fc_1_bias'  : mx.nd.array([.60, .60])}
    mod.init_params(arg_params=arg_params)

    data_iter = iter(train_data)
    data_batch = next(data_iter)
    mon.tic()
    mod.forward_backward(data_batch)
    res = mon.toc()
    keys = ['act_0', 'act_1', 'data', 'fc_0', 'fc_1', 'softmax']
    mon_result_counts = [0, 0, 0, 0, 0, 0]
    assert(len(res) == 21)
    for n, k, v in res:
        for idx, key in enumerate(keys):
            if k.startswith(key):
                mon_result_counts[idx] += 1
                break
    assert(mon_result_counts == [2, 2, 1, 6, 6, 4]) 
Example 70
Project: soccer-matlab   Author: utra-robosoccer   File: networks.py    BSD 2-Clause "Simplified" License
def feed_forward_gaussian(
    config, action_size, observations, unused_length, state=None):
  """Independent feed forward networks for policy and value.

  The policy network outputs the mean action and the log standard deviation
  is learned as independent parameter vector.

  Args:
    config: Configuration object.
    action_size: Length of the action vector.
    observations: Sequences of observations.
    unused_length: Batch of sequence lengths.
    state: Batch of initial recurrent states.

  Returns:
    NetworkOutput tuple.
  """
  mean_weights_initializer = tf.contrib.layers.variance_scaling_initializer(
      factor=config.init_mean_factor)
  logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10)
  flat_observations = tf.reshape(observations, [
      tf.shape(observations)[0], tf.shape(observations)[1],
      functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
  with tf.variable_scope('policy'):
    x = flat_observations
    for size in config.policy_layers:
      x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
    mean = tf.contrib.layers.fully_connected(
        x, action_size, tf.tanh,
        weights_initializer=mean_weights_initializer)
    logstd = tf.get_variable(
        'logstd', mean.shape[2:], tf.float32, logstd_initializer)
    logstd = tf.tile(
        logstd[None, None],
        [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
  with tf.variable_scope('value'):
    x = flat_observations
    for size in config.value_layers:
      x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
    value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
  mean = tf.check_numerics(mean, 'mean')
  logstd = tf.check_numerics(logstd, 'logstd')
  value = tf.check_numerics(value, 'value')
  policy = tf.contrib.distributions.MultivariateNormalDiag(
      mean, tf.exp(logstd))
  return NetworkOutput(policy, mean, logstd, value, state) 
Example 71
Project: soccer-matlab   Author: utra-robosoccer   File: networks.py    BSD 2-Clause "Simplified" License
def recurrent_gaussian(
    config, action_size, observations, length, state=None):
  """Independent recurrent policy and feed forward value networks.

  The policy network outputs the mean action and the log standard deviation
  is learned as independent parameter vector. The last policy layer is
  recurrent and uses a GRU cell.

  Args:
    config: Configuration object.
    action_size: Length of the action vector.
    observations: Sequences of observations.
    length: Batch of sequence lengths.
    state: Batch of initial recurrent states.

  Returns:
    NetworkOutput tuple.
  """
  mean_weights_initializer = tf.contrib.layers.variance_scaling_initializer(
      factor=config.init_mean_factor)
  logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10)
  cell = tf.contrib.rnn.GRUBlockCell(config.policy_layers[-1])
  flat_observations = tf.reshape(observations, [
      tf.shape(observations)[0], tf.shape(observations)[1],
      functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
  with tf.variable_scope('policy'):
    x = flat_observations
    for size in config.policy_layers[:-1]:
      x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
    x, state = tf.nn.dynamic_rnn(cell, x, length, state, tf.float32)
    mean = tf.contrib.layers.fully_connected(
        x, action_size, tf.tanh,
        weights_initializer=mean_weights_initializer)
    logstd = tf.get_variable(
        'logstd', mean.shape[2:], tf.float32, logstd_initializer)
    logstd = tf.tile(
        logstd[None, None],
        [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
  with tf.variable_scope('value'):
    x = flat_observations
    for size in config.value_layers:
      x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
    value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
  mean = tf.check_numerics(mean, 'mean')
  logstd = tf.check_numerics(logstd, 'logstd')
  value = tf.check_numerics(value, 'value')
  policy = tf.contrib.distributions.MultivariateNormalDiag(
      mean, tf.exp(logstd))
  # assert state.shape.as_list()[0] is not None
  return NetworkOutput(policy, mean, logstd, value, state) 
Example 72
Project: fleeg-platform   Author: Fleeg   File: utils.py    GNU Affero General Public License v3.0
def get_page_info(url):
    page = Article(url)
    page_og = OpenGraph()
    image_url = None
    global_type = None
    page_content = None

    def get_page_head():
        headers = {
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.2 '
                          '(KHTML, like Gecko) Chrome/15.0.874.121 Safari/535.2',
        }

        try:
            resp_headers = requests.head(url, headers=headers).headers
        except requests.exceptions.RequestException:
            raise LinkException('Failed to read link.')
        return resp_headers

    def get_title_from_url():
        name_url = splitext(basename(urlsplit(url).path))[0]
        words = re.findall(r'[a-zA-Z0-9]+', name_url)
        return ' '.join([word.capitalize() for word in words])

    def summary_from_text(txt, size=250):
        return txt[:size] if isinstance(txt, str) and len(txt) > size else txt

    def build_tags(*args):
        tags = reduce(operator.add, args)
        return list(filter(lambda x: bool(x), set(tags)))

    page_type, page_subtype = get_page_head()['Content-Type'].split('/')
    page_subtype = re.findall(r'[a-zA-Z0-9]+', page_subtype)[0]

    if page_type == 'image':
        image_url = url
        global_type = page_type

    if page_type == 'text':
        page.download()
        page_content = page.html

        if page_subtype == 'html':
            page_og = OpenGraph(html=page_content)
            page.parse()

    page_text = page.text or page_content

    return {
        'type': page_og.type or global_type or page_subtype,
        'title': page_og.title or page.title or get_title_from_url(),
        'summary': page_og.description or page.meta_description or summary_from_text(page_text),
        'image': page_og.image or page.meta_img or page.top_image or image_url,
        'tags': build_tags(page.meta_keywords, list(page.tags)),
        'publish_date': page.publish_date or None,
        'text': page_text,
    } 
Example 73
Project: daqnet   Author: adamgreig   File: ip.py    MIT License
def elaborate(self, platform):
        mem_port = self.ip_stack.user_r_port
        udp_port = self.ip_stack.user_udp_port
        udp_len = self.ip_stack.user_udp_len
        dst_mac = self.ip_stack.user_last_mac
        dst_ip4 = self.ip_stack.user_last_ip4
        dst_udp_port = self.ip_stack.user_last_port

        self.m = Module()
        self.m.submodules.ipchecksum = ipchecksum = _InternetChecksum()

        with self.m.FSM() as fsm:
            # Wire the IPChecksum to update when we are in the IPv4 header
            # states. Note that the first byte is state 1, so state 15 is
            # the first byte after the 14-byte Ethernet header.
            self.m.d.comb += [
                ipchecksum.data.eq(self.tx_data),
                ipchecksum.lowbyte.eq(self.tx_addr[0]),
                ipchecksum.reset.eq(self.done),
                ipchecksum.en.eq(functools.reduce(
                    operator.or_, [fsm.ongoing(x) for x in range(15, 33)])
                ),
            ]

            self.start_fsm()
            self.write("DST_MAC", val=dst_mac, n=6, dst=0)
            self.write("SRC_MAC", val=self.ip_stack.mac_addr, n=6, dst=6)
            self.write("ETYPE", val=0x0800, n=2, dst=12)
            self.write("VER_IHL", val=0x45, n=1, dst=14)
            self.write("DSCP_ECN", val=0, n=1, dst=15)
            self.write("TOTAL_LENGTH", val=udp_len+28, n=2, dst=16)
            self.write("IDENT", val=0, n=2, dst=18)
            self.write("FRAG", val=0, n=2, dst=20)
            self.write("TTL", val=64, n=1, dst=22)
            self.write("PROTO", val=0x11, n=1, dst=23)
            self.write("SRC_IP", val=self.ip_stack.ip4_addr, n=4, dst=26)
            self.write("DST_IP", val=dst_ip4, n=4, dst=30)
            self.write("CHECKSUM", val=ipchecksum.checksum, n=2, dst=24)
            self.write("SRC_PORT", val=udp_port, n=2, dst=34)
            self.write("DST_PORT", val=dst_udp_port, n=2, dst=36)
            self.write("UDP_LEN", val=udp_len+8, n=2, dst=38)
            self.write("UDP_CHK", val=0x0000, n=2, dst=40)
            if mem_port is not None:
                self.write_from_mem("DATA", mem_port, src=0, dst=42, n=udp_len)
            self.end_fsm(send=True, tx_len=udp_len+42)

        return self.m