Python six.MAXSIZE Examples

The following are 30 code examples of six.MAXSIZE. Note that MAXSIZE is a constant, not a callable. The examples are collected from open-source projects; the original project and source file are noted above each example. You may also want to check out all available functions/classes of the module six, or try the search function.
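
Before the examples, a minimal sketch of what they all rely on: six.MAXSIZE is the largest size a Python sequence can have on the current platform. On Python 3 it is an alias for sys.maxsize; on Python 2 six derives an equivalent value so the same constant works on both.

import sys
import six

# On CPython 3, six.MAXSIZE is exactly sys.maxsize (2**63 - 1 on 64-bit
# builds, 2**31 - 1 on 32-bit ones).
assert six.MAXSIZE == sys.maxsize

# The examples below mostly use it as a stand-in for "infinity": an
# integer guaranteed to compare greater than any realistic count or index.
print(min(six.MAXSIZE, 42))  # 42
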
Example #1
Source File: short_sentence_similarity.py    From Semantic-Texual-Similarity-Toolkits with MIT License
def length_dist(synset_1, synset_2):
    """
    Return a measure of the length of the shortest path in the semantic
    ontology (Wordnet in our case as well as the paper's) between two
    synsets.
    """
    l_dist = six.MAXSIZE
    if synset_1 is None or synset_2 is None:
        return 0.0
    if synset_1 == synset_2:
        # if synset_1 and synset_2 are the same synset return 0
        l_dist = 0.0
    else:
        wset_1 = set([str(x.name()) for x in synset_1.lemmas()])
        wset_2 = set([str(x.name()) for x in synset_2.lemmas()])
        if len(wset_1.intersection(wset_2)) > 0:
            # if synset_1 != synset_2 but there is word overlap, return 1.0
            l_dist = 1.0
        else:
            # just compute the shortest path between the two
            l_dist = synset_1.shortest_path_distance(synset_2)
            if l_dist is None:
                l_dist = 0.0
    # normalize path length to the range [0,1]
    return math.exp(-ALPHA * l_dist) 
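
A minimal usage sketch for length_dist, assuming the function above is importable, NLTK's WordNet corpus is downloaded, and ALPHA is defined (the Li et al. paper this toolkit follows uses ALPHA = 0.2; treat that value as an assumption here):

import math
from nltk.corpus import wordnet as wn

ALPHA = 0.2  # assumed constant; the toolkit defines its own value

dog = wn.synset('dog.n.01')
cat = wn.synset('cat.n.01')

print(length_dist(dog, dog))  # identical synsets: exp(0) == 1.0
print(length_dist(dog, cat))  # similarity decays with path length
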
Example #2
Source File: elements.py    From ChemDataExtractor with MIT License
def scan(self, tokens, max_matches=six.MAXSIZE, overlap=False):
        """"""
        if not self.streamlined:
            self.streamline()
        matches = 0
        i = 0
        length = len(tokens)
        while i < length and matches < max_matches:
            try:
                results, next_i = self.parse(tokens, i)
            except ParseException:
                # no match at this position; move on to the next token
                i += 1
            else:
                if next_i > i:
                    matches += 1
                    if len(results) == 1:
                        results = results[0]
                    yield results, i, next_i
                    if overlap:
                        i += 1
                    else:
                        i = next_i
                else:
                    i += 1 
Example #3
Source File: train.py    From mead-baseline with Apache License 2.0
def __init__(self, model, **kwargs):
        super().__init__()
        if type(model) is dict:
            model = create_model_for('tagger', **model)
        self.grad_accum = int(kwargs.get('grad_accum', 1))
        self.gpus = int(kwargs.get('gpus', 1))
        # By default support IOB1/IOB2
        self.span_type = kwargs.get('span_type', 'iob')
        self.verbose = kwargs.get('verbose', False)

        logger.info('Setting span type %s', self.span_type)
        self.model = model
        self.idx2label = revlut(self.model.labels)
        self.clip = float(kwargs.get('clip', 5))
        self.optimizer = OptimizerManager(self.model, **kwargs)
        if self.gpus > 1:
            logger.info("Trainer for PyTorch tagger currently doesnt support multiple GPUs.  Setting to 1")
            self.gpus = 1
        if self.gpus > 0 and self.model.gpu:
            self.model = model.cuda()
        else:
            logger.warning("Requested training on CPU.  This will be slow.")

        self.nsteps = kwargs.get('nsteps', six.MAXSIZE) 
Example #4
Source File: test_six.py    From c4ddev with MIT License
def test_MAXSIZE():
    try:
        # This shouldn't raise an overflow error.
        six.MAXSIZE.__index__()
    except AttributeError:
        # Before Python 2.6.
        pass
    py.test.raises(
        (ValueError, OverflowError),
        operator.mul, [None], six.MAXSIZE + 1) 
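
The test works because, on CPython, a sequence's length is stored in a C ssize_t, whose maximum value is exactly what six.MAXSIZE reports; replicating a one-element list MAXSIZE + 1 times therefore cannot succeed. A quick sketch of the same boundary:

import operator
import six

assert operator.mul([None], 1) == [None]   # fine: length 1
try:
    operator.mul([None], six.MAXSIZE + 1)  # length would exceed ssize_t
except (ValueError, OverflowError) as exc:
    print(type(exc).__name__)              # OverflowError on CPython
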
Example #5
Source File: imageutil.py    From Gooey with MIT License
def resizeImage(im, targetHeight):
    im.thumbnail((six.MAXSIZE, targetHeight))
    return im 
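
The trick here: Image.thumbnail shrinks an image to fit within a (width, height) bounding box while preserving aspect ratio, so passing six.MAXSIZE as the width leaves the height as the only binding constraint. A runnable sketch, assuming Pillow is installed:

from PIL import Image
import six

im = Image.new('RGB', (800, 600))
im.thumbnail((six.MAXSIZE, 150))  # width effectively unbounded
print(im.size)  # (200, 150): width follows the aspect ratio
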
Example #6
Source File: short_sentence_similarity.py    From Semantic-Texual-Similarity-Toolkits with MIT License
def hierarchy_dist(synset_1, synset_2):
    """
    Return a measure of depth in the ontology to model the fact that
    nodes closer to the root are broader and have less semantic similarity
    than nodes further away from the root.
    """
    h_dist = six.MAXSIZE
    if synset_1 is None or synset_2 is None:
        return h_dist
    if synset_1 == synset_2:
        # return the depth of one of synset_1 or synset_2
        h_dist = max([x[1] for x in synset_1.hypernym_distances()])
    else:
        # find the max depth of least common subsumer
        hypernyms_1 = {x[0]:x[1] for x in synset_1.hypernym_distances()}
        hypernyms_2 = {x[0]:x[1] for x in synset_2.hypernym_distances()}
        lcs_candidates = set(hypernyms_1.keys()).intersection(
            set(hypernyms_2.keys()))
        if len(lcs_candidates) > 0:
            lcs_dists = []
            for lcs_candidate in lcs_candidates:
                lcs_d1 = 0
                if lcs_candidate in hypernyms_1:
                    lcs_d1 = hypernyms_1[lcs_candidate]
                lcs_d2 = 0
                if lcs_candidate in hypernyms_2:
                    lcs_d2 = hypernyms_2[lcs_candidate]
                lcs_dists.append(max([lcs_d1, lcs_d2]))
            h_dist = max(lcs_dists)
        else:
            h_dist = 0
    return ((math.exp(BETA * h_dist) - math.exp(-BETA * h_dist)) /
        (math.exp(BETA * h_dist) + math.exp(-BETA * h_dist))) 
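
The return expression is just the hyperbolic tangent: (e^x - e^-x) / (e^x + e^-x) == tanh(x), so the function squashes the depth into [0, 1). A quick numeric check, assuming BETA = 0.45 (the value used in the Li et al. paper; the toolkit defines its own constant):

import math

BETA = 0.45  # assumed value

for h_dist in (0, 1, 5, 20):
    explicit = ((math.exp(BETA * h_dist) - math.exp(-BETA * h_dist)) /
                (math.exp(BETA * h_dist) + math.exp(-BETA * h_dist)))
    assert abs(explicit - math.tanh(BETA * h_dist)) < 1e-12
print('matches math.tanh for all tested depths')
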
Example #7
Source File: test_six.py    From six with MIT License
def test_integer_types():
    assert isinstance(1, six.integer_types)
    assert isinstance(-1, six.integer_types)
    assert isinstance(six.MAXSIZE + 23, six.integer_types)
    assert not isinstance(.1, six.integer_types) 
Example #8
Source File: test_six.py    From six with MIT License
def test_MAXSIZE():
    try:
        # This shouldn't raise an overflow error.
        six.MAXSIZE.__index__()
    except AttributeError:
        # Before Python 2.6.
        pass
    pytest.raises(
        (ValueError, OverflowError),
        operator.mul, [None], six.MAXSIZE + 1) 
Example #9
Source File: test_int2str.py    From geohash-hilbert with MIT License
def test_randoms(bpc):
    prev_code = None
    for _i in range(100):
        i = randint(0, six.MAXSIZE)
        code = encode_int(i, bpc)
        assert isinstance(code, six.text_type)
        assert code != i
        assert i == decode_int(code, bpc)

        if prev_code is not None:
            assert code != prev_code

        prev_code = code 
Example #10
Source File: plugin.py    From pytest-cloud with MIT License
def get_node_specs(node, host, caps, python=None, chdir=None, mem_per_process=None, max_processes=None):
    """Get single node specs.

    Executed on the master node side.

    :param node: node name in form <username>@<hostname>
    :type node: str
    :param host: hostname of the node
    :type host: str
    :param python: python executable name to use on the remote side
    :type python: str
    :param chdir: relative path where to run (and sync) tests on the remote side
    :type chdir: str
    :param mem_per_process: optional amount of memory per process needed, in megabytes
    :type mem_per_process: int
    :param max_processes: optional maximum number of processes per test node
    :type max_processes: int

    :return: `list` of test gateway specs for single test node in form ['1*ssh=<node>//id=<hostname>_<index>', ...]
    :rtype: list
    """
    count = min(max_processes or six.MAXSIZE, caps['cpu_count'])
    if mem_per_process:
        count = min(int(math.floor(caps['virtual_memory']['available'] / mem_per_process)), count)
    for index in range(count):
        fmt = 'ssh={node}//id={host}_{index}//chdir={chdir}//python={python}'
        yield fmt.format(
            node=node,
            host=host,
            index=index,
            chdir=chdir,
            python=python) 
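
The `max_processes or six.MAXSIZE` idiom treats a missing cap (None or 0) as "no limit": six.MAXSIZE acts as +infinity inside min(), so the CPU count wins unless an explicit cap is smaller. A small sketch of just that line:

import six

def effective_count(max_processes, cpu_count):
    # mirrors: min(max_processes or six.MAXSIZE, caps['cpu_count'])
    return min(max_processes or six.MAXSIZE, cpu_count)

print(effective_count(None, 8))  # 8: no cap given, CPU count wins
print(effective_count(4, 8))     # 4: the explicit cap wins
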
Example #11
Source File: test_six.py    From c4ddev with MIT License
def test_integer_types():
    assert isinstance(1, six.integer_types)
    assert isinstance(-1, six.integer_types)
    assert isinstance(six.MAXSIZE + 23, six.integer_types)
    assert not isinstance(.1, six.integer_types) 
Example #12
Source File: test_rpc.py    From ryu with Apache License 2.0
def test_0_call_int2(self):
        c = rpc.Client(self._client_sock)
        obj = six.MAXSIZE
        assert isinstance(obj, int)
        result = c.call(b'resp', [obj])
        assert result == obj
        assert isinstance(result, type(obj)) 
Example #13
Source File: module_explorer.py    From cloud-debug-python with Apache License 2.0
def GetCodeObjectAtLine(module, line):
  """Searches for a code object at the specified line in the specified module.

  Args:
    module: module to explore.
    line: 1-based line number of the statement.

  Returns:
    (True, Code object) on success or (False, (prev_line, next_line)) on
    failure, where prev_line and next_line are the closest lines with code above
    and below the specified line, or None if they do not exist.
  """
  if not hasattr(module, '__file__'):
    return (False, (None, None))

  prev_line = 0
  next_line = six.MAXSIZE

  for code_object in _GetModuleCodeObjects(module):
    for co_line_number in _GetLineNumbers(code_object):
      if co_line_number == line:
        return (True, code_object)
      elif co_line_number < line:
        prev_line = max(prev_line, co_line_number)
      elif co_line_number > line:
        next_line = min(next_line, co_line_number)
        break

  prev_line = None if prev_line == 0 else prev_line
  next_line = None if next_line == six.MAXSIZE else next_line
  return (False, (prev_line, next_line)) 
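
Here 0 and six.MAXSIZE serve as sentinels for a running max and min of line numbers: any real line number beats them, and if either survives untouched it is mapped back to None, meaning no code exists above (or below) the requested line. The same pattern in isolation:

import six

code_lines, target = [4, 9, 17], 12
prev_line, next_line = 0, six.MAXSIZE
for n in code_lines:
    if n < target:
        prev_line = max(prev_line, n)
    elif n > target:
        next_line = min(next_line, n)
prev_line = None if prev_line == 0 else prev_line
next_line = None if next_line == six.MAXSIZE else next_line
print(prev_line, next_line)  # 9 17
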
Example #14
Source File: test_six.py    From data with GNU General Public License v3.0
def test_integer_types():
    assert isinstance(1, six.integer_types)
    assert isinstance(-1, six.integer_types)
    assert isinstance(six.MAXSIZE + 23, six.integer_types)
    assert not isinstance(.1, six.integer_types) 
Example #15
Source File: test_six.py    From data with GNU General Public License v3.0
def test_MAXSIZE():
    try:
        # This shouldn't raise an overflow error.
        six.MAXSIZE.__index__()
    except AttributeError:
        # Before Python 2.6.
        pass
    py.test.raises(
        (ValueError, OverflowError),
        operator.mul, [None], six.MAXSIZE + 1) 
Example #16
Source File: test_six.py    From data with GNU General Public License v3.0
def test_integer_types():
    assert isinstance(1, six.integer_types)
    assert isinstance(-1, six.integer_types)
    assert isinstance(six.MAXSIZE + 23, six.integer_types)
    assert not isinstance(.1, six.integer_types) 
Example #17
Source File: test_six.py    From data with GNU General Public License v3.0
def test_MAXSIZE():
    try:
        # This shouldn't raise an overflow error.
        six.MAXSIZE.__index__()
    except AttributeError:
        # Before Python 2.6.
        pass
    py.test.raises(
        (ValueError, OverflowError),
        operator.mul, [None], six.MAXSIZE + 1) 
Example #18
Source File: test_rpc.py    From ryu with Apache License 2.0
def test_0_call_int3(self):
        c = rpc.Client(self._client_sock)
        obj = -six.MAXSIZE - 1  # the most negative machine-sized int
        assert isinstance(obj, int)
        result = c.call(b'resp', [obj])
        assert result == obj
        assert isinstance(result, type(obj)) 
Example #19
Source File: test_decay.py    From mead-baseline with Apache License 2.0
def test_composite_calls_rest():
    warmup_steps = np.random.randint(50, 101)
    warm = MagicMock()
    warm.warmup_steps = warmup_steps
    rest = MagicMock()
    lr = CompositeLRScheduler(warm=warm, rest=rest)
    step = np.random.randint(warmup_steps + 1, six.MAXSIZE)
    _ = lr(step)
    warm.assert_not_called()
    rest.assert_called_once_with(step - warmup_steps) 
Example #20
Source File: eager.py    From mead-baseline with Apache License 2.0
def __init__(self, model_params, **kwargs):
        """Create a Trainer, and give it the parameters needed to instantiate the model

        :param model_params: The model parameters
        :param kwargs: See below

        :Keyword Arguments:

          * *nsteps* (`int`) -- If we should report every n-steps, this should be passed
          * *ema_decay* (`float`) -- If we are doing an exponential moving average, what decay to use
          * *clip* (`int`) -- If we are doing gradient clipping, what value to use
          * *optim* (`str`) -- The name of the optimizer we are using
          * *lr* (`float`) -- The learning rate we are using
          * *mom* (`float`) -- If we are using SGD, what value to use for momentum
          * *beta1* (`float`) -- Adam-specific hyper-param, defaults to `0.9`
          * *beta2* (`float`) -- Adam-specific hyper-param, defaults to `0.999`
          * *epsilon* (`float`) -- Adam-specific hyper-param, defaults to `1e-8`

        """
        super().__init__()

        if type(model_params) is dict:
            self.model = create_model_for('classify', **model_params)
        else:
            self.model = model_params

        self.optimizer = EagerOptimizer(loss, **kwargs)
        self.nsteps = kwargs.get('nsteps', six.MAXSIZE)
        self._checkpoint = tf.train.Checkpoint(optimizer=self.optimizer.optimizer, model=self.model)
        checkpoint_dir = '{}-{}'.format("./tf-classify", os.getpid())

        self.checkpoint_manager = tf.train.CheckpointManager(self._checkpoint,
                                                             directory=checkpoint_dir,
                                                             max_to_keep=5) 
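
nsteps defaulting to six.MAXSIZE means "report essentially never": assuming the trainer checks something like step % nsteps == 0 (an assumption about how nsteps is consumed downstream; the same default appears in the other mead-baseline trainers below), no step ever hits the interval unless the caller passes a real value.

import six

kwargs = {}  # caller passed no reporting interval
nsteps = kwargs.get('nsteps', six.MAXSIZE)
for step in range(1, 100_000):
    if step % nsteps == 0:  # never true with the MAXSIZE default
        print('intermediate report at step', step)
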
Example #21
Source File: distributed.py    From mead-baseline with Apache License 2.0
def __init__(self, model_params, **kwargs):
        """Create a Trainer, and give it the parameters needed to instantiate the model

        :param model_params: The model parameters
        :param kwargs: See below

        :Keyword Arguments:

          * *nsteps* (`int`) -- If we should report every n-steps, this should be passed
          * *ema_decay* (`float`) -- If we are doing an exponential moving average, what decay to use
          * *clip* (`int`) -- If we are doing gradient clipping, what value to use
          * *optim* (`str`) -- The name of the optimizer we are using
          * *lr* (`float`) -- The learning rate we are using
          * *mom* (`float`) -- If we are using SGD, what value to use for momentum
          * *beta1* (`float`) -- Adam-specific hyper-param, defaults to `0.9`
          * *beta2* (`float`) -- Adam-specific hyper-param, defaults to `0.999`
          * *epsilon* (`float`) -- Adam-specific hyper-param, defaults to `1e-8`

        """
        super().__init__()

        self.gpus = int(kwargs.get('gpus', 1))
        if type(model_params) is dict:
            self.model = create_model_for('classify', **model_params)
        else:
            self.model = model_params

        self.optimizer = EagerOptimizer(loss, **kwargs)
        self.nsteps = kwargs.get('nsteps', six.MAXSIZE)
        self._checkpoint = tf.train.Checkpoint(optimizer=self.optimizer.optimizer, model=self.model)
        checkpoint_dir = '{}-{}'.format("./tf-classify", os.getpid())

        self.checkpoint_manager = tf.train.CheckpointManager(self._checkpoint,
                                                             directory=checkpoint_dir,
                                                             max_to_keep=5)
        devices = ['/device:GPU:{}'.format(i) for i in range(self.gpus)]
        self.strategy = tf.distribute.MirroredStrategy(devices) 
Example #22
Source File: utils.py    From mead-baseline with Apache License 2.0
def __init__(self, model_params, **kwargs):
        """Create a Trainer, and give it the parameters needed to instantiate the model

        :param model_params: The model parameters
        :param kwargs: See below

        :Keyword Arguments:

          * *nsteps* (`int`) -- If we should report every n-steps, this should be passed
          * *ema_decay* (`float`) -- If we are doing an exponential moving average, what decay to use
          * *clip* (`int`) -- If we are doing gradient clipping, what value to use
          * *optim* (`str`) -- The name of the optimizer we are using
          * *lr* (`float`) -- The learning rate we are using
          * *mom* (`float`) -- If we are using SGD, what value to use for momentum
          * *beta1* (`float`) -- Adam-specific hyper-param, defaults to `0.9`
          * *beta2* (`float`) -- Adam-specific hyper-param, defaults to `0.999`
          * *epsilon* (`float`) -- Adam-specific hyper-param, defaults to `1e-8`

        """
        super().__init__()
        if type(model_params) is dict:
            self.model = create_model_for('tagger', **model_params)
        else:
            self.model = model_params
        self.sess = self.model.sess
        self.loss = self.model.create_loss()
        span_type = kwargs.get('span_type', 'iob')
        verbose = kwargs.get('verbose', False)
        self.evaluator = TaggerEvaluatorTf(self.model, span_type, verbose)
        self.global_step, self.train_op = optimizer(self.loss, colocate_gradients_with_ops=True, variables=self.model.trainable_variables, **kwargs)
        self.nsteps = kwargs.get('nsteps', six.MAXSIZE)
        tables = tf.compat.v1.tables_initializer()
        self.model.sess.run(tables)
        init = tf.compat.v1.global_variables_initializer()
        self.model.sess.run(init)
        saver = tf.compat.v1.train.Saver()
        self.model.save_using(saver)
        checkpoint = kwargs.get('checkpoint')
        if checkpoint is not None:
            skip_blocks = kwargs.get('blocks_to_skip', ['OptimizeLoss'])
            reload_checkpoint(self.model.sess, checkpoint, skip_blocks) 
Example #23
Source File: utils.py    From mead-baseline with Apache License 2.0
def _try_user_cmp(user_cmp):
    user_cmp = user_cmp.lower()
    if user_cmp in {"lt", "less", "less than", "<", "less_than"}:
        return lt, six.MAXSIZE
    if user_cmp in {"le", "lte", "<="}:
        return le, six.MAXSIZE
    if user_cmp in {"ge", "gte", ">="}:
        return ge, -six.MAXSIZE - 1
    return gt, -six.MAXSIZE - 1 
Example #24
Source File: utils.py    From mead-baseline with Apache License 2.0
def get_metric_cmp(metric, user_cmp=None, less_than_metrics=LESS_THAN_METRICS):
    if user_cmp is not None:
        return _try_user_cmp(user_cmp)
    if metric in less_than_metrics:
        return lt, six.MAXSIZE
    return gt, -six.MAXSIZE - 1 
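
get_metric_cmp pairs each comparator with a matching "worst possible" seed: six.MAXSIZE for less-is-better metrics (anything beats +infinity) and -six.MAXSIZE - 1, the most negative native int, for greater-is-better ones, so the first real score always wins. A usage sketch, assuming 'f1' is not in LESS_THAN_METRICS:

cmp_fn, best = get_metric_cmp('f1')  # greater-is-better: gt, -MAXSIZE - 1
for score in (0.4, 0.7, 0.6):
    if cmp_fn(score, best):  # the first real score always beats the seed
        best = score
print(best)  # 0.7
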
Example #25
Source File: train.py    From mead-baseline with Apache License 2.0
def __init__(self, model, **kwargs):

        super().__init__()
        if type(model) is dict:
            model = create_model_for('classify', **model)
        self.clip = float(kwargs.get('clip', 5))
        self.labels = model.labels
        self.gpus = int(kwargs.get('gpus', 1))
        if self.gpus == -1:
            self.gpus = len(os.getenv('CUDA_VISIBLE_DEVICES', os.getenv('NV_GPU', '0')).split(','))

        self.optimizer = OptimizerManager(model, **kwargs)
        self.model = model
        if self.gpus > 0 and self.model.gpu:
            self.crit = model.create_loss().cuda()
            if self.gpus > 1:
                self.model = torch.nn.DataParallel(model).cuda()
            else:
                self.model.cuda()
        else:
            logger.warning("Requested training on CPU.  This will be slow.")
            self.crit = model.create_loss()
            self.model = model
        self.nsteps = kwargs.get('nsteps', six.MAXSIZE) 
Example #26
Source File: import_geonames.py    From EpiTator with Apache License 2.0
def read_geonames_csv():
    print("Downloading geoname data from: " + GEONAMES_ZIP_URL)
    try:
        url = request.urlopen(GEONAMES_ZIP_URL)
    except URLError:
        print("If you are operating behind a firewall, try setting the HTTP_PROXY/HTTPS_PROXY environment variables.")
        raise
    zipfile = ZipFile(BytesIO(url.read()))
    print("Download complete")
    # Loading geonames data may cause errors without setting csv.field_size_limit:
    if sys.platform == "win32":
        max_c_long_on_windows = (2**32 // 2) - 1  # floor division keeps this an int on Python 3
        csv.field_size_limit(max_c_long_on_windows)
    else:
        csv.field_size_limit(sys.maxint if six.PY2 else six.MAXSIZE)
    with zipfile.open('allCountries.txt') as f:
        reader = unicodecsv.DictReader(f,
                                       fieldnames=[
                                           k for k, v in geonames_field_mappings],
                                       encoding='utf-8',
                                       delimiter='\t',
                                       quoting=csv.QUOTE_NONE)
        for d in reader:
            d['population'] = parse_number(d['population'], 0)
            d['latitude'] = parse_number(d['latitude'], 0)
            d['longitude'] = parse_number(d['longitude'], 0)
            if len(d['alternatenames']) > 0:
                d['alternatenames'] = d['alternatenames'].split(',')
            else:
                d['alternatenames'] = []
            yield d 
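
The platform branch exists because csv.field_size_limit stores its argument in a C long, which stays 32 bits on Windows even under 64-bit Python, so sys.maxsize overflows it there. The same guard can be written with a try/except:

import csv
import sys

try:
    csv.field_size_limit(sys.maxsize)
except OverflowError:
    csv.field_size_limit(2**31 - 1)  # Windows: C long is 32 bits
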
Example #27
Source File: logclient_operator.py    From aliyun-log-python-sdk with MIT License
def query_more(fn, offset, size, batch_size, *args):
    """list all data using the fn
    """
    if size < 0:
        expected_total_size = six.MAXSIZE
    else:
        expected_total_size = size
        batch_size = min(size, batch_size)

    response = None
    total_count_got = 0
    complete = False
    while True:
        ret = fn(*args, offset=offset, size=batch_size)

        if response is None:
            response = ret
        else:
            response.merge(ret)

        # if the response is incomplete, stop paging
        if not ret.is_completed():
            break

        count = ret.get_count()
        offset += count
        total_count_got += count
        batch_size = min(batch_size, expected_total_size - total_count_got)
        if count == 0 or total_count_got >= expected_total_size:
            break

    return response 
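
A usage sketch with a stubbed fetch function: passing size=-1 means "fetch everything", which query_more models by setting the expected total to six.MAXSIZE so only an empty batch ends the loop. FakeResponse is hypothetical, standing in for the SDK's real response type:

class FakeResponse:
    def __init__(self, items):
        self.items = list(items)
    def merge(self, other):
        self.items.extend(other.items)
    def is_completed(self):
        return True
    def get_count(self):
        return len(self.items)

data = list(range(10))

def fetch(offset=0, size=0):
    return FakeResponse(data[offset:offset + size])

resp = query_more(fetch, offset=0, size=-1, batch_size=4)
print(resp.items)  # all ten items, gathered in batches of 4
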
Example #28
Source File: logclient_operator.py    From aliyun-log-python-sdk with MIT License
def list_more(fn, offset, size, batch_size, *args):
    """list all data using the fn
    """
    if size < 0:
        expected_total_size = six.MAXSIZE
    else:
        expected_total_size = size
        batch_size = min(size, batch_size)

    response = None
    total_count_got = 0
    while True:
        ret = fn(*args, offset=offset, size=batch_size)
        if response is None:
            response = ret
        else:
            response.merge(ret)

        count = ret.get_count()
        total = ret.get_total()
        offset += count
        total_count_got += count
        batch_size = min(batch_size, expected_total_size - total_count_got)

        if count == 0 or offset >= total or total_count_got >= expected_total_size:
            break

    return response 
Example #29
Source File: memlayout.py    From ngraph-python with Apache License 2.0
def allocate_best_fit(self, size):
        size = MemoryManager.align(size, self.alignment)
        best_node = None
        best_offset = None
        best_delta = six.MAXSIZE
        offset = 0
        for i, node in enumerate(self.node_list):
            delta = node.size - size
            if node.is_free and delta >= 0:
                if not best_node or delta < best_delta:
                    best_i = i
                    best_node = node
                    best_offset = offset
                    best_delta = delta
            offset += node.size

        if not best_node:
            raise RuntimeError("Bad Allocation")
        else:
            if best_delta == 0:
                best_node.is_free = False
            else:
                self.node_list[best_i].size -= size
                self.node_list.insert(best_i, MemoryNode(size, is_free=False))

        self.max_allocation = max(self.max_allocation, best_offset + size)
        return best_offset 
Example #30
Source File: memlayout.py    From ngraph-python with Apache License 2.0
def __init__(self, alignment):
        self.alignment = alignment
        self.node_list = [MemoryNode(six.MAXSIZE)]
        self.max_allocation = 0
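
To see the two ngraph excerpts work together, here is a self-contained sketch: a minimal MemoryNode plus a condensed best-fit search mirroring Example #29, where best_delta starts at six.MAXSIZE so any free node large enough immediately becomes the best candidate. MemoryNode's shape is an assumption modelled on how it is used above:

import six

class MemoryNode:
    def __init__(self, size, is_free=True):
        self.size = size
        self.is_free = is_free

def best_fit(node_list, size):
    """Return (index, offset) of the smallest free node that fits, or None."""
    best_i, best_offset, best_delta = None, None, six.MAXSIZE
    offset = 0
    for i, node in enumerate(node_list):
        delta = node.size - size
        if node.is_free and 0 <= delta < best_delta:
            best_i, best_offset, best_delta = i, offset, delta
        offset += node.size
    return (best_i, best_offset) if best_i is not None else None

heap = [MemoryNode(six.MAXSIZE)]  # Example #30's starting state
print(best_fit(heap, 256))        # (0, 0): carve from the one free node
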