Python itertools.product() Examples

The following code examples show how to use itertools.product(). They are extracted from open source Python projects.
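
itertools.product() computes the Cartesian product of the input iterables, equivalent to nested for-loops; the repeat keyword takes the product of a single iterable with itself. A minimal standalone illustration (standard library only, independent of the projects below):

from itertools import product

# Product of two iterables: every (letter, number) pair, in nested-loop order.
print(list(product('AB', [1, 2])))
# [('A', 1), ('A', 2), ('B', 1), ('B', 2)]

# repeat=n is equivalent to passing the same iterable n times.
print(list(product([0, 1], repeat=3)))
# [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)]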

Example 1
Project: fxnn   Author: khaotik   File: dataset.py    (MIT License)
def download(self, local_dir_=None, url_=None):
        '''
        Args:
            local_dir_: where to save downloaded file
            url_: where to download dataset, if None, use default 'http://yann.lecun.com/exdb/mnist/'
        '''
        # TODO check whether file exists
        if url_ is None:
            url_ = 'http://yann.lecun.com/exdb/mnist/'
        if local_dir_ is None:
            local_dir = self.DEFAULT_DIR
        else:
            local_dir = Path(local_dir_)
        local_dir.mkdir(parents=True, exist_ok=True)
        in_filename = '%(subset)s-%(type_s)s-idx%(ndim)s-ubyte.gz'
        for subset, (type_s, ndim) in product(
            ('train', 't10k'), zip(('images', 'labels'), (3,1))):
            filename = in_filename % locals()
            urllib.request.urlretrieve( url_ + filename, str(local_dir / filename)) 
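
The product/zip combination above yields one (subset, (type_s, ndim)) tuple per file to fetch. A minimal sketch of just that iteration, reusing the MNIST naming scheme from the snippet:

from itertools import product

in_filename = '%(subset)s-%(type_s)s-idx%(ndim)s-ubyte.gz'
for subset, (type_s, ndim) in product(
        ('train', 't10k'), zip(('images', 'labels'), (3, 1))):
    # Four combinations: {train, t10k} x {(images, 3), (labels, 1)}.
    print(in_filename % {'subset': subset, 'type_s': type_s, 'ndim': ndim})
# train-images-idx3-ubyte.gz
# train-labels-idx1-ubyte.gz
# t10k-images-idx3-ubyte.gz
# t10k-labels-idx1-ubyte.gz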
Example 2
Project: treecat   Author: posterior   File: serving_test.py    (Apache License 2.0)
def test_server_logprob_normalized(N, V, C, M):
    model = generate_fake_model(N, V, C, M)
    config = TINY_CONFIG.copy()
    config['model_num_clusters'] = M
    model['config'] = config
    server = TreeCatServer(model)

    # The total probability of all categorical rows should be 1.
    ragged_index = model['suffstats']['ragged_index']
    factors = []
    for v in range(V):
        C = ragged_index[v + 1] - ragged_index[v]
        factors.append([one_hot(c, C) for c in range(C)])
    data = np.array(
        [np.concatenate(columns) for columns in itertools.product(*factors)],
        dtype=np.int8)
    logprobs = server.logprob(data)
    logtotal = np.logaddexp.reduce(logprobs)
    assert logtotal == pytest.approx(0.0, abs=1e-5) 
Example 3
Project: zellij   Author: nedbat   File: test_defuzz.py    (Apache License 2.0)
def test_correct_distance(start, ndigits, dimensions):
    eps = 1e-10
    window = 10 ** -ndigits
    smallest_different = 1.5 * window + eps
    largest_same = 0.5 * window - eps
    step = 10.09 * window
    for i in range(10):
        num = start + i * step
        pt = (num,) * dimensions
        for signs in itertools.product([-1, 0, 1], repeat=dimensions):
            if all(s == 0 for s in signs):
                continue
            # Need a new defuzzer for each attempt, or previous "should be
            # different" points will be close to the "should be same" point.
            dfz = Defuzzer(ndigits=ndigits)
            assert dfz.defuzz(pt) == pt
            st = tuple(num + s * largest_same for s in signs)
            dfzst = dfz.defuzz(st)
            assert dfzst == pt
            dt = tuple(num + s * smallest_different for s in signs)
            dfzdt = dfz.defuzz(dt)
            assert dfzdt != pt 
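
The product([-1, 0, 1], repeat=dimensions) call above enumerates every sign combination around a point, i.e. all neighbor offsets of a grid cell. A minimal sketch for two dimensions:

from itertools import product

dimensions = 2
offsets = [signs for signs in product([-1, 0, 1], repeat=dimensions)
           if any(s != 0 for s in signs)]  # skip the all-zero center point
print(offsets)
# [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]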
Example 4
Project: cellranger   Author: 10XGenomics   File: report.py    (license)
def __init__(self, genomes):
        self.total_reads = 0.0
        self.unmapped_reads = 0.0
        self.good_umi_reads = 0.0
        self.good_bc_reads = 0.0
        self.corrected_bc_reads = 0.0
        self.genomes = genomes + [cr_constants.MULTI_REFS_PREFIX]
        self.regions = cr_constants.REGIONS
        genome_region_dict = lambda: {(g,r): 0.0 for g,r in itertools.product(self.genomes, self.regions)}
        genome_dict = lambda: {g: 0.0 for g in self.genomes}
        self.mapped_reads = genome_region_dict()
        self.conf_mapped_reads = genome_region_dict()
        self.conf_mapped_bc_reads = genome_region_dict()
        self.antisense_reads = genome_dict()
        self.discordant_pairs = genome_dict()
        self.genome_reads = genome_dict() 
Example 5
Project: cellranger   Author: 10XGenomics   File: report.py    (license)
def _get_metric_keys(self, name):
        metric_cls, metric_dict = self.metrics_dict[name]
        prefixes = metric_dict.get('prefixes', [])
        kwargs = metric_dict.get('kwargs', {})

        always_active = kwargs.get('always_active', False)

        parts = [[name]]
        for prefix in prefixes:
            prefix = getattr(self, prefix)

            if prefix:
                parts.append(prefix)

        # Check to make sure all specified metrics are present for metrics that are always active
        if always_active and len(parts) != len(prefixes) + 1:
            return []

        # Return the set of keys
        keys = set(itertools.product(*parts))

        # Add bare keys
        keys.add((name,))

        return keys 
Example 6
Project: netra   Author: akshah   File: PathAnalysis.py    (Apache License 2.0)
def analyze(self):
        
        results=[]
            
        #Making all of the possibilities
        allGraphs = product(*self.countries)
        
        for g in allGraphs:
            results.append(hasCycle(g))
        
        #Now searching for cycles
        #results = [hasCycle(graph) for graph in allGraphs]
        numCycles = results.count(True)
        
        #Seeing if it's definitely an anomalous path
        if numCycles == len(results):
            self.result = 1
        #Seeing if it's only potentially anomalous
        elif numCycles > 0:
            self.result = 2
        #Everything seems to be fine
        else:
            self.result = 0 
Example 7
Project: guided-filter   Author: lisabug   File: main.py    (MIT License)
def test_color():
    image = cv2.imread('data/Lenna.png')
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    noise = (np.random.rand(image.shape[0], image.shape[1], 3) - 0.5) * 50
    image_noise = image + noise

    radius = [1, 2, 4]
    eps = [0.005]

    combs = list(itertools.product(radius, eps))

    vis.plot_single(to_32F(image), title='origin')
    vis.plot_single(to_32F(image_noise), title='noise')

    for r, e in combs:
        GF = GuidedFilter(image, radius=r, eps=e)
        vis.plot_single(to_32F(GF.filter(image_noise)), title='r=%d, eps=%.3f' % (r, e)) 
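
This is the standard grid-sweep pattern: product() crosses every parameter list so each filter setting is tried exactly once. A dependency-free sketch of just the sweep (the GuidedFilter call is replaced by a print):

from itertools import product

radius = [1, 2, 4]
eps = [0.005]
for r, e in product(radius, eps):
    # One run per (radius, eps) grid point; 3 x 1 = 3 runs here.
    print('r=%d, eps=%.3f' % (r, e))
# r=1, eps=0.005
# r=2, eps=0.005
# r=4, eps=0.005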
Example 8
Project: cloud-volume   Author: seung-lab   File: lib.py    (BSD 3-Clause "New" or "Revised" License)
def xyzrange(start_vec, end_vec=None, stride_vec=(1,1,1)):
  if end_vec is None:
    end_vec = start_vec
    start_vec = (0,0,0)

  start_vec = np.array(start_vec, dtype=int)
  end_vec = np.array(end_vec, dtype=int)

  rangeargs = ( (start, end, stride) for start, end, stride in zip(start_vec, end_vec, stride_vec) )
  xyzranges = [ range(*arg) for arg in rangeargs ]
  
  # iterate over x first, then y, then z;
  # this way you process the xy plane slice by slice,
  # but you don't process lots of prefix-adjacent keys,
  # since all the keys start with x
  zyxranges = xyzranges[::-1]

  def vectorize():
    pt = Vec(0,0,0)
    for z,y,x in product(*zyxranges):
      pt.x, pt.y, pt.z = x, y, z
      yield pt

  return vectorize() 
Example 9
Project: simple_rl   Author: david-abel   File: HanoiMDPClass.py    (Apache License 2.0)
def __init__(self, num_pegs=3, num_discs=3, gamma=0.95):
        '''
        Args:
            num_pegs (int)
            num_discs (int)
            gamma (float)
        '''
        self.num_pegs = num_pegs
        self.num_discs = num_discs
        HanoiMDP.ACTIONS = [str(x) + str(y) for x, y in itertools.product(xrange(self.num_pegs), xrange(self.num_pegs)) if x != y]

        # Setup init state.
        init_state = [" " for peg in xrange(num_pegs)]
        x = ""
        for i in xrange(num_discs):
            x += chr(97 + i)
        init_state[0] = x
        init_state = State(data=init_state)

        MDP.__init__(self, HanoiMDP.ACTIONS, self._transition_func, self._reward_func, init_state=init_state, gamma=gamma) 
Example 10
Project: dustbunny   Author: Teamworksapp   File: perms.py    (Apache License 2.0)
def __iter__(self):
        tally = []
        values = (pair[1] for pair in self.of)
        keys = [pair[0] for pair in self.of]
        
        # evaluate any transformers in order
        for i, v in enumerate(values):
            if callable(v):  # then create a permutation for everything 
                for p in [x for x in AllPerms(*tally)]:
                    tally.append((keys[i], v(**p)))
            else:
                tally.append((keys[i], v))
                
        values = (pair[1] for pair in tally)
        
        for tup in itertools.product(*values):
            yield dict(zip(keys, tup)) 
Example 11
Project: otRebuilder   Author: Pal3love   File: builder.py    (MIT License)
def add_ligature_subst(self, location,
                           prefix, glyphs, suffix, replacement, forceChain):
        if prefix or suffix or forceChain:
            chain = self.get_lookup_(location, ChainContextSubstBuilder)
            lookup = self.get_chained_lookup_(location, LigatureSubstBuilder)
            chain.substitutions.append((prefix, glyphs, suffix, [lookup]))
        else:
            lookup = self.get_lookup_(location, LigatureSubstBuilder)

        # OpenType feature file syntax, section 5.d, "Ligature substitution":
        # "Since the OpenType specification does not allow ligature
        # substitutions to be specified on target sequences that contain
        # glyph classes, the implementation software will enumerate
        # all specific glyph sequences if glyph classes are detected"
        for g in sorted(itertools.product(*glyphs)):
            lookup.ligatures[g] = replacement 
Example 12
Project: otRebuilder   Author: Pal3love   File: ast.py    (MIT License)
def build(self, builder):
        if self.enumerated:
            g = [self.glyphs1.glyphSet(), self.glyphs2.glyphSet()]
            for glyph1, glyph2 in itertools.product(*g):
                builder.add_specific_pair_pos(
                    self.location, glyph1, self.valuerecord1,
                    glyph2, self.valuerecord2)
            return

        is_specific = (isinstance(self.glyphs1, GlyphName) and
                       isinstance(self.glyphs2, GlyphName))
        if is_specific:
            builder.add_specific_pair_pos(
                self.location, self.glyphs1.glyph, self.valuerecord1,
                self.glyphs2.glyph, self.valuerecord2)
        else:
            builder.add_class_pair_pos(
                self.location, self.glyphs1.glyphSet(), self.valuerecord1,
                self.glyphs2.glyphSet(), self.valuerecord2) 
Example 13
Project: brain_segmentation   Author: Ryo-Ito   File: train.py    (license)
def validate(model):
    dice_coefs = []
    for image_path, label_path in zip(df_val["image"], df_val["label"]):
        image = load_nifti(image_path)
        label = load_nifti(label_path)
        centers = [[], [], []]
        for img_len, len_out, center, n_tile in zip(image.shape, args.output_shape, centers, args.n_tiles):
            assert img_len < len_out * n_tile, "{} must be smaller than {} x {}".format(img_len, len_out, n_tile)
            stride = int((img_len - len_out) / (n_tile - 1))
            center.append(len_out / 2)
            for i in range(n_tile - 2):
                center.append(center[-1] + stride)
            center.append(img_len - len_out / 2)
        output = np.zeros((dataset["n_classes"],) + image.shape[:-1])
        for x, y, z in itertools.product(*centers):
            patch = crop_patch(image, [x, y, z], args.input_shape)
            patch = np.expand_dims(patch, 0)
            patch = xp.asarray(patch)
            slices_out = [slice(center - len_out / 2, center + len_out / 2) for len_out, center in zip(args.output_shape, [x, y, z])]
            slices_in = [slice((len_in - len_out) / 2, len_in - (len_in - len_out) / 2) for len_out, len_in, in zip(args.output_shape, args.input_shape)]
            output[slice(None), slices_out[0], slices_out[1], slices_out[2]] += chainer.cuda.to_cpu(model(patch).data[0, slice(None), slices_in[0], slices_in[1], slices_in[2]])
        y = np.argmax(output, axis=0).astype(np.int32)
        dice_coefs.append(dice_coefficients(y, label, labels=range(dataset["n_classes"])))
    dice_coefs = np.array(dice_coefs)
    return np.mean(dice_coefs, axis=0) 
Example 14
Project: CausalGAN   Author: mkocaoglu   File: sample.py    (MIT License)
def take_product(do_dict):
    '''
    this function takes a dictionary like:
        {key1: 1, key2: [a, b], key3: [c, d]}
    and returns the dictionary:
        {key1: [1, 1, 1, 1], key2: [a, a, b, b], key3: [c, d, c, d]}
    i.e. the Cartesian product of the values, broadcasting scalars
    '''
    values=[]
    for v in do_dict.values():
        if hasattr(v,'__iter__'):
            values.append(v)
        else:
            values.append([v])  # allows scalar to be passed

    prod_values=np.vstack(product(*values))
    return {k:np.array(v) for k,v in zip(do_dict.keys(),zip(*prod_values))} 
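
A concrete run of the (corrected) docstring example, with numeric values so numpy keeps a single dtype. This is a minimal sketch of what take_product returns, not project code:

import numpy as np
from itertools import product

# Sketch of take_product's behavior with hypothetical inputs.
do_dict = {'key1': 1, 'key2': [10, 20], 'key3': [30, 40]}
values = [v if hasattr(v, '__iter__') else [v] for v in do_dict.values()]
prod_values = np.vstack(list(product(*values)))  # shape (4, 3): one row per combination
print({k: np.array(v) for k, v in zip(do_dict.keys(), zip(*prod_values))})
# {'key1': array([1, 1, 1, 1]), 'key2': array([10, 10, 20, 20]), 'key3': array([30, 40, 30, 40])}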
Example 15
Project: det_k_bisbm   Author: junipertcy   File: optimalks.py    (GNU General Public License v3.0)
def _check_if_local_minimum(self, ka, kb, old_desc_len, k_th):
        '''
            The `neighborhood search` as described in the paper.
        '''
        self.is_tempfile_existed = True
        items = map(lambda x: (x[0] + ka, x[1] + kb), product(range(-k_th, k_th + 1), repeat=2))
        # if any item has values less than 1, delete it. Also, exclude the suspected point.
        items = [(i, j) for i, j in items if i >= 1 and j >= 1 and (i, j) != (ka, kb)]
        ka_moving, kb_moving = 0, 0

        for item in items:
            self._calc_and_update(item, old_desc_len)
            if self._is_this_mdl(self.confident_desc_len[(item[0], item[1])]):
                p_estimate = sorted(self.confident_desc_len, key=self.confident_desc_len.get)[0]
                self._logger.info("Found {} that gives an even lower description length ...".format(p_estimate))
                ka_moving, kb_moving, _, _ = self._back_to_where_desc_len_is_lowest()
                break
        if ka_moving * kb_moving == 0:
            return True
        else:
            return False 
Example 16
Project: openstack-ansible-plugins   Author: openstack   File: tagfilter.py    (Apache License 2.0)
def _queue_task(self, host, task, task_vars, play_context):
        """Wipe the notification system and return for config tasks."""
        skip_handlers = task_vars.get('skip_handlers', True)
        if skip_handlers:
            task.notify = None
        skip_tags = task_vars.get('skip_tags')
        if skip_tags:
            if not hasattr(skip_tags, '__iter__'):
                skip_tags = (skip_tags,)
        else:
            skip_tags = ()
        if any([True for (i, j) in itertools.product(skip_tags, task.tags)
               if i in j]):
            return
        else:
            return super(StrategyModule, self)._queue_task(
                host,
                task,
                task_vars,
                play_context
            ) 
Example 17
Project: logodetect   Author: munibasad   File: crop_and_aug.py    (MIT License)
def aug_pos(annot, im):
    aug_pos_ims = []
    aug_pos_suffixes = []

    rect = get_rect(annot)
    for sx, sy in product(
            range(DATA_AUG_POS_SHIFT_MIN, DATA_AUG_POS_SHIFT_MAX),
            range(DATA_AUG_POS_SHIFT_MIN, DATA_AUG_POS_SHIFT_MAX)):
        cx = rect['cx'] + sx
        cy = rect['cy'] + sy
        cropped_im = im.crop((cx - rect['wid'] // 2, cy - rect['hgt'] // 2,
                              cx + rect['wid'] // 2, cy + rect['hgt'] // 2))
        resized_im = cropped_im.resize((CNN_IN_WIDTH, CNN_IN_HEIGHT))
        aug_pos_ims.append(resized_im)
        aug_pos_suffixes.append('p' + str(sx) + str(sy))
        cropped_im.close()

    return aug_pos_ims, aug_pos_suffixes 
Example 18
Project: mbin   Author: fanglab   File: create_kmer_freq_vectors.py    (license)
def kmer_freq ( ref_str, k ):
	"""
	Walk through sequence and return k-mer counts plus
	a pseudocount of 1.
	"""
	ref_str = ref_str.upper()
	kmers = []
	for seq in product("ATGC",repeat=k):
		kmers.append( "".join(seq) )

	kmer_counts = Counter()
	for j in range( len(ref_str)-(k-1) ):
		motif    = ref_str[j:j+k]
		kmer_counts[motif] += 1

	# Combine forward and reverse complement motifs into one count
	combined_kmer = Counter()
	for kmer in kmers:
		kmer_rc = rev_comp_motif(kmer)
		if not combined_kmer.get(kmer_rc):
			combined_kmer[kmer] = kmer_counts[kmer] + kmer_counts[kmer_rc] + 1

	return combined_kmer 
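
product("ATGC", repeat=k) enumerates all 4**k possible k-mers in a fixed order, which is what the pseudocount loop above iterates over. A minimal sketch for k=2:

from itertools import product

k = 2
kmers = ["".join(seq) for seq in product("ATGC", repeat=k)]
print(len(kmers))  # 16, i.e. 4**k
print(kmers[:6])   # ['AA', 'AT', 'AG', 'AC', 'TA', 'TT']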
Example 19
Project: mbin   Author: fanglab   File: read_scanner.py    (license)
def kmer_freq ( mode, ref_str, strand, opts ):
	ref_str = ref_str.upper()
	if strand==1:
		ref_str = ref_str[::-1]
	k = opts.comp_kmer
	kmers = []
	for seq in product("ATGC",repeat=k):
		kmers.append( "".join(seq) )

	kmer_counts = Counter()
	for j in range( len(ref_str)-(k-1) ):
		motif    = ref_str[j:j+k]
		kmer_counts[motif] += 1

	# Combine forward and reverse complement motifs into one count
	combined_kmer = Counter()
	for kmer in kmers:
		kmer_rc = motif_tools.rev_comp_motif(kmer)
		if not combined_kmer.get(kmer_rc):
			combined_kmer[kmer] = kmer_counts[kmer] + kmer_counts[kmer_rc] + 1

	return combined_kmer 
Example 20
Project: DataFS   Author: ClimateImpactLab   File: test_search.py    (MIT License)
def test_config(api1_module, local_auth_module, temp_dir_mod):

    api1_module.attach_authority('local', local_auth_module)

    temp_file = os.path.join(temp_dir_mod, 'config.yml')

    to_config_file(api1_module, config_file=temp_file, profile='myapi')

    for i, j, k in itertools.product(*tuple([range(3) for _ in range(3)])):
        arch = 'team{}_archive{}_var{}'.format(i+1, j+1, k+1)
        api1_module.create(
            arch,
            tags=list(arch.split('_')),
            metadata={
                'description': 'archive_{}_{}_{} description'.format(i, j, k)})

    yield 'myapi', temp_file 
Example 21
Project: Lyra   Author: caterinaurban   File: semantics.py    (Mozilla Public License 2.0)
def slicing_access_semantics(self, stmt: SlicingAccess, state: State) -> State:
        """Semantics of a slicing access.

        :param stmt: slicing access statement to be executed
        :param state: state before executing the slicing access
        :return: state modified by the slicing access
        """
        target = self.semantics(stmt.target, state).result
        lower = self.semantics(stmt.lower, state).result
        upper = self.semantics(stmt.upper, state).result
        stride = self.semantics(stmt.stride, state).result if stmt.stride else {None}
        result = set()
        for primary, start, stop, step in itertools.product(target, lower, upper, stride):
            slicing = Slicing(primary.typ, primary, start, stop, step)
            result.add(slicing)
        state.result = result
        return state 
Example 22
Project: krait   Author: lmdu   File: motif.py    (GNU General Public License v2.0)
def mapping(self):
		bases = ['A', 'T', 'C', 'G']
		motifs = {}
		for i in range(6):
			for motif in itertools.product(bases, repeat=i+1):
				motif = "".join(list(motif))
				if not is_motif(motif):
					continue

				smotif = self.standard(motif)
				if smotif not in motifs:
					motifs[smotif] = []

				if motif not in motifs[smotif]:
					motifs[smotif].append(motif)

		return motifs 
Example 23
Project: dnc-theano   Author: khaotik   File: dataset.py    (MIT License)
def download(self, local_dir_=None, url_=None):
        '''
        Args:
            local_dir_: where to save downloaded file
            url_: where to download dataset, if None, use default 'http://yann.lecun.com/exdb/mnist/'
        '''
        # TODO check whether file exists
        if url_ is None:
            url_ = 'http://yann.lecun.com/exdb/mnist/'
        if local_dir_ is None:
            local_dir = self.DEFAULT_DIR
        else:
            local_dir = Path(local_dir_)
        local_dir.mkdir(parents=True, exist_ok=True)
        in_filename = '%(subset)s-%(type_s)s-idx%(ndim)s-ubyte.gz'
        for subset, (type_s, ndim) in product(
            ('train', 't10k'), zip(('images', 'labels'), (3,1))):
            filename = in_filename % locals()
            urllib.request.urlretrieve( url_ + filename, str(local_dir / filename)) 
Example 24
Project: EventStoryLine   Author: tommasoc80   File: baseline_PPMI1.py    (license)
def cross_sentence(event_lemma_dict):
    """
    function to create all possible pairs between event mentions in a file
    :param event_lemma_dict: dictionary of event lemmas in file
    :return: counter dictionary of event pairs in a file
    """

    full_event_file = []
    pairs_circumstantial_corpus = Counter([])

    for k, v in event_lemma_dict.items():
        full_event_file.append(k)

    event_pairs_full = list(product(full_event_file, repeat=2))

    for i in event_pairs_full:
        pairs_circumstantial_corpus.update([i])

    return pairs_circumstantial_corpus 
Example 25
Project: EventStoryLine   Author: tommasoc80   File: baseline_PPMI1.py    (license)
def sentence_coocc(event_lemma_dict, event_same_sentence):
    """
    function to create pairs of events in the same sentence - same-sentence event pairs
    :param event_same_sentence: dictionary with lists of event markables co-occurring in the same sentence
    :param event_lemma_dict: dictionary of event ids and lemmas in file
    :return: counter dictionary of event pairs in the same sentence
    """

    same_sentence_event_lemma = collections.defaultdict(list)
    pairs_circumstantial_sentence = {}

    for k, v in event_lemma_dict.items():
        for k1, v1 in event_same_sentence.items():
            if k in v1:
                event_string = "_".join(v)
                same_sentence_event_lemma[k1].append(event_string)

    for k, v in same_sentence_event_lemma.items():
        if len(v) >= 2:
            same_sent_pairs = list(product(v, repeat=2))
            pairs_circumstantial_sentence[k] = same_sent_pairs

    return pairs_circumstantial_sentence 
Example 26
Project: mpnum   Author: dseuss   File: mppovm.py    (BSD 3-Clause "New" or "Revised" License)
def probability_map(self):
        """Map that takes a raveled MPDO to the POVM probabilities

        You can use :func:`MPPovm.expectations()` or
        :func:`MPPovm.pmf()` as convenient wrappers around this map.

        If `rho` is a matrix product density operator (MPDO), then

        .. code:: python

            mp.dot(a_povm.probability_map, rho.ravel())

        produces the POVM probabilities as MPA (similar to
        :func:`mpnum.povm.localpovm.POVM.probability_map`).

        """
        # See :func:`.localpovm.POVM.probability_map` for explanation
        # of the transpose.
        return self.transpose((0, 2, 1)).reshape(
            (pdim[0], -1) for pdim in self.shape) 
Example 27
Project: mpnum   Author: dseuss   File: mparray.py    (BSD 3-Clause "New" or "Revised" License)
def axis_iter(self, axes=0):
        """Returns an iterator yielding Sub-MPArrays of ``self`` by iterating
        over the specified physical axes.

        **Example:** If ``self`` represents a bipartite (i.e. length 2)
        array with 2 physical dimensions on each site ``A[(k,l), (m,n)]``,
        ``self.axis_iter(0)`` is equivalent to::

            (A[(k, :), (m, :)] for m in range(...) for k in range(...))

        :param axes: Iterable or int specifying the physical axes to iterate
            over (default 0 for each site)
        :returns: Iterator over :class:`.MPArray`

        """
        if not isinstance(axes, collections.Iterable):
            axes = it.repeat(axes, len(self))

        ltens_iter = it.product(*(iter(np.rollaxis(lten, i + 1))
                                  for i, lten in zip(axes, self.lt)))
        return (MPArray(ltens) for ltens in ltens_iter)

    ##########################
    #  Algebraic operations  #
    ########################## 
Example 28
Project: mpnum   Author: dseuss   File: mparray.py    (BSD 3-Clause "New" or "Revised" License)
def inner(mpa1, mpa2):
    """Compute the inner product `<mpa1|mpa2>`. Both have to have the same
    physical dimensions. If these represent an MPS, ``inner(...)`` corresponds
    to the canonical Hilbert space scalar product. If these represent an MPO,
    ``inner(...)`` corresponds to the Frobenius scalar product (with Hermitian
    conjugation in the first argument)

    :param mpa1: MPArray with same number of physical legs on each site
    :param mpa2: MPArray with same physical shape as mpa1
    :returns: <mpa1|mpa2>

    """
    assert len(mpa1) == len(mpa2), \
        "Length is not equal: {} != {}".format(len(mpa1), len(mpa2))
    ltens_new = (_local_dot(_local_ravel(l).conj(), _local_ravel(r), axes=(1, 1))
                 for l, r in zip(mpa1.lt, mpa2.lt))
    return _ltens_to_array(ltens_new)[0, ..., 0] 
Example 29
Project: mpnum   Author: dseuss   File: mparray.py    (BSD 3-Clause "New" or "Revised" License)
def chain(mpas, astype=None):
    """Computes the tensor product of MPAs given in ``*args`` by adding more
    sites to the array.

    :param mpas: Iterable of MPAs in the order as they should appear in the
        chain
    :param astype: dtype of the returned MPA. If ``None``, use the type of the
        first MPA.
    :returns: MPA whose length is the sum of the lengths of the given MPAs

    .. todo:: Make this canonicalization aware
    .. todo:: Raise warning when casting complex to real dtype

    """
    mpas = iter(mpas)
    try:
        first = next(mpas)
    except StopIteration:
        raise ValueError('Argument `mpas` is an empty list')
    rest = (lt for mpa in mpas for lt in mpa.lt)
    if astype is None:
        astype = type(first)
    return astype(it.chain(first.lt, rest)) 
Example 30
Project: mpnum   Author: dseuss   File: mparray.py    (BSD 3-Clause "New" or "Revised" License)
def norm(mpa):
    """Computes the norm (Hilbert space norm for MPS, Frobenius norm for MPO)
    of the matrix product operator. In contrast to ``mparray.inner``, this can
    take advantage of the canonicalization

    WARNING This also changes the MPA in place by normalizing.

    :param mpa: MPArray
    :returns: l2-norm of that array

    """
    mpa.canonicalize()
    current_lcanon, current_rcanon = mpa.canonical_form

    if current_rcanon == 1:
        return np.linalg.norm(mpa.lt[0])
    elif current_lcanon == len(mpa) - 1:
        return np.linalg.norm(mpa.lt[-1])
    else:
        raise ValueError("Normalization error in MPArray.norm") 
Example 31
Project: mpnum   Author: dseuss   File: mparray_test.py    (BSD 3-Clause "New" or "Revised" License)
def test_split(nr_sites, local_dim, rank, rgen):
    if nr_sites < 2:
        return
    mpa = factory.random_mpa(nr_sites, local_dim, rank, randstate=rgen)
    for pos in range(nr_sites - 1):
        mpa_l, mpa_r = mpa.split(pos)
        assert len(mpa_l) == pos + 1
        assert len(mpa_l) + len(mpa_r) == nr_sites
        assert_correct_normalization(mpa_l)
        assert_correct_normalization(mpa_r)
        recons = np.tensordot(mpa_l.to_array(), mpa_r.to_array(), axes=(-1, 0))
        assert_array_almost_equal(mpa.to_array(), recons)

    for (lnorm, rnorm) in it.product(range(nr_sites - 1), range(1, nr_sites)):
        mpa_l, mpa_r = mpa.split(nr_sites // 2 - 1)
        assert_correct_normalization(mpa_l)
        assert_correct_normalization(mpa_r) 
Example 32
Project: mpnum   Author: dseuss   File: povm_test.py    (BSD 3-Clause "New" or "Revised" License)
def test_povm_ic_mpa(nr_sites, local_dim, rank, rgen):
    # Check that the tensor product of the PauliGen POVM is IC.
    paulis = povm.pauli_povm(local_dim)
    inv_map = mp_from_array_repeat(paulis.linear_inversion_map, nr_sites)
    probab_map = mp_from_array_repeat(paulis.probability_map, nr_sites)
    reconstruction_map = mp.dot(inv_map, probab_map)

    eye = factory.eye(nr_sites, local_dim**2)
    assert mp.norm(reconstruction_map - eye) < 1e-5

    # Check linear inversion for a particular example MPA.
    # Linear inversion works for arbitrary matrices, not only for states,
    # so we test it for an arbitrary MPA.
    # Normalize, otherwise the absolute error check below will not work.
    mpa = factory.random_mpa(nr_sites, local_dim**2, rank,
                             dtype=np.complex_, randstate=rgen, normalized=True)
    probabs = mp.dot(probab_map, mpa)
    recons = mp.dot(inv_map, probabs)
    assert mp.norm(recons - mpa) < 1e-6 
Example 33
Project: crypto-sentiment   Author: codingupastorm   File: vadersentiment.py    (license)
def _words_plus_punc(self):
        """
        Returns mapping of form:
        {
            'cat,': 'cat',
            ',cat': 'cat',
        }
        """
        no_punc_text = REGEX_REMOVE_PUNCTUATION.sub('', self.text)
        # removes punctuation (but loses emoticons & contractions)
        words_only = no_punc_text.split()
        # remove singletons
        words_only = set( w for w in words_only if len(w) > 1 )
        # the product gives ('cat', ',') and (',', 'cat')
        punc_before = {''.join(p): p[1] for p in product(PUNC_LIST, words_only)}
        punc_after = {''.join(p): p[0] for p in product(words_only, PUNC_LIST)}
        words_punc_dict = punc_before
        words_punc_dict.update(punc_after)
        return words_punc_dict 
Example 34
Project: gixy   Author: yandex   File: regexp.py    (Mozilla Public License 2.0)
def _gen_combinator(variants, _merge=True):
    if not hasattr(variants, '__iter__'):
        return [variants] if variants is not None else []

    res = []
    need_product = False
    for var in variants:
        if isinstance(var, list):
            sol = _gen_combinator(var, _merge=False)
            res.append(sol)
            need_product = True
        elif var is not None:
            res.append(var)

    if need_product:
        producted = itertools.product(*res)
        if _merge:
            # TODO(buglloc): ??!
            return list(six.moves.map(_merge_variants, producted))
        return producted
    elif _merge:
        return list(six.moves.map(_merge_variants, [res]))
    return res 
Example 35
Project: OptML   Author: johannespetrat   File: gridsearch_optimizer.py    (license)
def build_grid(self, grid_sizes):
        grid_dict = {}
        for param_name, param in self.param_dict.items():
            if param.param_type == 'continuous':
                grid_dict[param_name] = np.linspace(param.lower, param.upper, grid_sizes[param_name])
            elif param.param_type == 'integer':
                step_size = int(round((param.upper - param.lower)/float(grid_sizes[param_name])))
                grid_dict[param_name] = np.concatenate([np.arange(param.lower, param.upper, step_size), [param.upper]])
            elif param.param_type == 'categorical':
                grid_dict[param_name] = param.possible_values
            elif param.param_type == 'boolean':
                grid_dict[param_name] = [True, False]
        # now build the grid as a list with all possible combinations i.e. the cartesian product
        grid = []
        for params in list(itertools.product(*[[(k,v) for v in vals] for k, vals in grid_dict.items()])):
            grid.append(dict(params))
        return grid 
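
The final comprehension is a hand-rolled parameter grid (the same idea as sklearn's ParameterGrid): each key's values are first tagged with their key, product() crosses the tagged lists, and dict() reassembles one settings dict per combination. A minimal sketch with hypothetical parameter names:

from itertools import product

# 'depth' and 'shrinkage' are hypothetical parameter names for illustration.
grid_dict = {'depth': [2, 4], 'shrinkage': [0.1, 0.5]}
grid = [dict(params)
        for params in product(*[[(k, v) for v in vals]
                                for k, vals in grid_dict.items()])]
print(grid)
# [{'depth': 2, 'shrinkage': 0.1}, {'depth': 2, 'shrinkage': 0.5},
#  {'depth': 4, 'shrinkage': 0.1}, {'depth': 4, 'shrinkage': 0.5}]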
Example 36
Project: pytorch-dist   Author: apaszke   File: test_torch.py    (license)
def assertIsOrdered(self, order, x, mxx, ixx, task):
        SIZE = 4
        if order == 'descending':
            check_order = lambda a, b: a >= b
        elif order == 'ascending':
            check_order = lambda a, b: a <= b
        else:
            raise ValueError('unknown order "{}", must be "ascending" or "descending"'.format(order))

        are_ordered = True
        for j, k in product(range(SIZE), range(1, SIZE)):
            self.assertTrue(check_order(mxx[j][k-1], mxx[j][k]),
                    'torch.sort ({}) values unordered for {}'.format(order, task))

        seen = set()
        indicesCorrect = True
        size = x.size(x.dim()-1)
        for k in range(size):
            seen.clear()
            for j in range(size):
                self.assertEqual(x[k][ixx[k][j]], mxx[k][j],
                        'torch.sort ({}) indices wrong for {}'.format(order, task))
                seen.add(ixx[k][j])
            self.assertEqual(len(seen), size) 
Example 37
Project: pytorch-dist   Author: apaszke   File: OptionalArguments.py    (license)
def process_declarations(self, declarations):
        new_options = []
        for declaration in declarations:
            for option in declaration['options']:
                optional_args = []
                for i, arg in enumerate(option['arguments']):
                    if 'default' in arg:
                        optional_args.append(i)
                for permutation in product((True, False), repeat=len(optional_args)):
                    option_copy = deepcopy(option)
                    for i, bit in zip(optional_args, permutation):
                        arg = option_copy['arguments'][i]
                        if not bit:
                            arg['type'] = 'CONSTANT'
                            arg['ignore_check'] = True
                            # PyYAML interprets NULL as None...
                            arg['name'] = 'NULL' if arg['default'] is None else arg['default']
                    new_options.append(option_copy)
            declaration['options'] = self.filter_unique_options(declaration['options'] + new_options)
        return declarations 
Example 38
Project: pytorch-dist   Author: apaszke   File: THPPlugin.py    (license)
def make_stateless(self, declaration):
        declaration['name'] = 'THPTensor_stateless_({})'.format(declaration['name'])
        new_options = []
        for option in declaration['options']:
            option['cname'] = 'THTensor_({})'.format(option['cname'])
            allocated = []
            for i, arg in enumerate(option['arguments']):
                if 'allocate' in arg and arg['allocate']:
                    arg['ignore_check'] = True
                    allocated.append(i)
                if arg['name'] == 'self':
                    arg['name'] = 'source'
            for permutation in product((True, False), repeat=len(allocated)):
                option_copy = deepcopy(option)
                for i, bit in zip(allocated, permutation):
                    arg = option_copy['arguments'][i]
                    # By default everything is allocated, so we don't have to do anything
                    if not bit:
                        del arg['allocate']
                        del arg['ignore_check']
                new_options.append(option_copy)
        declaration['options'] = self.filter_unique_options(declaration['options'] + new_options)
        return declaration 
Example 39
Project: q2-diversity   Author: qiime2   File: _visualizer.py    (license)
def _compute_rarefaction_data(feature_table, min_depth, max_depth, steps,
                              iterations, phylogeny, metrics):
    depth_range = np.linspace(min_depth, max_depth, num=steps, dtype=int)
    iter_range = range(1, iterations + 1)

    rows = feature_table.ids(axis='sample')
    cols = pd.MultiIndex.from_product([list(depth_range), list(iter_range)],
                                      names=['depth', 'iter'])
    data = {k: pd.DataFrame(np.NaN, index=rows, columns=cols)
            for k in metrics}

    for d, i in itertools.product(depth_range, iter_range):
        rt = rarefy(feature_table, d)
        for m in metrics:
            if m in phylogenetic_metrics():
                vector = alpha_phylogenetic(table=rt, metric=m,
                                            phylogeny=phylogeny)
            else:
                vector = alpha(table=rt, metric=m)
            data[m][(d, i)] = vector
    return data 
Example 40
Project: planet-b-saleor   Author: planet-b   File: random_data.py    (license)
def set_variant_attributes(variant, product_class):
    attr_dict = {}
    existing_variants = variant.product.variants.values_list('attributes',
                                                             flat=True)
    existing_variant_attributes = defaultdict(list)
    for variant_attrs in existing_variants:
        for attr_id, value_id in variant_attrs.items():
            existing_variant_attributes[attr_id].append(value_id)

    for product_attribute in product_class.variant_attributes.all():
        available_values = product_attribute.values.exclude(
            pk__in=[int(pk) for pk
                    in existing_variant_attributes[str(product_attribute.pk)]])
        if not available_values:
            return
        value = random.choice(available_values)
        attr_dict[str(product_attribute.pk)] = str(value.pk)
    variant.attributes = attr_dict
    variant.save(update_fields=['attributes']) 
Example 41
Project: dexpy   Author: statease   File: test_power.py    (license)
def test_large_power(cls):
        """Test power for a 9 factor model."""
        factor_count = 9

        factor_data = []
        # generate a 2^9 factorial
        for run in itertools.product([-1, 1], repeat=factor_count):
            factor_data.append(list(run))
        factor_data = pd.DataFrame(factor_data, columns=design.get_factor_names(factor_count))

        model = "(X1+X2+X3+X4+X5+X6+X7+X8+X9)**4" # will generate a 4fi model

        power_result = power.f_power(model, factor_data, 0.2, 0.05)

        answer = np.ndarray(256)
        answer.fill(0.61574355066172015)
        answer[0] = 0.99459040972676238
        np.testing.assert_allclose(power_result, answer, rtol=1e-4) 
Example 42
Project: phredutils   Author: doctaphred   File: maths.py    (license)
def reordered_digit_map(exponents, base=2):
    """Construct a mapping which answers the question:

    If a base's exponents are applied to a number's digits in arbitrary
    order (rather than the conventional greatest-to-least/"big-endian"
    ordering), what will its conventionally-calculated value be?

    Since every possible value will be included in this mapping, it is
    implemented as an indexable tuple rather than a dict.

    >>> reordered_digit_map([1, 0])
    (0, 1, 2, 3)
    >>> reordered_digit_map([0, 1])
    (0, 2, 1, 3)
    """
    assert sorted(exponents) == list(range(len(exponents)))
    digit_values = range(base)
    return tuple(
        sum(digit * (base ** exponent)
            for digit, exponent in zip(digits, exponents))
        for digits in product(digit_values, repeat=len(exponents))
    ) 
Example 43
Project: deb-python-pint   Author: openstack   File: test_contexts.py    (license)
def test_spectroscopy(self):
        ureg = self.ureg
        eq = (532. * ureg.nm, 563.5 * ureg.terahertz, 2.33053 * ureg.eV)
        with ureg.context('sp'):
            from pint.util import find_shortest_path
            for a, b in itertools.product(eq, eq):
                for x in range(2):
                    if x == 1:
                        a = a.to_base_units()
                        b = b.to_base_units()
                    da, db = Context.__keytransform__(a.dimensionality,
                                                      b.dimensionality)
                    p = find_shortest_path(ureg._active_ctx.graph, da, db)
                    self.assertTrue(p)
                    msg = '{0} <-> {1}'.format(a, b)
                    # assertAlmostEqualRelError converts second to first
                    self.assertQuantityAlmostEqual(b, a, rtol=0.01, msg=msg)


        for a, b in itertools.product(eq, eq):
            self.assertQuantityAlmostEqual(a.to(b.units, 'sp'), b, rtol=0.01) 
Example 44
Project: deb-python-pint   Author: openstack   File: test_pitheorem.py    (license)
def test_inputs(self):
        V = 'km/hour'
        T = 'ms'
        L = 'cm'

        f1 = lambda x: x
        f2 = lambda x: self.Q_(1, x)
        f3 = lambda x: self.Q_(1, x).units
        f4 = lambda x: self.Q_(1, x).dimensionality

        fs = f1, f2, f3, f4
        for fv, ft, fl in itertools.product(fs, fs, fs):
            qv = fv(V)
            qt = ft(T)
            ql = fl(L)
            self.assertEqual(self.ureg.pi_theorem({'V': qv, 'T': qt, 'L': ql}),
                             [{'V': 1.0, 'T': 1.0, 'L': -1.0}]) 
Example 45
Project: deb-python-pint   Author: openstack   File: registry.py    (license)
def parse_unit_name(self, unit_name, case_sensitive=True):
        """Parse a unit to identify prefix, unit name and suffix
        by walking the list of prefix and suffix.

        :rtype: (str, str, str)
        """
        stw = unit_name.startswith
        edw = unit_name.endswith
        for suffix, prefix in itertools.product(self._suffixes, self._prefixes):
            if stw(prefix) and edw(suffix):
                name = unit_name[len(prefix):]
                if suffix:
                    name = name[:-len(suffix)]
                    if len(name) == 1:
                        continue
                if case_sensitive:
                    if name in self._units:
                        yield (self._prefixes[prefix].name,
                               self._units[name].name,
                               self._suffixes[suffix])
                else:
                    for real_name in self._units_casei.get(name.lower(), ()):
                        yield (self._prefixes[prefix].name,
                               self._units[real_name].name,
                               self._suffixes[suffix]) 
Example 46
Project: gym-extensions   Author: Breakend   File: geometry_utils.py    (license)
def rectangle_to_rectangle_distance(ca, cb, wa, wb, ha, hb):
    a1 = ca + np.array([wa/2.0, ha/2.0])
    a2 = ca + np.array([wa/2.0, -ha/2.0])
    a3 = ca + np.array([-wa/2.0, -ha/2.0])
    a4 = ca + np.array([-wa/2.0, ha/2.0])

    b1 = cb + np.array([wb/2.0,   hb/2.0])
    b2 = cb + np.array([wb/2.0,  -hb/2.0])
    b3 = cb + np.array([-wb/2.0, -hb/2.0])
    b4 = cb + np.array([-wb/2.0,  hb/2.0])
        
    for e1, e2 in product(rectangle_edges(a1,a2,a3,a4), rectangle_edges(b1,b2,b3,b4)):
        if segments_intersect(e1[0], e1[1], e2[0], e2[1]):
            return 0.0
    
    da1 = point_to_rectangle_distance(a1, cb, wb, hb)
    da2 = point_to_rectangle_distance(a2, cb, wb, hb)
    da3 = point_to_rectangle_distance(a3, cb, wb, hb)
    da4 = point_to_rectangle_distance(a4, cb, wb, hb)

    db1 = point_to_rectangle_distance(b1, ca, wa, ha)
    db2 = point_to_rectangle_distance(b2, ca, wa, ha)
    db3 = point_to_rectangle_distance(b3, ca, wa, ha)
    db4 = point_to_rectangle_distance(b4, ca, wa, ha)    
    return min([da1, da2, da3, da4, db1, db2, db3, db4]) 
Example 47
Project: robot-arena   Author: kenganong   File: board.py    (Apache License 2.0)
def traverse(self):
    return ((self.contents[pos[0]][pos[1]], pos) for pos in itertools.product(range(self.rows), range(self.cols))) 
Example 48
Project: onto-lstm   Author: pdasigi   File: model_sentences.py    (Apache License 2.0)
def _make_one_hot(self, word_inds, vec_size):
    onehot = numpy.zeros((word_inds.shape + (vec_size,)))
    for inds in itertools.product(*[numpy.arange(s) for s in word_inds.shape]):
      onehot[inds+(word_inds[inds],)] = 1
    return onehot 
Example 49
Project: HousePricePredictionKaggle   Author: Nuwantha   File: linearRegression_lassoRegularization.py    (license)
def poly(X):
    areas = ['LotArea', 'TotalBsmtSF', 'GrLivArea', 'GarageArea', 'BsmtUnfSF']
    # t = [s for s in X.axes[1].get_values() if s not in areas]
    t = chain(qu_list.axes[1].get_values(), 
              ['OverallQual', 'OverallCond', 'ExterQual', 'ExterCond', 'BsmtCond', 'GarageQual', 'GarageCond',
               'KitchenQual', 'HeatingQC', 'bad_heating', 'MasVnrType_Any', 'SaleCondition_PriceDown', 'Reconstruct',
               'ReconstructAfterBuy', 'Build.eq.Buy'])
    for a, t in product(areas, t):
        x = X.loc[:, [a, t]].prod(1)
        x.name = a + '_' + t
        yield x 
Example 50
Project: Causality   Author: vcla   File: causal_grammar.py    (MIT License)
def generate_parses(causal_tree):
	node_type = causal_tree["node_type"]
	if "children" not in causal_tree:
		return (causal_tree,)
	partial_causal_parses = []
	# make a copy of the current node, minus the children (so we're keeping symbol_type, symbol, energy, node_type, etc)
	current_node = causal_tree.copy()
	current_node.pop("children")
	if node_type in ("or","root",):
		for child_node in causal_tree["children"]:
			for parse in generate_parses(child_node):
				current_node["children"] = (parse,)
				partial_causal_parses.append(current_node.copy())
	elif node_type in ("and",):
		# generate causal parses on each tree
		# build all cartesian products of those causal parses;
		# each cartesian product is a set of children for the and node, a separate partial parse graph to return
		child_parses = []
		for child_node in causal_tree["children"]:
			child_parses.append(generate_parses(child_node),)
		for product in itertools.product(*child_parses):
			current_node["children"] = product
			partial_causal_parses.append(current_node.copy())
	else:
		raise Exception("UNKNOWN NODE TYPE: {}".format(node_type))
	return partial_causal_parses