Python numpy.product() Examples

The following are 30 code examples of numpy.product(), drawn from open-source projects; each example notes its source file, originating project, and license. You may also want to check out the other available functions and classes of the numpy module.
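Note: in newer NumPy releases, numpy.product() is a deprecated alias of numpy.prod() (the alias was removed in NumPy 2.0), so new code should generally call np.prod instead. A minimal sketch of the basic call, assuming a NumPy version where the alias is still available:

import numpy as np

a = np.array([[1, 2], [3, 4]])
print(np.product(a))          # 24, product of all elements
print(np.product(a, axis=0))  # [3 8], column-wise products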
Example #1
Source File: slowreplib.py    From pyGSTi with Apache License 2.0
def adjoint_acton(self, state):
        """ Act the adjoint of this gate map on an input state """
        #NOTE: Same as acton except uses 'adjoint_acton(...)' below
        output_state = DMStateRep(_np.zeros(state.base.shape, 'd'))
        offset = self.offset  # if relToBlock else self.offset (relToBlock == False here)

        for b in _itertools.product(*self.basisInds_noop_blankaction):  # zeros in all action-index locations
            vec_index_noop = _np.dot(self.multipliers, tuple(b))
            inds = []
            for op_b in _itertools.product(*self.basisInds_action):
                vec_index = vec_index_noop
                for i, bInd in zip(self.actionInds, op_b):
                    #b[i] = bInd #don't need to do this; just update vec_index:
                    vec_index += self.multipliers[i] * bInd
                inds.append(offset + vec_index)
            embedded_instate = DMStateRep(state.base[inds])
            embedded_outstate = self.embedded.adjoint_acton(embedded_instate)
            output_state.base[inds] += embedded_outstate.base

        #act on other blocks trivially:
        self._acton_other_blocks_trivially(output_state, state)
        return output_state 
Example #2
Source File: pauliobjs.py    From pyGSTi with Apache License 2.0
def dot(self, other):
        """
        Computes the Hilbert-Schmidt dot product (normed to 1) between this
        Pauli operator and `other`.

        Parameters
        ----------
        other : NQPauliOp
            The other operator to take a dot product with.

        Returns
        -------
        integer
            Either 0, 1, or -1.
        """
        assert(len(self) == len(other)), "Length mismatch!"
        if other.rep == self.rep:
            return self.sign * other.sign
        else:
            return 0 
Example #3
Source File: test_parameters.py    From pywr with GNU General Public License v3.0
def test_basic_use(self, simple_linear_model):
        """ Test the basic use of `ConstantParameter` using the Python API """
        model = simple_linear_model
        # Add two scenarios
        scA = Scenario(model, 'Scenario A', size=2)
        scB = Scenario(model, 'Scenario B', size=5)

        p = ConstantParameter(model, np.pi, name='pi', comment='Mmmmm Pi!')

        assert not p.is_variable
        assert p.double_size == 1
        assert p.integer_size == 0

        model.setup()
        ts = model.timestepper.current
        # Now ensure the appropriate value is returned for all scenarios
        for i, (a, b) in enumerate(itertools.product(range(scA.size), range(scB.size))):
            si = ScenarioIndex(i, np.array([a, b], dtype=np.int32))
            np.testing.assert_allclose(p.value(ts, si), np.pi) 
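The scenario loop above is easier to see in isolation: enumerate over itertools.product pairs every index of Scenario A with every index of Scenario B and supplies the global index passed to ScenarioIndex. A minimal standalone sketch:

import itertools

for i, (a, b) in enumerate(itertools.product(range(2), range(5))):
    print(i, a, b)   # (0, 0, 0), (1, 0, 1), ..., (9, 1, 4)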
Example #4
Source File: test_parameters.py    From pywr with GNU General Public License v3.0
def test_parameter_constant_scenario(simple_linear_model):
    """
    Test ConstantScenarioParameter

    """
    model = simple_linear_model
    # Add two scenarios
    scA = Scenario(model, 'Scenario A', size=2)
    scB = Scenario(model, 'Scenario B', size=5)

    p = ConstantScenarioParameter(model, scB, np.arange(scB.size, dtype=np.float64))
    model.setup()
    ts = model.timestepper.current
    # Now ensure the appropriate value is returned for the Scenario B indices.
    for i, (a, b) in enumerate(itertools.product(range(scA.size), range(scB.size))):
        si = ScenarioIndex(i, np.array([a, b], dtype=np.int32))
        np.testing.assert_allclose(p.value(ts, si), float(b)) 
Example #5
Source File: test_parameters.py    From pywr with GNU General Public License v3.0
def test_parameter_constant_scenario(simple_linear_model):
    """
    Test ConstantScenarioIndexParameter

    """
    model = simple_linear_model
    # Add two scenarios
    scA = Scenario(model, 'Scenario A', size=2)
    scB = Scenario(model, 'Scenario B', size=5)

    p = ConstantScenarioIndexParameter(model, scB, np.arange(scB.size, dtype=np.int32))
    model.setup()
    ts = model.timestepper.current
    # Now ensure the appropriate value is returned for the Scenario B indices.
    for i, (a, b) in enumerate(itertools.product(range(scA.size), range(scB.size))):
        si = ScenarioIndex(i, np.array([a, b], dtype=np.int32))
        np.testing.assert_allclose(p.index(ts, si), b) 
Example #6
Source File: TermDocMatrixFilter.py    From scattertext with Apache License 2.0
def get_low_pmi_bigrams(threshold_coef, word_freq_df):
	# type: (float, pd.DataFrame) -> object
	is_bigram = np.array([' ' in word for word in word_freq_df.index])
	unigram_freq = word_freq_df[~is_bigram].sum(axis=1)
	bigram_freq = word_freq_df[is_bigram].sum(axis=1)
	bigram_prob = bigram_freq / bigram_freq.sum()
	unigram_prob = unigram_freq / unigram_freq.sum()

	def get_pmi(bigram):
		try:
			return np.log(
				bigram_prob[bigram] / np.product([unigram_prob[word] for word in bigram.split(' ')])
			) / np.log(2)
		except:
			return 0

	low_pmi_bigrams = bigram_prob[bigram_prob.index.map(get_pmi) < threshold_coef * 2]
	return low_pmi_bigrams 
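A minimal, self-contained sketch of the computation inside get_pmi above, with made-up probabilities standing in for the DataFrame-derived ones:

import numpy as np

bigram_prob = 0.001            # hypothetical P("new york")
unigram_probs = [0.01, 0.02]   # hypothetical P("new"), P("york")
pmi = np.log(bigram_prob / np.product(unigram_probs)) / np.log(2)
print(pmi)   # ~2.32 bits: the bigram is ~5x more frequent than independence predicts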
Example #7
Source File: test_parameters.py    From pywr with GNU General Public License v3.0
def test_load(self, simple_linear_model):
        """ Test load from JSON dict"""
        model = simple_linear_model
        data = {
            "type": "aggregated",
            "agg_func": "product",
            "parameters": [
                0.8,
                {
                    "type": "monthlyprofile",
                    "values": list(range(12))
                }
            ]
        }

        p = load_parameter(model, data)
        # Correct instance is loaded
        assert isinstance(p, AggregatedParameter)

        @assert_rec(model, p)
        def expected(timestep, scenario_index):
            return (timestep.month - 1) * 0.8

        model.run() 
Example #8
Source File: test_core.py    From recruit with Apache License 2.0
def test_addsumprod(self):
        # Tests add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(np.add.reduce(x), add.reduce(x))
        assert_equal(np.add.accumulate(x), add.accumulate(x))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(np.sum(x, axis=0), sum(x, axis=0))
        assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
        assert_equal(np.sum(x, 0), sum(x, 0))
        assert_equal(np.product(x, axis=0), product(x, axis=0))
        assert_equal(np.product(x, 0), product(x, 0))
        assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
        s = (3, 4)
        x.shape = y.shape = xm.shape = ym.shape = s
        if len(s) > 1:
            assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
            assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
            assert_equal(np.sum(x, 1), sum(x, 1))
            assert_equal(np.product(x, 1), product(x, 1)) 
Example #9
Source File: helper_functions_twitter.py    From glc with Apache License 2.0
def word_list_to_embedding_product(words, embeddings, embedding_dimension=50):
    '''
    :param words: an n x (2*window_size + 1) matrix from data_to_mat
    :param embeddings: an embedding dictionary where keys are strings and values
    are embeddings; the output from embeddings_to_dict
    :param embedding_dimension: the dimension of the values in embeddings; in this
    assignment, embedding_dimension=50
    :return: an n x embedding_dimension matrix in which each row is the Hadamard
    (element-wise) product of the embeddings of one example's words
    '''
    m, n = words.shape
    words = words.reshape((-1))

    out = np.array([embeddings[w] for w in words], dtype=np.float32).reshape(m, n, embedding_dimension)
    return np.product(out, 1)

# Problem 1 Part 2 
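A toy sketch of the final np.product(out, 1) call above: the product is taken element-wise across the window axis (the Hadamard product of the word embeddings), leaving one embedding-sized row per example. The values are made up:

import numpy as np

out = np.array([[[1., 2.], [3., 4.]],    # example 1: two word embeddings
                [[5., 6.], [7., 8.]]])   # example 2
print(np.product(out, 1))                # [[ 3.  8.]
                                         #  [35. 48.]]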
Example #10
Source File: yolov3_to_onnx.py    From iAI with MIT License
def _load_one_param_type(self, conv_params, param_category, suffix):
        """Deserializes the weights from a file stream in the DarkNet order.

        Keyword arguments:
        conv_params -- a ConvParams object
        param_category -- the category of parameters to be created ('bn' or 'conv')
        suffix -- a string determining the sub-type of above param_category (e.g.,
        'weights' or 'bias')
        """
        param_name = conv_params.generate_param_name(param_category, suffix)
        channels_out, channels_in, filter_h, filter_w = conv_params.conv_weight_dims
        if param_category == 'bn':
            param_shape = [channels_out]
        elif param_category == 'conv':
            if suffix == 'weights':
                param_shape = [channels_out, channels_in, filter_h, filter_w]
            elif suffix == 'bias':
                param_shape = [channels_out]
        param_size = np.product(np.array(param_shape))
        param_data = np.ndarray(
            shape=param_shape,
            dtype='float32',
            buffer=self.weights_file.read(param_size * 4))
        param_data = param_data.flatten().astype(float)
        return param_name, param_data, param_shape 
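The np.product(np.array(param_shape)) call above simply counts elements; multiplying by 4 gives the number of bytes of float32 data to read from the weights file. A minimal sketch with a hypothetical convolution weight shape:

import numpy as np

param_shape = [64, 32, 3, 3]                    # hypothetical conv weight dims
param_size = np.product(np.array(param_shape))
print(param_size, param_size * 4)               # 18432 elements, 73728 bytes of float32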
Example #11
Source File: yolov3_to_onnx.py    From iAI with MIT License
def _load_one_param_type(self, conv_params, param_category, suffix):
        """Deserializes the weights from a file stream in the DarkNet order.

        Keyword arguments:
        conv_params -- a ConvParams object
        param_category -- the category of parameters to be created ('bn' or 'conv')
        suffix -- a string determining the sub-type of above param_category (e.g.,
        'weights' or 'bias')
        """
        param_name = conv_params.generate_param_name(param_category, suffix)
        channels_out, channels_in, filter_h, filter_w = conv_params.conv_weight_dims
        if param_category == 'bn':
            param_shape = [channels_out]
        elif param_category == 'conv':
            if suffix == 'weights':
                param_shape = [channels_out, channels_in, filter_h, filter_w]
            elif suffix == 'bias':
                param_shape = [channels_out]
        param_size = np.product(np.array(param_shape))
        param_data = np.ndarray(
            shape=param_shape,
            dtype='float32',
            buffer=self.weights_file.read(param_size * 4))
        param_data = param_data.flatten().astype(float)
        return param_name, param_data, param_shape 
Example #12
Source File: test_old_ma.py    From recruit with Apache License 2.0
def test_testAddSumProd(self):
        # Test add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        assert_(eq(np.add.reduce(x), add.reduce(x)))
        assert_(eq(np.add.accumulate(x), add.accumulate(x)))
        assert_(eq(4, sum(array(4), axis=0)))
        assert_(eq(4, sum(array(4), axis=0)))
        assert_(eq(np.sum(x, axis=0), sum(x, axis=0)))
        assert_(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
        assert_(eq(np.sum(x, 0), sum(x, 0)))
        assert_(eq(np.product(x, axis=0), product(x, axis=0)))
        assert_(eq(np.product(x, 0), product(x, 0)))
        assert_(eq(np.product(filled(xm, 1), axis=0),
                           product(xm, axis=0)))
        if len(s) > 1:
            assert_(eq(np.concatenate((x, y), 1),
                               concatenate((xm, ym), 1)))
            assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
            assert_(eq(np.sum(x, 1), sum(x, 1)))
            assert_(eq(np.product(x, 1), product(x, 1))) 
Example #13
Source File: polynomial.py    From pyGSTi with Apache License 2.0
def evaluate(self, variable_values):
        """
        Evaluate this polynomial for a given set of variable values.

        Parameters
        ----------
        variable_values : array-like
            An object that can be indexed so that `variable_values[i]` gives the
            numerical value for i-th variable (x_i).

        Returns
        -------
        float or complex
            Depending on the types of the coefficients and `variable_values`.
        """
        #FUTURE: make this function smarter (Russian peasant)
        ret = 0
        for ivar, coeff in self.items():
            ret += coeff * _np.product([variable_values[i] for i in ivar])
        assert(_np.isclose(ret, self.fastpoly.evaluate(variable_values)))
        self.check_fastpoly()
        return ret 
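A minimal standalone sketch of the evaluation rule above, with a hypothetical coefficient dictionary standing in for self.items(): each key lists the variable indices of a term, the empty tuple is the constant term, and np.product of an empty list is 1:

import numpy as np

coeffs = {(): 1.0, (0,): 2.0, (0, 1): 3.0}   # 1 + 2*x0 + 3*x0*x1
variable_values = [2.0, 5.0]
ret = sum(c * np.product([variable_values[i] for i in ivar])
          for ivar, c in coeffs.items())
print(ret)   # 1 + 2*2 + 3*2*5 = 35.0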
Example #14
Source File: utils.py    From vampyre with MIT License
def repeat_sum(u,shape,rep_axes):
    """
    Computes sum of a repeated matrix
    
    In effect, this routine computes 
    code:`np.sum(repeat(u,shape,rep_axes))`.  However, it performs
    this without having to perform the full repetition.
    
    """
    # Must convert to np.array to perform slicing
    shape_vec = np.array(shape,dtype=int)
    rep_vec = np.array(rep_axes,dtype=int)
    
    # repeat and sum
    urep = repeat_axes(u,shape,rep_axes,rep=False)
    usum = np.sum(urep)*np.product(shape_vec[rep_vec])
    return usum 
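The identity repeat_sum relies on can be checked with plain NumPy broadcasting (repeat_axes comes from vampyre and, with rep=False, presumably leaves u unrepeated): summing the fully repeated array equals the sum of u times the product of the repeated-axis sizes. A sketch with assumed shapes:

import numpy as np

u = np.arange(6.0).reshape(2, 3, 1)    # unrepeated array
shape = (2, 3, 4)                      # shape after repetition
rep_axes = (2,)                        # axes along which u is repeated
urep = np.broadcast_to(u, shape)       # full repetition, for checking only

shape_vec = np.array(shape, dtype=int)
rep_vec = np.array(rep_axes, dtype=int)
fast = np.sum(u) * np.product(shape_vec[rep_vec])
assert np.isclose(np.sum(urep), fast)  # both equal 60.0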
Example #15
Source File: labeldicts.py    From pyGSTi with Apache License 2.0
def tensor_product_block_dims(self, iTPB):  # unused
        """
        Get the dimension corresponding to each label in the
        `iTPB`-th tensor-product block.  The dimension of the
        entire block is the product of these.

        Parameters
        ----------
        iTPB : int
           The index of the tensor product block whose state-space
           dimensions you wish to retrieve.

        Returns
        -------
        tuple
        """
        return tuple((self.labeldims[lbl] for lbl in self.labels[iTPB])) 
Example #16
Source File: gamp.py    From vampyre with MIT License
def cost_adjust(self,r,z,rvar,zvar,shape,var_axes):
        """
        Computes the cost adjustment term for the
        Bethe Free Energy:
            
            J = beta*[log(2*pi*rvar) + ((z-r)**2 + zvar)/rvar]
            
        where beta = 1 for complex problems and 0 for real problems
        """    
        J0 = np.mean(np.log(2*np.pi*rvar))*np.product(shape)
        rvar_rep = common.repeat_axes(rvar,shape,\
                                      var_axes,rep=False)
        J1 = np.sum(np.abs(r-z)**2/rvar_rep)
        J2 = np.mean(zvar/rvar)*np.product(shape)
        J = J0 + J1 + J2
        if not self.is_complex:
            J = J / 2
        return J 
Example #17
Source File: dnn.py    From deep_architect with MIT License
def affine_simplified(h_num_hidden):

    def compile_fn(di, dh):
        shape = di['in'].get_shape().as_list()
        n = np.product(shape[1:])

        def forward_fn(di):
            In = di['in']
            if len(shape) > 2:
                In = tf.reshape(In, [-1, n])
            return {'out': tf.layers.dense(In, dh['num_hidden'])}

        return forward_fn

    return siso_tensorflow_module('AffineSimplified', compile_fn,
                                  {'num_hidden': h_num_hidden}) 
Example #18
Source File: dnn.py    From deep_architect with MIT License
def affine(h_num_hidden, h_W_init_fn, h_b_init_fn):

    def compile_fn(di, dh):
        m = dh['num_hidden']
        shape = di['in'].get_shape().as_list()
        n = np.product(shape[1:])
        W = tf.Variable(dh['W_init_fn']([n, m]))
        b = tf.Variable(dh['b_init_fn']([m]))

        def forward_fn(di):
            In = di['in']
            if len(shape) > 2:
                In = tf.reshape(In, [-1, n])
            return {'out': tf.add(tf.matmul(In, W), b)}

        return forward_fn

    return siso_tensorflow_module(
        'Affine', compile_fn, {
            'num_hidden': h_num_hidden,
            'W_init_fn': h_W_init_fn,
            'b_init_fn': h_b_init_fn
        }) 
Example #19
Source File: slowreplib.py    From pyGSTi with Apache License 2.0
def adjoint_acton(self, state):
        """ Act the adjoint of this gate map on an input state """
        #NOTE: Same as acton except uses 'adjoint_acton(...)' below
        output_state = SVStateRep(_np.zeros(state.base.shape, complex))
        offset = self.offset  # if relToBlock else self.offset (relToBlock == False here)

        for b in _itertools.product(*self.basisInds_noop_blankaction):  # zeros in all action-index locations
            vec_index_noop = _np.dot(self.multipliers, tuple(b))
            inds = []
            for op_b in _itertools.product(*self.basisInds_action):
                vec_index = vec_index_noop
                for i, bInd in zip(self.actionInds, op_b):
                    #b[i] = bInd #don't need to do this; just update vec_index:
                    vec_index += self.multipliers[i] * bInd
                inds.append(offset + vec_index)
            embedded_instate = SVStateRep(state.base[inds])
            embedded_outstate = self.embedded.adjoint_acton(embedded_instate)
            output_state.base[inds] += embedded_outstate.base

        #act on other blocks trivially:
        self._acton_other_blocks_trivially(output_state, state)
        return output_state 
Example #20
Source File: slowreplib.py    From pyGSTi with Apache License 2.0
def acton(self, state):
        output_state = SVStateRep(_np.zeros(state.base.shape, complex))
        offset = self.offset  # if relToBlock else self.offset (relToBlock == False here)

        for b in _itertools.product(*self.basisInds_noop_blankaction):  # zeros in all action-index locations
            vec_index_noop = _np.dot(self.multipliers, tuple(b))
            inds = []
            for op_b in _itertools.product(*self.basisInds_action):
                vec_index = vec_index_noop
                for i, bInd in zip(self.actionInds, op_b):
                    #b[i] = bInd #don't need to do this; just update vec_index:
                    vec_index += self.multipliers[i] * bInd
                inds.append(offset + vec_index)
            embedded_instate = SVStateRep(state.base[inds])
            embedded_outstate = self.embedded.acton(embedded_instate)
            output_state.base[inds] += embedded_outstate.base

        #act on other blocks trivially:
        self._acton_other_blocks_trivially(output_state, state)
        return output_state 
Example #21
Source File: slowreplib.py    From pyGSTi with Apache License 2.0
def __init__(self, embedded_op, numBasisEls, actionInds,
                 blocksizes, embedded_dim, nComponentsInActiveBlock,
                 iActiveBlock, nBlocks, dim):

        self.embedded = embedded_op
        self.numBasisEls = numBasisEls
        self.actionInds = actionInds
        self.blocksizes = blocksizes

        numBasisEls_noop_blankaction = numBasisEls.copy()
        for i in actionInds: numBasisEls_noop_blankaction[i] = 1
        self.basisInds_noop_blankaction = [list(range(n)) for n in numBasisEls_noop_blankaction]

        # multipliers to go from per-label indices to tensor-product-block index
        # e.g. if map(len,basisInds) == [1,4,4] then multipliers == [ 16 4 1 ]
        self.multipliers = _np.array(_np.flipud(_np.cumprod([1] + list(
            reversed(list(numBasisEls[1:]))))), _np.int64)
        self.basisInds_action = [list(range(numBasisEls[i])) for i in actionInds]

        self.embeddedDim = embedded_dim
        self.nComponents = nComponentsInActiveBlock
        self.iActiveBlock = iActiveBlock
        self.nBlocks = nBlocks
        self.offset = sum(blocksizes[0:iActiveBlock])
        super(SVOpRep_Embedded, self).__init__(dim) 
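A minimal sketch verifying the multiplier computation above for the case given in the comment: per-label basis sizes [1, 4, 4] yield strides [16, 4, 1], which turn per-label indices into a flat tensor-product-block index via a dot product:

import numpy as np

numBasisEls = np.array([1, 4, 4])
multipliers = np.array(np.flipud(np.cumprod([1] + list(
    reversed(list(numBasisEls[1:]))))), np.int64)
print(multipliers)                       # [16  4  1]
print(np.dot(multipliers, (0, 2, 3)))    # flat index 11 for per-label indices (0, 2, 3)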
Example #22
Source File: povm.py    From pyGSTi with Apache License 2.0
def __init__(self, povm_to_marginalize, all_sslbls, sslbls_after_marginalizing):
        """ TODO: docstring """
        self.povm_to_marginalize = povm_to_marginalize

        if isinstance(all_sslbls, _ld.StateSpaceLabels):
            assert(len(all_sslbls.labels) == 1), "all_sslbls should only have a single tensor product block!"
            all_sslbls = all_sslbls.labels[0]

        #now all_sslbls is a tuple of labels, like sslbls_after_marginalizing
        self.sslbls_to_marginalize = all_sslbls
        self.sslbls_after_marginalizing = sslbls_after_marginalizing
        indices_to_keep = set([list(all_sslbls).index(l) for l in sslbls_after_marginalizing])
        indices_to_remove = set(range(len(all_sslbls))) - indices_to_keep
        self.indices_to_marginalize = sorted(indices_to_remove, reverse=True)

        elements_to_sum = {}
        for k in self.povm_to_marginalize.keys():
            mk = self.marginalize_effect_label(k)
            if mk in elements_to_sum:
                elements_to_sum[mk].append(k)
            else:
                elements_to_sum[mk] = [k]
        self._elements_to_sum = {k: tuple(v) for k, v in elements_to_sum.items()}  # convert to tuples
        super(MarginalizedPOVM, self).__init__(self.povm_to_marginalize.dim, self.povm_to_marginalize._evotype) 
Example #23
Source File: test_truncated_svd.py    From mars with Apache License 2.0
def setUp(self):
        # Make an X that looks somewhat like a small tf-idf matrix.
        # XXX newer versions of SciPy >0.16 have scipy.sparse.rand for this.
        shape = 60, 55
        n_samples, n_features = shape
        rng = check_random_state(42)
        X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
        X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
        X.data[:] = 1 + np.log(X.data)
        self.X = X
        self.Xdense = X.A
        self.n_samples = n_samples
        self.n_features = n_features

        self.session = new_session().as_default()
        self._old_executor = self.session._sess._executor
        self.executor = self.session._sess._executor = \
            ExecutorForTest('numpy', storage=self.session._sess._context) 
Example #24
Source File: slowreplib.py    From pyGSTi with Apache License 2.0
def __init__(self, embedded_op, numBasisEls, actionInds,
                 blocksizes, embedded_dim, nComponentsInActiveBlock,
                 iActiveBlock, nBlocks, dim):

        self.embedded = embedded_op
        self.numBasisEls = numBasisEls
        self.actionInds = actionInds
        self.blocksizes = blocksizes

        numBasisEls_noop_blankaction = numBasisEls.copy()
        for i in actionInds: numBasisEls_noop_blankaction[i] = 1
        self.basisInds_noop_blankaction = [list(range(n)) for n in numBasisEls_noop_blankaction]

        # multipliers to go from per-label indices to tensor-product-block index
        # e.g. if map(len,basisInds) == [1,4,4] then multipliers == [ 16 4 1 ]
        self.multipliers = _np.array(_np.flipud(_np.cumprod([1] + list(
            reversed(list(numBasisEls[1:]))))), _np.int64)
        self.basisInds_action = [list(range(numBasisEls[i])) for i in actionInds]

        self.embeddedDim = embedded_dim
        self.nComponents = nComponentsInActiveBlock
        self.iActiveBlock = iActiveBlock
        self.nBlocks = nBlocks
        self.offset = sum(blocksizes[0:iActiveBlock])
        super(DMOpRep_Embedded, self).__init__(dim) 
Example #25
Source File: polynomial.py    From pyGSTi with Apache License 2.0
def evaluate(self, variable_values):
        """
        Evaluate this polynomial for a given set of variable values.

        Parameters
        ----------
        variable_values : array-like
            An object that can be indexed so that `variable_values[i]` gives the
            numerical value for i-th variable (x_i).

        Returns
        -------
        float or complex
            Depending on the types of the coefficients and `variable_values`.
        """
        #FUTURE: make this function smarter (Russian peasant)
        ret = 0
        for ivar, coeff in self.coeffs.items():
            ret += coeff * _np.product([variable_values[i] for i in ivar])
        return ret 
Example #26
Source File: basis.py    From pyGSTi with Apache License 2.0
def _lazy_build_elements(self):
        #LAZY building of elements (in case we never need them)
        compMxs = _np.zeros((self.size,) + self.elshape, 'complex')

        #Take kronecker product of *natural* reps of component-basis elements
        # then reshape to vectors at the end.  This requires that the vector-
        # dimension of the component spaces equals the "natural space" dimension.
        comp_els = [c.elements for c in self.component_bases]
        for i, factors in enumerate(_itertools.product(*comp_els)):
            M = _np.identity(1, 'complex')
            for f in factors:
                M = _np.kron(M, f)
            compMxs[i] = M
        self._elements = compMxs 
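A small sketch of the Kronecker accumulation loop above: starting from a 1x1 identity and folding each factor in with np.kron builds the tensor product of all component elements. The factors here are made up:

import numpy as np

factors = [np.array([[0, 1], [1, 0]]), np.eye(2)]   # hypothetical component elements
M = np.identity(1, 'complex')
for f in factors:
    M = np.kron(M, f)
print(M.shape)   # (4, 4): the tensor-product matrix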
Example #27
Source File: tasks.py    From seizure-prediction with MIT License
def flatten(data):
    if data.ndim == 2:
        return data
    if not data.ndim >= 3:
        print('data shape', data.shape)
        assert data.ndim >= 3
    s = data.shape
    out = data.reshape((np.product(s[0:2]), np.product(s[2:])))

    return out


# Load data for a given pipeline. This wraps load_data_mp to also provide FeatureConcatPipeline support.
# See load_data_mp for description of check_only and meta_only parameters. 
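A tiny sketch of the reshape above with a made-up 4-D shape: np.product collapses the first two axes into one dimension and the remaining axes into another, giving a 2-D matrix:

import numpy as np

data = np.zeros((10, 16, 4, 8))
s = data.shape
out = data.reshape((np.product(s[0:2]), np.product(s[2:])))
print(out.shape)   # (160, 32)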
Example #28
Source File: transforms.py    From seizure-prediction with MIT License
def apply(self, data, meta=None):
        if data.ndim == 2:
            return data.ravel()
        elif data.ndim == 3:
            s = data.shape
            return data.reshape((s[0], np.product(s[1:])))
        else:
            raise NotImplementedError() 
Example #29
Source File: basis.py    From pyGSTi with Apache License 2.0
def _lazy_build_labels(self):
        self._labels = []
        comp_lbls = [c.labels for c in self.component_bases]
        for i, factor_lbls in enumerate(_itertools.product(*comp_lbls)):
            self._labels.append(''.join(factor_lbls)) 
Example #30
Source File: common.py    From lambda-packs with MIT License
def central_diff_weights(Np, ndiv=1):
    """
    Return weights for an Np-point central derivative.

    Assumes equally-spaced function points.

    If weights are in the vector w, then
    derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx)

    Parameters
    ----------
    Np : int
        Number of points for the central derivative.
    ndiv : int, optional
        Number of divisions.  Default is 1.

    Notes
    -----
    Can be inaccurate for large number of points.

    """
    if Np < ndiv + 1:
        raise ValueError("Number of points must be at least the derivative order + 1.")
    if Np % 2 == 0:
        raise ValueError("The number of points must be odd.")
    from scipy import linalg
    ho = Np >> 1
    x = arange(-ho,ho+1.0)
    x = x[:,newaxis]
    X = x**0.0
    for k in range(1,Np):
        X = hstack([X,x**k])
    w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
    return w
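Usage sketch for the function above: with Np=3 and the default ndiv=1 it returns the familiar first-derivative central-difference coefficients. This assumes arange, newaxis, hstack, and product are in scope (e.g. via `from numpy import *`, as in the original module):

from numpy import arange, newaxis, hstack, product  # names the function above expects in scope
import numpy as np

w = central_diff_weights(3)
print(w)                         # [-0.5  0.   0.5]

# Apply the weights: approximate d/dx sin(x) at x = 0 with spacing dx = 0.1
dx = 0.1
fx = np.sin(np.array([-dx, 0.0, dx]))
print(np.dot(w, fx) / dx)        # ~0.998, close to cos(0) = 1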