Python numpy.zeros() Examples

The following are code examples showing how to use numpy.zeros(), collected from open source Python projects.
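
For reference, here is a minimal standalone sketch of the common calling patterns of numpy.zeros() (shape given as an int or a tuple, with an optional dtype); the variable names are purely illustrative:

import numpy as np

v = np.zeros(5)                        # 1-D array of five 0.0 values (default dtype float64)
m = np.zeros((3, 4), dtype=np.int32)   # 2-D array with an explicit integer dtype
labels = np.zeros(10)                  # preallocate, then fill in place --
labels[3] = 1.0                        # the pattern most of the examples below follow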

Example 1
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: MultiLabelLayer.py    BSD 2-Clause "Simplified" License
def get_a_datum(self):
        if self._compressed:
            datum = extract_sample(
                self._data[self._cur], self._mean, self._resize)
        else:
            datum = self._data[self._cur]
        # start parsing labels
        label_elems = parse_label(self._label[self._cur])
        label = np.zeros(self._label_dim)
        if not self._multilabel:
            label[0] = label_elems[0]
        else:
            for i in label_elems:
                label[i] = 1
        self._cur = (self._cur + 1) % self._sample_count
        return datum, label 
Example 2
Project: b2ac   Author: hbldh   File: ellipse.py    MIT License
def polygonize(self, n=73):
        """Gets a approximate polygon array representing the ellipse.

        Note that the last point is the same as the first point, creating a closed
        polygon.

        :param n: The number of points to generate. Default is 73 (one vertex every 5 degrees).
        :type n: int
        :return: An [n x 2] numpy array, describing the boundary vertices of
                 the polygonized ellipse.
        :rtype: :py:class:`numpy.ndarray`

        """
        t = np.linspace(0, 2 * np.pi, num=n, endpoint=True)
        out = np.zeros((len(t), 2), dtype='float')
        out[:, 0] = (self.center_point[0] +
                     self.radii[0] * np.cos(t) * np.cos(self.rotation_angle) -
                     self.radii[1] * np.sin(t) * np.sin(self.rotation_angle))
        out[:, 1] = (self.center_point[1] +
                     self.radii[0] * np.cos(t) * np.sin(self.rotation_angle) +
                     self.radii[1] * np.sin(t) * np.cos(self.rotation_angle))
        return out 
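
The preallocate-and-fill-columns pattern above also works outside the class; a minimal standalone sketch with made-up center, radii and rotation values:

import numpy as np

center, radii, angle = (1.0, 2.0), (3.0, 1.5), np.pi / 6
t = np.linspace(0, 2 * np.pi, num=73, endpoint=True)
boundary = np.zeros((len(t), 2), dtype='float')
boundary[:, 0] = center[0] + radii[0] * np.cos(t) * np.cos(angle) - radii[1] * np.sin(t) * np.sin(angle)
boundary[:, 1] = center[1] + radii[0] * np.cos(t) * np.sin(angle) + radii[1] * np.sin(t) * np.cos(angle)
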
Example 3
Project: projection-methods   Author: akshayka   File: zeros.py    GNU General Public License v3.0
def query(self, x_0):
        if self.contains(x_0):
            return x_0, []

        x_star = self.project(x_0) 
        if self._unqueried:
            # This is an abuse of the word hyperplane; this function
            # actually returns a set of hyperplanes that exactly identifies
            # the zero set. If added to an outer approximation, the
            # interpretation is that we are doing a presolve by forcing
            # x to be 0.
            assert np.prod(self._x.size) == x_0.shape[0]
            A = scipy.sparse.eye(x_0.shape[0])
            # Note that we signal to the dynamic polyhedron that this
            # hyperplane should be pinned. The reasoning is that this
            # "hyperplane" is simply a linear algebraic presolve, and enforcing
            # it should not increase our computational complexity if our
            # implementation is sound.
            h = [Hyperplane(self._x, A, np.zeros(x_0.shape[0]), pin=True)]
            self._unqueried = False
            self._info.append(h)
            return x_star, h
        else:
            return x_star, [] 
Example 4
Project: b2ac   Author: hbldh   File: reference.py    MIT License
def fit_B2AC(points):
    """Ellipse fitting in Python with numerically unstable algorithm.

    Described `here <http://research.microsoft.com/pubs/67845/ellipse-pami.pdf>`_.

    N.B. Do not use, since it works with an almost singular matrix.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    import scipy.linalg as scla

    constraint_matrix = np.zeros((6, 6))
    constraint_matrix[0, 2] = 2
    constraint_matrix[1, 1] = -1
    constraint_matrix[2, 0] = 2

    S = _calculate_scatter_matrix_py(points[:, 0], points[:, 1])

    evals, evect = scla.eig(S, constraint_matrix)
    ind = np.where(evals == (evals[evals > 0].min()))[0][0]
    return evect[:, ind] 
Example 5
Project: b2ac   Author: hbldh   File: polygon.py    MIT License
def get_bounding_box(self):
        """Get a four point Polygon describing the bounding box of the current Polygon.

        :return: A new polygon, describing this one's bounding box.
        :rtype: :py:class:`b2ac.geometry.polygon.B2ACPolygon`

        """
        mins = self.polygon_points.min(axis=0)
        maxs = self.polygon_points.max(axis=0)
        bb_pnts = np.zeros((4, 2), dtype=self.polygon_points.dtype)
        bb_pnts[0, :] = mins
        bb_pnts[1, :] = [mins[0], maxs[1]]
        bb_pnts[2, :] = maxs
        bb_pnts[3, :] = [maxs[0], mins[1]]
        out = B2ACPolygon(bb_pnts)
        return out 
Example 6
Project: cat-bbs   Author: aleju   File: create_dataset.py    MIT License
def load_keypoints(image_filepath, image_height, image_width):
    """Load facial keypoints of one image."""
    fp_keypoints = "%s.cat" % (image_filepath,)
    if not os.path.isfile(fp_keypoints):
        raise Exception("Could not find keypoint coordinates for image '%s'." \
                        % (image_filepath,))
    else:
        coords_raw = open(fp_keypoints, "r").readlines()[0].strip().split(" ")
        coords_raw = [abs(int(coord)) for coord in coords_raw]
        keypoints = []
        #keypoints_arr = np.zeros((9*2,), dtype=np.int32)
        for i in range(1, len(coords_raw), 2): # first element is the number of coords
            x = np.clip(coords_raw[i], 0, image_width-1)
            y = np.clip(coords_raw[i+1], 0, image_height-1)
            keypoints.append((x, y))

        return keypoints 
Example 7
Project: explirefit   Author: codogogo   File: data_helper.py    Apache License 2.0
def load_text_and_labels(path, lowercase = True, multilingual = False, distinct_labels_index = None):
	"""Loads text instances from files (one text one line), splits the data into words and generates labels (as one-hot vectors).
		Returns split sentences and labels.
	"""
    # Load data from files
	lines = [(s.lower() if lowercase else s).strip().split() for s in list(codecs.open(path,'r',encoding='utf8', errors='replace').readlines())]
	x_instances = [l[1:-1] for l in lines] if multilingual else [l[:-1] for l in lines]

	if multilingual: 
		langs = [l[0] for l in lines]
	labels = [l[-1] for l in lines]
	
	dist_labels = list(set(labels)) if distinct_labels_index is None else distinct_labels_index
	y_instances = [np.zeros(len(dist_labels)) for l in labels]
	for i in range(len(y_instances)):
		y_instances[i][dist_labels.index(labels[i])] = 1

	return [x_instances, y_instances, langs, dist_labels] if multilingual else [x_instances, y_instances, dist_labels] 
Example 8
Project: LipNet-PyTorch   Author: sailordiary   File: ctc_decoder.py    BSD 3-Clause "New" or "Revised" License
def wer(self, r, h):
        # initialisation
        d = np.zeros((len(r)+1)*(len(h)+1), dtype=np.uint8)
        d = d.reshape((len(r)+1, len(h)+1))
        for i in range(len(r)+1):
            for j in range(len(h)+1):
                if i == 0:
                    d[0][j] = j
                elif j == 0:
                    d[i][0] = i

        # computation
        for i in range(1, len(r)+1):
            for j in range(1, len(h)+1):
                if r[i-1] == h[j-1]:
                    d[i][j] = d[i-1][j-1]
                else:
                    substitution = d[i-1][j-1] + 1
                    insertion    = d[i][j-1] + 1
                    deletion     = d[i-1][j] + 1
                    d[i][j] = min(substitution, insertion, deletion)

        return d[len(r)][len(h)] 
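
Note that the np.uint8 buffer above silently wraps once a distance exceeds 255, so very long reference/hypothesis sequences would need a wider dtype. A standalone sketch of the same dynamic program with a safer dtype (this is not part of the LipNet code):

import numpy as np

def edit_distance(r, h):
    # Levenshtein distance between token sequences r and h.
    d = np.zeros((len(r) + 1, len(h) + 1), dtype=np.int64)
    d[0, :] = np.arange(len(h) + 1)
    d[:, 0] = np.arange(len(r) + 1)
    for i in range(1, len(r) + 1):
        for j in range(1, len(h) + 1):
            cost = 0 if r[i - 1] == h[j - 1] else 1
            d[i, j] = min(d[i - 1, j - 1] + cost, d[i, j - 1] + 1, d[i - 1, j] + 1)
    return d[len(r), len(h)]

# edit_distance("the cat sat".split(), "the cat sit".split()) -> 1
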
Example 9
Project: building-boundary   Author: Geodan   File: bounding_box.py    MIT License
def compute_edge_angles(edges):
    """
    Compute the angles between the edges and the x-axis.

    Parameters
    ----------
    edges : (Mx2x2) array
        The coordinates of the sets of points that define the edges.

    Returns
    -------
    edge_angles : (Mx1) array
        The angles between the edges and the x-axis.
    """
    edges_count = len(edges)
    edge_angles = np.zeros(edges_count)
    for i in range(edges_count):
        edge_x = edges[i][1][0] - edges[i][0][0]
        edge_y = edges[i][1][1] - edges[i][0][1]
        edge_angles[i] = math.atan2(edge_y, edge_x)

    return np.unique(edge_angles) 
Example 10
Project: fenics-topopt   Author: zfergus   File: solver.py    MIT License
def compliance_function_fdiff(self, x, dc):
        obj = self.compliance_function(x, dc)

        x0 = x.copy()
        dc0 = dc.copy()
        dcf = np.zeros(dc.shape)
        for i, v in enumerate(x):
            x = x0.copy()
            x[i] += 1e-6
            o1 = self.compliance_function(x, dc)
            x[i] = x0[i] - 1e-6
            o2 = self.compliance_function(x, dc)
            dcf[i] = (o1 - o2) / (2e-6)
        print("finite differences: {:g}".format(np.linalg.norm(dcf - dc0)))
        dc[:] = dc0

        return obj 
Example 11
Project: fenics-topopt   Author: zfergus   File: von_mises_stress.py    MIT License
def calculate_fdiff_stress(self, x, u, nu, side=1, dx=1e-6):
        """
        Calculate the derivative of the von Mises stress using finite
        differences, given the densities x, displacements u, and Young's
        modulus nu. Optionally, provide the side length (default: 1) and delta x
        (default: 1e-6).
        """
        ds = self.calculate_diff_stress(x, u, nu, side)
        dsf = numpy.zeros(x.shape)
        x = numpy.expand_dims(x, -1)
        for i in range(x.shape[0]):
            delta = scipy.sparse.coo_matrix(([dx], [[i], [0]]), shape=x.shape)
            s1 = self.calculate_stress((x + delta.A).squeeze(), u, nu, side)
            s2 = self.calculate_stress((x - delta.A).squeeze(), u, nu, side)
            dsf[i] = ((s1 - s2) / (2. * dx))[i]
        print("finite differences: {:g}".format(numpy.linalg.norm(dsf - ds)))
        return dsf 
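
The central-difference recipe described in the docstring can be sketched in isolation; f and x0 below are placeholders for any scalar objective and starting point:

import numpy as np

def fdiff_gradient(f, x0, dx=1e-6):
    # Central finite differences: perturb one coordinate at a time.
    grad = np.zeros(x0.shape)
    for i in range(x0.size):
        x_plus, x_minus = x0.copy(), x0.copy()
        x_plus[i] += dx
        x_minus[i] -= dx
        grad[i] = (f(x_plus) - f(x_minus)) / (2.0 * dx)
    return grad

# fdiff_gradient(lambda x: np.sum(x ** 2), np.array([1.0, 2.0])) is approximately [2.0, 4.0]
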
Example 12
Project: fenics-topopt   Author: zfergus   File: solver.py    MIT License
def compliance_function_fdiff(self, x, dc):
        obj = self.compliance_function(x, dc)

        x0 = x.copy()
        dc0 = dc.copy()
        dcf = np.zeros(dc.shape)
        for i, v in enumerate(x):
            x = x0.copy()
            x[i] += 1e-6
            o1 = self.compliance_function(x, dc)
            x[i] = x0[i] - 1e-6
            o2 = self.compliance_function(x, dc)
            dcf[i] = (o1 - o2) / (2e-6)
        print("finite differences: {:g}".format(np.linalg.norm(dcf - dc0)))
        dc[:] = dc0

        return obj 
Example 13
Project: fenics-topopt   Author: zfergus   File: von_mises_stress.py    MIT License
def calculate_fdiff_stress(self, x, u, nu, side=1, dx=1e-6):
        """
        Calculate the derivative of the von Mises stress using finite
        differences, given the densities x, displacements u, and Young's
        modulus nu. Optionally, provide the side length (default: 1) and delta x
        (default: 1e-6).
        """
        ds = self.calculate_diff_stress(x, u, nu, side)
        dsf = numpy.zeros(x.shape)
        x = numpy.expand_dims(x, -1)
        for i in range(x.shape[0]):
            delta = scipy.sparse.coo_matrix(([dx], [[i], [0]]), shape=x.shape)
            s1 = self.calculate_stress((x + delta.A).squeeze(), u, nu, side)
            s2 = self.calculate_stress((x - delta.A).squeeze(), u, nu, side)
            dsf[i] = ((s1 - s2) / (2. * dx))[i]
        print("finite differences: {:g}".format(numpy.linalg.norm(dsf - ds)))
        return dsf 
Example 14
Project: cs207-FinalProject   Author: PYNE-AD   File: Dual.py    MIT License
def vectorizeDual(functions, order):
	n = len(functions)
	m = order + 1
	vector = np.zeros([n, m])
	for i, f in enumerate(functions):
		f.buildCoefficients(order)
		for j, coeff in enumerate(f.coefficients):
			vector[i, j] = coeff

	# Create a copy of one function
	f_everything = functions[0]
	f_everything.coefficients = np.zeros([n, m])
	for i in range(m):
		f_everything.coefficients[:, i] = vector[:, i]

	return f_everything
Example 15
Project: unicorn-hat-hd   Author: pimoroni   File: __init__.py    MIT License
def setup_buffer(width, height):
    """Set up the internal pixel buffer.

    :param width: width of buffer, ideally in multiples of 16
    :param height: height of buffer, ideally in multiples of 16

    """
    global _buffer_width, _buffer_height, _buf

    _buffer_width = width
    _buffer_height = height
    _buf = numpy.zeros((_buffer_width, _buffer_height, 3), dtype=int) 
Example 16
Project: Black-Box-Audio   Author: rtaori   File: tf_logits.py    MIT License
def compute_mfcc(audio, **kwargs):
    """
    Compute the MFCC for a given audio waveform. This is
    identical to how DeepSpeech does it, but does it all in
    TensorFlow so that we can differentiate through it.
    """

    batch_size, size = audio.get_shape().as_list()
    audio = tf.cast(audio, tf.float32)

    # 1. Pre-emphasizer, a high-pass filter
    audio = tf.concat((audio[:, :1], audio[:, 1:] - 0.97*audio[:, :-1], np.zeros((batch_size,1000),dtype=np.float32)), 1)

    # 2. windowing into frames of 320 samples, overlapping
    windowed = tf.stack([audio[:, i:i+400] for i in range(0,size-320,160)],1)

    # 3. Take the FFT to convert to frequency space
    ffted = tf.spectral.rfft(windowed, [512])
    ffted = 1.0 / 512 * tf.square(tf.abs(ffted))

    # 4. Compute the Mel windowing of the FFT
    energy = tf.reduce_sum(ffted,axis=2)+1e-30
    filters = np.load("filterbanks.npy").T
    feat = tf.matmul(ffted, np.array([filters]*batch_size,dtype=np.float32))+1e-30

    # 5. Take the DCT again, because why not
    feat = tf.log(feat)
    feat = tf.spectral.dct(feat, type=2, norm='ortho')[:,:,:26]

    # 6. Amplify high frequencies for some reason
    _,nframes,ncoeff = feat.get_shape().as_list()
    n = np.arange(ncoeff)
    lift = 1 + (22/2.)*np.sin(np.pi*n/22)
    feat = lift*feat
    width = feat.get_shape().as_list()[1]

    # 7. And now stick the energy next to the features
    feat = tf.concat((tf.reshape(tf.log(energy),(-1,width,1)), feat[:, :, 1:]), axis=2)
    
    return feat 
Example 17
Project: Black-Box-Audio   Author: rtaori   File: tf_logits.py    MIT License
def get_logits(new_input, length, first=[]):
    """
    Compute the logits for a given waveform.

    First, preprocess with the TF version of MFC above,
    and then call DeepSpeech on the features.
    """
    # new_input = tf.Print(new_input, [tf.shape(new_input)])

    # We need to init DeepSpeech the first time we're called
    if first == []:
        first.append(False)
        # Okay, so this is ugly again.
        # We just want it to not crash.
        tf.app.flags.FLAGS.alphabet_config_path = "DeepSpeech/data/alphabet.txt"
        DeepSpeech.initialize_globals()
        print('initialized deepspeech globals')

    batch_size = new_input.get_shape()[0]

    # 1. Compute the MFCCs for the input audio
    # (this is differentable with our implementation above)
    empty_context = np.zeros((batch_size, 9, 26), dtype=np.float32)
    new_input_to_mfcc = compute_mfcc(new_input)[:, ::2]
    features = tf.concat((empty_context, new_input_to_mfcc, empty_context), 1)

    # 2. We get to see 9 frames at a time to make our decision,
    # so concatenate them together.
    features = tf.reshape(features, [new_input.get_shape()[0], -1])
    features = tf.stack([features[:, i:i+19*26] for i in range(0,features.shape[1]-19*26+1,26)],1)
    features = tf.reshape(features, [batch_size, -1, 19*26])

    # 3. Whiten the data
    mean, var = tf.nn.moments(features, axes=[0,1,2])
    features = (features-mean)/(var**.5)

    # 4. Finally we process it with DeepSpeech
    logits = DeepSpeech.BiRNN(features, length, [0]*10)

    return logits 
Example 18
Project: SyNEThesia   Author: RunOrVeith   File: feature_creators.py    MIT License
def _split_into_chunks(signal, chunks_per_second=24):
    # TODO currently broken
    raise NotImplementedError("Splitting to chunks is currently broken.")
    window_length_ms = 1/chunks_per_second * 1000
    intervals = np.arange(window_length_ms, signal.shape[0], window_length_ms, dtype=np.int32)
    chunks = np.array_split(signal, intervals, axis=0)
    pad_to = _next_power_of_two(np.max([chunk.shape[0] for chunk in chunks]))
    padded_chunks = np.stack(np.concatenate([chunk, np.zeros((pad_to - chunk.shape[0],))]) for chunk in chunks)
    return padded_chunks 
Example 19
Project: SyNEThesia   Author: RunOrVeith   File: live_viewer.py    MIT License
def display(self, image_generator):
        im = plt.imshow(np.zeros((1, 1, 3)), animated=True)
        def update(frame, *_):
            im.set_array(frame)
            return im,

        ani = FuncAnimation(self.fig, func=update, frames=image_generator, interval=self.pause_time, blit=True)
        plt.show() 
Example 20
Project: SyNEThesia   Author: RunOrVeith   File: synethesia.py    MIT License
def random_start_img(img_size, batch_size, num_channels=3, num_ones_offset=None):
    zeros = np.zeros((*img_size, batch_size * num_channels))
    num_ones_offset = np.random.choice([1, 0], size=batch_size * num_channels)
    zeros += num_ones_offset * np.random.random(size=batch_size * num_channels)
    img = np.transpose(zeros.reshape((*img_size, batch_size, num_channels)), axes=[2, 0, 1, 3])
    return img 
Example 21
Project: SyNEThesia   Author: RunOrVeith   File: ops.py    MIT License
def batch_norm(x, is_train_tensor=None):

    if is_train_tensor is None:
        # Fall back to the graph's 'is_training' placeholder when no tensor is passed in.
        is_train_tensor = tf.get_default_graph().get_tensor_by_name('is_training:0')

    normed, _, _ = tf.nn.fused_batch_norm(x, scale=tf.ones(shape=(x.get_shape()[-1],)),
                                          offset=tf.zeros(shape=(x.get_shape()[-1],)),
                                          is_training=is_train_tensor, name=None)
    return normed 
Example 22
Project: SyNEThesia   Author: RunOrVeith   File: ops.py    MIT License
def bilinear_weight_initializer(filter_size, add_noise=True):

    def _initializer(shape, dtype=tf.float32, partition_info=None):
        if shape:
            # second last dimension is input, last dimension is output
            fan_in = float(shape[-2]) if len(shape) > 1 else float(shape[-1])
            fan_out = float(shape[-1])
        else:
            fan_in = 1.0
            fan_out = 1.0

        # define weight matrix (set dtype always to float32)
        weights = np.zeros((filter_size[0], filter_size[1], int(fan_in), int(fan_out)), dtype=dtype.as_numpy_dtype())

        # get bilinear kernel
        bilinear = bilinear_filt(filter_size=filter_size)
        bilinear = bilinear / fan_out  # normalize by number of channels

        # set filter in weight matrix (also allow channel mixing)
        for i in range(weights.shape[2]):
            for j in range(weights.shape[3]):
                weights[:, :, i, j] = bilinear

        # add small noise for symmetry breaking
        if add_noise:
            # define standard deviation so that it is equal to 1/2 of the smallest weight entry
            std = np.min(bilinear) / 2
            noise = truncated_normal(shape=shape, mean=0.0,
                                     stddev=std, seed=None, dtype=dtype)
            weights += noise

        return weights

    return _initializer 
Example 23
Project: PEAKachu   Author: tbischler   File: consensus_peak.py    ISC License
def _get_peak_coverage(self):
        norm_coverage_folder = "{}/normalized_coverage".format(
            self._project_folder)
        coverage_files = [join(norm_coverage_folder, f) for f in listdir(
            norm_coverage_folder) if isfile(join(norm_coverage_folder, f))]
        wiggle_parser = WiggleParser()
        cons_value_dict = defaultdict(dict)
        for coverage_file in coverage_files:
            cons_values = np.zeros(self._consensus_length)
            with open(coverage_file, 'r') as cov_fh:
                for wiggle_entry in wiggle_parser.entries(cov_fh):
                    lib_name_and_strand = wiggle_entry.track_name
                    lib_name = '_'.join(lib_name_and_strand.split('_')[:-1])
                    lib_strand = '+' if lib_name_and_strand.split(
                        '_')[-1] == "forward" else '-'
                    replicon = wiggle_entry.replicon
                    pos_value_pairs = dict(wiggle_entry.pos_value_pairs)
                    self._get_coverage_for_replicon_peaks(
                        replicon, lib_strand, pos_value_pairs, cons_values)
            cons_value_dict[lib_name][lib_strand] = cons_values
        # combine strands
        comb_cons_value_dict = {}
        for lib in cons_value_dict:
            comb_cons_value_dict[lib] = np.zeros(self._consensus_length)
            for strand in cons_value_dict[lib]:
                comb_cons_value_dict[lib] += cons_value_dict[lib][strand]
        return comb_cons_value_dict 
Example 24
Project: 4enRaya_Python   Author: Yussoft   File: CuatroEnRaya.py    GNU General Public License v2.0
def comenzarPartida():

    global juegoComenzado
    global jugador1
    global jugador2
    global texto
    global canvas
    global matrix
    global fin
    global usados
    
    if juegoComenzado == False:
        jugador1 = True

    # Restart the game
    elif juegoComenzado == True:
        
        jugador1 = True
        jugador2 = False
        canvas.delete("all")
        # Canvas where everything is drawn
        canvas.create_line(0,100,400,100) # Lines that draw the 4x4 board
        canvas.create_line(0,200,400,200)
        canvas.create_line(0,300,400,300)
        canvas.create_line(0,400,400,400)
        canvas.create_line(100,0,100,400)
        canvas.create_line(200,0,200,400)
        canvas.create_line(300,0,300,400)
        canvas.create_line(400,0,400,400)
        matrix = numpy.zeros((4,4)) 
        fin = False
        usados=0
        
    texto.set("Estado del juego: Turno jugador 1.")
    juegoComenzado = True 
Example 25
Project: b2ac   Author: hbldh   File: matrix_operations.py    MIT License
def inverse_3by3_double(M):
    """C style inverse of a flattened 3 by 3 matrix, returning full matrix in
    floating-point, also in flattened format.

    :param M: The matrix to find inverse to.
    :type M: :py:class:`numpy.ndarray`
    :return: The inverse matrix.
    :rtype: :py:class:`numpy.ndarray`

    """
    if len(M.shape) > 1:
        M = M.flatten()

    M = np.array(M, 'float')

    determinant = 0.
    adj_M = np.zeros((9,), 'float')

    # First row of adjugate matrix
    adj_M[0] = (M[4] * M[8] - M[7] * M[5])  # Det #0
    adj_M[1] = -(M[1] * M[8] - M[7] * M[2])
    adj_M[2] = (M[1] * M[5] - M[4] * M[2])

    # Second row of adjugate matrix
    adj_M[3] = -(M[3] * M[8] - M[6] * M[5])  # Det #1
    adj_M[4] = (M[0] * M[8] - M[6] * M[2])
    adj_M[5] = -(M[0] * M[5] - M[3] * M[2])

    # Third row of adjugate matrix
    adj_M[6] = (M[3] * M[7] - M[6] * M[4])  # Det #2
    adj_M[7] = -(M[0] * M[7] - M[6] * M[1])
    adj_M[8] = (M[0] * M[4] - M[3] * M[1])

    determinant += M[0] * adj_M[0]
    determinant += M[1] * adj_M[3]  # Using addition since minus is integrated in adjugate matrix.
    determinant += M[2] * adj_M[6]

    return (adj_M / determinant) 
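
A quick sanity check for this routine, assuming inverse_3by3_double is in scope as defined above, is to compare it against numpy's own inverse:

import numpy as np

M = np.array([4., 7., 2., 3., 6., 1., 2., 5., 3.])   # flattened, invertible 3x3 matrix
inv_flat = inverse_3by3_double(M)
assert np.allclose(inv_flat.reshape(3, 3), np.linalg.inv(M.reshape(3, 3)))
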
Example 26
Project: b2ac   Author: hbldh   File: qr_algorithm.py    MIT License
def QR_algorithm_shift_Givens_double(A):
    """The QR algorithm with largest value shift for finding eigenvalues.

    Using Givens rotations for finding RQ.

    :param A: The square matrix to find eigenvalues of.
    :type A: :py:class:`numpy.ndarray`
    :return: The eigenvalues.
    :rtype: list

    """

    # First, tridiagonalize the input matrix to a Hessenberg matrix.
    T = ma.convert_to_Hessenberg_Givens_double(A.copy())

    m, n = T.shape
    if n != m:
        raise np.linalg.LinAlgError("Array must be square.")

    convergence_measure = []
    eigenvalues = np.zeros((m, ), dtype='float')
    m -= 1

    while m > 0:
        # Obtain the shift from the lower right corner of the matrix.
        mu_matrix = np.eye(T.shape[0]) * T[m, m]
        # Perform Givens QR step (which returns RQ) on the shifted
        # matrix and then shift it back.
        T = Givens_QR_step_double(T - mu_matrix) + mu_matrix
        # Add convergence information and extract eigenvalue if close enough.
        convergence_measure.append(np.abs(T[m, m - 1]))
        if convergence_measure[-1] < QR_ALGORITHM_TOLERANCE:
            eigenvalues[m] = T[m, m]
            T = T[:m, :m]
            m -= 1

    eigenvalues[0] = T
    return eigenvalues, convergence_measure 
Example 27
Project: b2ac   Author: hbldh   File: qr_algorithm.py    MIT License
def QR_algorithm_shift_Givens_int(A):
    """The QR algorithm with largest value shift for finding eigenvalues, in integer precision.

    Using Givens rotations for finding RQ.

    :param A: The square matrix to find eigenvalues of.
    :type A: :py:class:`numpy.ndarray`
    :return: The eigenvalues.
    :rtype: list

    """

    A, scale = fp.scale_64bit_matrix(A.copy())

    # First, tridiagonalize the input matrix to a Hessenberg matrix.
    T = ma.convert_to_Hessenberg_Givens_int(A)

    m, n = T.shape
    if n != m:
        raise np.linalg.LinAlgError("Array must be square.")

    convergence_measure = []
    eigenvalues = np.zeros((m, ), dtype='int64')
    m -= 1

    while m > 0:
        # Obtain the shift from the lower right corner of the matrix.
        mu_matrix = np.eye(T.shape[0], dtype='int64') * T[m, m]
        # Perform Givens QR step (which returns RQ) on the shifted
        # matrix and then shift it back.
        T = Givens_QR_step_int(T - mu_matrix) + mu_matrix
        # Add convergence information and extract eigenvalue if close enough.
        convergence_measure.append(np.abs(T[m, m - 1]))
        if convergence_measure[-1] <= 1:
            eigenvalues[m] = T[m, m]
            T = T[:m, :m]
            m -= 1

    eigenvalues[0] = T
    return eigenvalues << scale, convergence_measure 
Example 28
Project: SpatialPooler   Author: CSDUMMI   File: spatial_pooler.py    GNU General Public License v3.0
def overlap(x,y):
    # You can't overlap sdrs
    # with different shapes
    if x.shape != y.shape:
        return None
    result = np.zeros(x.shape,dtype=np.bool_)
    for i in range(x.shape[0]):
        result[i] = x[i] and y[i]
    return result 
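
For 1-D boolean SDRs the same result can be obtained without the explicit loop; a vectorized sketch that mirrors the behaviour above:

import numpy as np

def overlap_vectorized(x, y):
    # Element-wise AND over the whole arrays; returns None on a shape
    # mismatch, like the loop version above.
    if x.shape != y.shape:
        return None
    return np.logical_and(x, y)
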
Example 29
Project: SpatialPooler   Author: CSDUMMI   File: spatial_pooler.py    GNU General Public License v3.0
def run(self,input_sdr):
        self.current = 0
        output = np.zeros(len(self.collumns),dtype=np.bool_)
        for i in range(len(self.collumns)):
            output[i] = self.spatial_pooler(input_sdr)
            self.current += 1
        self.current = 0
        return output 
Example 30
Project: chainer-openai-transformer-lm   Author: soskek   File: utils.py    MIT License
def stsb_label_encoding(labels, nclass=6):
    """
    Label encoding from Tree LSTM paper (Tai, Socher, Manning)
    """
    Y = np.zeros((len(labels), nclass)).astype(np.float32)
    for j, y in enumerate(labels):
        for i in range(nclass):
            if i == np.floor(y) + 1:
                Y[j, i] = y - np.floor(y)
            if i == np.floor(y):
                Y[j, i] = np.floor(y) - y + 1
    return Y 
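
For a gold similarity score of 2.5 this encoding splits the mass between the two neighbouring integer classes; a quick check, assuming the function above is in scope:

Y = stsb_label_encoding([2.5])
# Y[0] == [0. , 0. , 0.5, 0.5, 0. , 0. ]
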
Example 31
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License
def transform_roc(X1, X2, X3):
    n_batch = len(X1)
    xmb = np.zeros((n_batch, 2, n_ctx, 2), dtype=np.int32)
    mmb = np.zeros((n_batch, 2, n_ctx), dtype=np.float32)
    start = encoder['_start_']
    delimiter = encoder['_delimiter_']
    for i, (x1, x2, x3), in enumerate(zip(X1, X2, X3)):
        x12 = [start] + x1[:max_len] + [delimiter] + x2[:max_len] + [clf_token]
        x13 = [start] + x1[:max_len] + [delimiter] + x3[:max_len] + [clf_token]
        l12 = len(x12)
        l13 = len(x13)
        xmb[i, 0, :l12, 0] = x12
        xmb[i, 1, :l13, 0] = x13
        mmb[i, 0, :l12] = 1
        mmb[i, 1, :l13] = 1
    xmb[:, :, :, 1] = np.arange(
        n_vocab + n_special, n_vocab + n_special + n_ctx)
    return xmb, mmb 
Example 32
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License
def transform_sst(X1):
    n_batch = len(X1)
    xmb = np.zeros((n_batch, 1, n_ctx, 2), dtype=np.int32)
    mmb = np.zeros((n_batch, 1, n_ctx), dtype=np.float32)
    start = encoder['_start_']
    delimiter = encoder['_delimiter_']
    for i, x1, in enumerate(X1):
        x1 = [start] + x1[:max_len] + [clf_token]
        l1 = len(x1)
        xmb[i, 0, :l1, 0] = x1
        mmb[i, 0, :l1] = 1
    xmb[:, :, :, 1] = np.arange(
        n_vocab + n_special, n_vocab + n_special + n_ctx)
    return xmb, mmb 
Example 33
Project: projection-methods   Author: akshayka   File: zeros.py    GNU General Public License v3.0
def __init__(self, x):
        """
        Args:
            x (cvxpy.Variable): a symbolic representation of
                members of the set
        """
        constr = [x == np.zeros(np.prod(x.size))]
        self._unqueried = True
        super(Zeros, self).__init__(x, constr) 
Example 34
Project: projection-methods   Author: akshayka   File: zeros.py    GNU General Public License v3.0
def project(self, x_0):
        return x_0 if self.contains(x_0) else np.zeros(x_0.shape) 
Example 35
Project: projection-methods   Author: akshayka   File: soc.py    GNU General Public License v3.0
def project(self, x_0):
        if self.contains(x_0):
            return x_0
        z = x_0[:-1]
        t = x_0[-1]
        norm_z = np.linalg.norm(z, 2)

        # As given in [BV04, Chapter 8.Exercises]
        if norm_z <= -t:
            # this certainly will never happen when t > 0
            return np.zeros(np.shape(x_0))
        elif self._contains(norm_z, t):
            return x_0
        else:
            return 0.5 * (1 + t/norm_z) * np.append(z, norm_z) 
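
The same second-order cone projection can be written as a standalone helper for experimentation (a sketch, independent of the projection-methods class):

import numpy as np

def project_soc(x_0):
    # Projection onto the second-order cone {(z, t) : ||z||_2 <= t}.
    z, t = x_0[:-1], x_0[-1]
    norm_z = np.linalg.norm(z, 2)
    if norm_z <= t:
        return x_0
    if norm_z <= -t:
        return np.zeros(x_0.shape)
    return 0.5 * (1 + t / norm_z) * np.append(z, norm_z)

# project_soc(np.array([3.0, 4.0, 0.0])) -> [1.5, 2.0, 2.5]
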
Example 36
Project: projection-methods   Author: akshayka   File: cartesian_product.py    GNU General Public License v3.0
def project(self, x_0):
        assert self._shape == x_0.shape, \
            'cone shape (%s) != x_0 shape (%s)' % (
            str(self._shape), str(x_0.shape))
        x_star = np.zeros(x_0.shape)
        for s, slx in zip(self.sets, self.slices):
            x_star[slx] = s.project(x_0[slx])
        return x_star 
Example 37
Project: projection-methods   Author: akshayka   File: test_affine_set.py    GNU General Public License v3.0
def test_projection(self):
        """Test projection, query methods of the AffineSet oracle."""
        m = 150
        n = 200
        x = cvxpy.Variable(n)

        sol = np.random.randn(n)
        A = np.random.randn(m, n)
        b = A.dot(sol)

        affine = AffineSet(x, A, b)
        constr = affine._constr

        # Test idempotency
        x_0 = sol
        x_star = affine.project(x_0)
        self.assertTrue(affine.contains(x_star))
        self.assertTrue(np.array_equal(x_0, x_star), "projection not "
            "idemptotent")
        p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
        p.solve()
        self.assertTrue(np.isclose(np.array(x.value).flatten(), x_star,
            atol=1e-3).all())
        utils.query_helper(self, x_0, x_star, affine, idempotent=True)
        
        # Test random x_0
        x_0 = np.random.randn(n)
        while (np.isclose(A.dot(x_0), np.zeros(m)).all()):
            x_0 = np.random.randn(n)
        x_star = affine.project(x_0)
        self.assertTrue(np.isclose(A.dot(x_star), b).all())
        p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
        p.solve()
        self.assertTrue(np.isclose(np.array(x.value).flatten(), x_star,
            atol=1e-3).all())
        utils.query_helper(self, x_0, x_star, affine, idempotent=False) 
Example 38
Project: projection-methods   Author: akshayka   File: dykstra.py    GNU General Public License v3.0
def solve(self, problem):
        left_set = problem.sets[0]
        right_set = problem.sets[1]

        iterate = (self._initial_iterate if
            self._initial_iterate is not None else np.ones(problem.dimension))

        # (p_n), (q_n) are the auxiliary sequences, (a_n), (b_n) the main
        # sequences, defined in Bauschke's 98 paper
        # (Dykstra's Alternating Projection Algorithm for Two Sets).
        self.p = [None] * (self.max_iters + 1)
        self.q = [None] * (self.max_iters + 1)
        self.a = [None] * (self.max_iters + 1)
        self.b = [None] * (self.max_iters + 1)
        zero_vector = np.zeros(problem.dimension)
        self.p[0] = self.q[0] = zero_vector
        self.b[0] = self.a[0] = iterate
        residuals = []

        status = Optimizer.Status.INACCURATE
        for n in xrange(1, self.max_iters + 1):
            if self.verbose:
                print 'iteration %d' % n
            # TODO(akshayka): Robust stopping criterion
            residuals.append(self._compute_residual(
                self.b[n-1], left_set, right_set))
            if self.verbose:
                print '\tresidual: %e' % sum(residuals[-1])
            if self._is_optimal(residuals[-1]):
                status = Optimizer.Status.OPTIMAL
                if not self.do_all_iters:
                    break

            self.a[n] = left_set.project(self.b[n-1] + self.p[n-1])
            self.b[n] = right_set.project(self.a[n] + self.q[n-1])
            self.p[n] = self.b[n-1] + self.p[n-1] - self.a[n]
            self.q[n] = self.a[n] + self.q[n-1] - self.b[n]

        # TODO(akshayka): does it matter if I return self.b vs self.a?
        # the first implementation returned self.a ...
        return self.b, residuals, status 
Example 39
Project: explirefit   Author: codogogo   File: data_helper.py    Apache License 2.0
def build_text_and_labels(texts, class_labels, lowercase = True, multilingual = False, langs = None, distinct_labels_index = None):
	# Load data from files
	lines = [(text.lower() if lowercase else text).strip().split() for text in texts]
	x_instances = [l[1:-1] for l in lines] if multilingual else [l[:-1] for l in lines]
	
	dist_labels = list(set(class_labels)) if distinct_labels_index is None else distinct_labels_index
	y_instances = [np.zeros(len(dist_labels)) for l in class_labels]
	for i in range(len(y_instances)):
		y_instances[i][dist_labels.index(class_labels[i])] = 1

	return [x_instances, y_instances, langs, dist_labels] if multilingual else [x_instances, y_instances, dist_labels] 
Example 40
Project: explirefit   Author: codogogo   File: text_embeddings.py    Apache License 2.0
def aggregate_phrase_embedding(words, stopwords, embs, emb_size, l2_norm_vec = True, lang = 'en'):
	vec_res = np.zeros(emb_size)
	fit_words = [w.lower() for w in words if w.lower() not in stopwords and w.lower() in embs.lang_vocabularies[lang]]
	if len(fit_words) == 0:
		return None

	for w in fit_words:
		vec_res += embs.get_vector(lang, w) 	
	res = np.multiply(1.0 / (float(len(fit_words))), vec_res)
	if l2_norm_vec:
		res = np.multiply(1.0 / np.linalg.norm(res), res)
	return res 
Example 41
Project: explirefit   Author: codogogo   File: confusion_matrix.py    Apache License 2.0
def __init__(self, labels = [], predictions = [], gold = [], one_hot_encoding = False, class_indices = False):
		# rows are true labels, columns predictions
		self.matrix = np.zeros(shape = (len(labels), len(labels)))
		self.labels = labels

		if len(predictions) != len(gold):
			raise ValueError("Predictions and gold labels do not have the same count.")
		for i in range(len(predictions)):
			index_pred = np.argmax(predictions[i]) if one_hot_encoding else (predictions[i] if class_indices else labels.index(predictions[i]))
			index_gold = np.argmax(gold[i]) if one_hot_encoding else (gold[i] if class_indices else labels.index(gold[i]))
			self.matrix[index_gold][index_pred] += 1

		if len(predictions) > 0: 
			self.compute_all_scores() 
Example 42
Project: explirefit   Author: codogogo   File: simple_stats.py    Apache License 2.0
def covariance_matrix(first, second):
	if first.shape[0] != second.shape[0]:
		raise ValueError("Input matrices must have the same number of row vectors (same first dimensions)")
	
	cov_mat = np.zeros((first.shape[1], second.shape[1]))
	for i in range(first.shape[0]):
		cov_mat = np.add(cov_mat, np.matmul(np.transpose([first[i]]), np.array([second[i]])))
	cov_mat = np.multiply(1.0 / float(first.shape[0]), cov_mat)
	return cov_mat 
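
The loop of outer products above is equivalent to a single matrix product; a vectorized sketch:

import numpy as np

def covariance_matrix_vectorized(first, second):
    # sum_i outer(first[i], second[i]) / n  ==  first.T @ second / n
    if first.shape[0] != second.shape[0]:
        raise ValueError("Input matrices must have the same number of row vectors")
    return first.T.dot(second) / float(first.shape[0])
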
Example 43
Project: rhodonite   Author: nestauk   File: misc.py    MIT License
def edge_property_as_matrix(g, prop_name):
    """edge_property_as_matrix
    Returns an edge property as a matrix. If the graph is undirected, a
    symmetric matrix is returned.
    
    Parameters
    ----------
        g : :obj:`graph_tool.Graph` 
            A graph-tool type graph.
        prop_name : :obj:`str`
            The name of an edge property belonging to Graph, g.

    Returns
    -------
     mat : :obj:`np.array`
        The edge property in matrix form, with each element (i, j)
        holding the property value of the edge between vertices i and j.
    """
    n_vertices = len([_ for _ in g.vertices()])
    mat = np.zeros((n_vertices, n_vertices))
    prop = g.ep[prop_name]
    edges = sorted(g.edges())
    if g.is_directed():
        for e in edges:
            s = int(e.source())
            t = int(e.target())            
            mat[s][t] = prop[e]
    else:
        for e in edges:
            s = int(e.source())
            t = int(e.target())            
            mat[s][t] = prop[e]
            mat[t][s] = prop[e]
    return mat 
Example 44
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: spharafilter.py    BSD 3-Clause "New" or "Revised" License
def specification(self, specification):
        if isinstance(specification, (int)):
            if np.abs(specification) > self._triangsamples.vertlist.shape[0]:
                raise ValueError("""The Number of selected basic functions is
                too large.""")
            else:
                if specification == 0:
                    self._specification = \
                        np.ones(self._triangsamples.vertlist.shape[0])
                else:
                    self._specification = \
                        np.zeros(self._triangsamples.vertlist.shape[0])
                    if specification > 0:
                        self._specification[:specification] = 1
                    else:
                        self._specification[specification:] = 1
        elif isinstance(specification, (list, tuple, np.ndarray)):
            specification = np.asarray(specification)
            if specification.shape[0] != self._triangsamples.vertlist.shape[1]:
                raise IndexError("""The length of the specification vector
                does not match the number of spatial sample points. """)
            else:
                self._specification = specification
        else:
            raise TypeError("""The parameter specification has to be
            an int or a vector""")
Example 45
Project: UR5_Controller   Author: tsinghua-rll   File: API.py    MIT License
def from_q_to_rad_axis(q):
        """
        :param q: quaternion in w-i-j-k format
        :return: rotation vector (rotation angle times unit axis) in x-y-z format
        """
        norm_axis = math.sqrt(q[1] ** 2 + q[2] ** 2 + q[3] ** 2)
        rad = 2.0 * math.atan2(norm_axis, q[0])
        if norm_axis < 1e-5:
            return np.zeros((3,), dtype=np.float32)
        else:
            return rad * np.asarray(q[1:], dtype=np.float32) / norm_axis 
Example 46
Project: osqf2015   Author: mvaz   File: model.py    MIT License
def fhs(self, n_scenarios=250, start_date=None, end_date=None):
        x = sliding_window(n_scenarios+1, range(len(self.ts.index)))
        scenarios = np.zeros((len(self.ts.index), n_scenarios+1))
        for i, el in enumerate(x):
            l = list(el)
            cur_idx, hist_idx = l[-1], l[:-1]
            neutral = self.ts.Value.values[cur_idx]
            ret = self.ts.DevolLogReturns.values[hist_idx]
            vol = self.ts.Vola.values[cur_idx]
            scenarios[cur_idx, 1:] = self.scenario_values(ret, neutral, vol)
            scenarios[cur_idx, 0] = neutral
        return scenarios 
Example 47
Project: fenics-topopt   Author: zfergus   File: boundary_conditions.py    MIT License
def get_forces(self):
        # Return the force vector for the problem
        ndof = 2 * (self.nelx + 1) * (self.nely + 1)
        f = np.zeros((ndof, 1))
        f[1, 0] = -1
        return f 
Example 48
Project: fenics-topopt   Author: zfergus   File: filter.py    MIT License
def __init__(self, nelx, nely, rmin):
        """
        Filter: Build (and assemble) the index+data vectors for the coo matrix
        format.
        """
        nfilter = int(nelx * nely * ((2 * (np.ceil(rmin) - 1) + 1)**2))
        iH = np.zeros(nfilter)
        jH = np.zeros(nfilter)
        sH = np.zeros(nfilter)
        cc = 0
        for i in range(nelx):
            for j in range(nely):
                row = i * nely + j
                kk1 = int(np.maximum(i - (np.ceil(rmin) - 1), 0))
                kk2 = int(np.minimum(i + np.ceil(rmin), nelx))
                ll1 = int(np.maximum(j - (np.ceil(rmin) - 1), 0))
                ll2 = int(np.minimum(j + np.ceil(rmin), nely))
                for k in range(kk1, kk2):
                    for l in range(ll1, ll2):
                        col = k * nely + l
                        fac = rmin - np.sqrt(
                            ((i - k) * (i - k) + (j - l) * (j - l)))
                        iH[cc] = row
                        jH[cc] = col
                        sH[cc] = np.maximum(0.0, fac)
                        cc = cc + 1
        # Finalize assembly and convert to csc format
        self.H = scipy.sparse.coo_matrix((sH, (iH, jH)),
            shape=(nelx * nely, nelx * nely)).tocsc()
        self.Hs = self.H.sum(1) 
Example 49
Project: fenics-topopt   Author: zfergus   File: solver.py    MIT License
def __init__(self, nelx, nely, volfrac, penal, rmin, ft, gui, bc):
        self.n = nelx * nely
        self.opt = nlopt.opt(nlopt.LD_MMA, self.n)
        self.passive = bc.get_passive_elements()
        self.xPhys = np.ones(self.n)
        if self.passive is not None:
            self.xPhys[self.passive] = 0

        # set bounds
        ub = np.ones(self.n, dtype=float)
        self.opt.set_upper_bounds(ub)
        lb = np.zeros(self.n, dtype=float)
        self.opt.set_lower_bounds(lb)

        # set stopping criteria
        self.opt.set_maxeval(2000)
        self.opt.set_ftol_rel(0.001)

        # set objective and constraint functions
        self.opt.set_min_objective(self.compliance_function)
        self.opt.add_inequality_constraint(self.volume_function, 0)

        # setup filter
        self.ft = ft
        self.filtering = Filter(nelx, nely, rmin)

        # setup problem def
        self.init_problem(nelx, nely, penal, bc)
        self.volfrac = volfrac

        # set GUI callback
        self.init_gui(gui) 
Example 50
Project: fenics-topopt   Author: zfergus   File: problem.py    MIT License
def build_indices(self, nelx, nely):
        """ FE: Build the index vectors for the for coo matrix format. """
        self.KE = self.lk()
        self.edofMat = np.zeros((nelx * nely, 8), dtype=int)
        for elx in range(nelx):
            for ely in range(nely):
                el = ely + elx * nely
                n1 = (nely + 1) * elx + ely
                n2 = (nely + 1) * (elx + 1) + ely
                self.edofMat[el, :] = np.array([2 * n1 + 2, 2 * n1 + 3,
                    2 * n2 + 2, 2 * n2 + 3, 2 * n2, 2 * n2 + 1, 2 * n1,
                    2 * n1 + 1])
        # Construct the index pointers for the coo format
        self.iK = np.kron(self.edofMat, np.ones((8, 1))).flatten()
        self.jK = np.kron(self.edofMat, np.ones((1, 8))).flatten()