Python numpy.array() Examples

The following are code examples showing how to use numpy.array(), drawn from open source Python projects.
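
As a quick orientation before the project examples (this snippet is a minimal sketch of my own, not from any of the projects below): numpy.array() converts Python sequences into ndarray objects, optionally with an explicit dtype.

import numpy as np

a = np.array([1, 2, 3])                   # 1-D array from a list; dtype inferred
b = np.array([[1.0, 2.0], [3.0, 4.0]])    # 2-D array of shape (2, 2)
c = np.array([1, 2, 3], dtype='float32')  # explicit dtype
print(a.shape, b.shape, c.dtype)          # -> (3,) (2, 2) float32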

Example 1
Project: b2ac   Author: hbldh   File: ellipse.py    MIT License
def polygonize(self, n=73):
        """Gets a approximate polygon array representing the ellipse.

        Note that the last point is the same as the first point, creating a closed
        polygon.

        :param n: The number of points to generate. Default is 73 (one vertex every 5 degrees).
        :type n: int
        :return: An [n x 2] numpy array, describing the boundary vertices of
                 the polygonized ellipse.
        :rtype: :py:class:`numpy.ndarray`

        """
        t = np.linspace(0, 2 * np.pi, num=n, endpoint=True)
        out = np.zeros((len(t), 2), dtype='float')
        out[:, 0] = (self.center_point[0] +
                     self.radii[0] * np.cos(t) * np.cos(self.rotation_angle) -
                     self.radii[1] * np.sin(t) * np.sin(self.rotation_angle))
        out[:, 1] = (self.center_point[1] +
                     self.radii[0] * np.cos(t) * np.sin(self.rotation_angle) +
                     self.radii[1] * np.sin(t) * np.cos(self.rotation_angle))
        return out 
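
The parametric form above can be exercised outside the class as well; a minimal standalone sketch (the center, radii, and rotation angle here are arbitrary illustration values):

import numpy as np

center, radii, angle = (50.0, 75.0), (50.0, 20.0), 0.707
t = np.linspace(0, 2 * np.pi, num=73, endpoint=True)
out = np.zeros((len(t), 2), dtype='float')
out[:, 0] = (center[0] + radii[0] * np.cos(t) * np.cos(angle) -
             radii[1] * np.sin(t) * np.sin(angle))
out[:, 1] = (center[1] + radii[0] * np.cos(t) * np.sin(angle) +
             radii[1] * np.sin(t) * np.cos(angle))
assert np.allclose(out[0], out[-1])  # endpoint=True closes the polygon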
Example 2
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License
def __init__(self, input_wave_file, output_wave_file, target_phrase):
        self.pop_size = 100
        self.elite_size = 10
        self.mutation_p = 0.005
        self.noise_stdev = 40
        self.noise_threshold = 1
        self.mu = 0.9
        self.alpha = 0.001
        self.max_iters = 3000
        self.num_points_estimate = 100
        self.delta_for_gradient = 100
        self.delta_for_perturbation = 1e3
        self.input_audio = load_wav(input_wave_file).astype(np.float32)
        self.pop = np.expand_dims(self.input_audio, axis=0)
        self.pop = np.tile(self.pop, (self.pop_size, 1))
        self.output_wave_file = output_wave_file
        self.target_phrase = target_phrase
        self.funcs = self.setup_graph(self.pop, np.array([toks.index(x) for x in target_phrase])) 
Example 3
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm4_crf_train.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
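
For reference, the input format this parser implies (here and in the many sibling training scripts below): each non-empty line carries FLAGS.max_sentence_len token ids followed by the same number of label ids, space-separated. A tiny hypothetical illustration with max_sentence_len = 2:

max_sentence_len = 2                      # hypothetical value for illustration
ss = "12 7 0 1".split(" ")                # 2 token ids followed by 2 label ids
lx = [int(ss[i]) for i in range(max_sentence_len)]
ly = [int(ss[i + max_sentence_len]) for i in range(max_sentence_len)]
print(lx, ly)                             # -> [12, 7] [0, 1]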
Example 4
Project: speed_estimation   Author: NeilNie   File: speed_predictor.py    MIT License
def predict_speed(self, image):

        """
        Predict the speed for the given image. Only returns a prediction once
        len(self.inputs) == configs.LENGTH; returns 0.0 while the buffer fills.
        :param image:   input image
        :return:        predicted speed
        """

        image = cv2.resize(image, (configs.IMG_WIDTH, configs.IMG_HEIGHT))

        if len(self.inputs) < configs.LENGTH:
            self.inputs.append(image)

        if len(self.inputs) == configs.LENGTH:
            prediction = self.model.model.predict(np.array([self.inputs]))[0][0]
            self.inputs.pop(0)
            return prediction

        if len(self.inputs) > configs.LENGTH:
            raise ValueError("Input length can't be longer than network input length")

        return 0.0 
Example 5
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: BasePythonDataLayer.py    BSD 2-Clause "Simplified" License
def setup(self, bottom, top):
        layer_params = yaml.load(self.param_str)
        self._layer_params = layer_params
        # default batch_size = 256
        self._batch_size = int(layer_params.get('batch_size', 256))
        self._resize = layer_params.get('resize', -1)
        self._mean_file = layer_params.get('mean_file', None)
        self._source_type = layer_params.get('source_type', 'CSV')
        self._shuffle = layer_params.get('shuffle', False)
        # read image_mean from file and preload all data into memory
        # will read either file or array into self._mean
        self.set_mean()
        self.preload_db()
        self._compressed = self._layer_params.get('compressed', True)
        if not self._compressed:
            self.decompress_data() 
Example 6
Project: b2ac   Author: hbldh   File: matrix_algorithms.py    MIT License
def QR_factorisation_Householder_double(A):
    """Perform QR factorisation in double floating-point precision.

    :param A: The matrix to factorise.
    :type A: :py:class:`numpy.ndarray`
    :returns: The matrix Q and the matrix R.
    :rtype: tuple

    """
    A = np.array(A, 'float')

    n, m = A.shape
    V = np.zeros_like(A, 'float')
    for k in xrange(n):
        V[k:, k] = A[k:, k].copy()
        V[k, k] += np.sign(V[k, k]) * np.linalg.norm(V[k:, k], 2)
        V[k:, k] /= np.linalg.norm(V[k:, k], 2)
        A[k:, k:] -= 2 * np.outer(V[k:, k], np.dot(V[k:, k], A[k:, k:]))
    R = np.triu(A[:n, :n])

    Q = np.eye(m, n)
    for k in xrange((n - 1), -1, -1):
        Q[k:, k:] -= np.dot((2 * (np.outer(V[k:, k], V[k:, k]))), Q[k:, k:])
    return Q, R 
Example 7
Project: iglovikov_segmentation   Author: ternaus   File: dataset.py    MIT License
def __getitem__(self, idx):
        image_path = self.image_paths[idx]

        image = load_rgb(image_path, lib=self.imread_library)

        # apply transformations
        normalized_image = self.transform(image=image)["image"]

        if self.factor is not None:
            normalized_image, pads = pad(normalized_image, factor=self.factor)

            return {
                "image_id": image_path.stem,
                "features": tensor_from_rgb_image(normalized_image),
                "pads": np.array(pads),
            }

        return {"image_id": image_path.stem, "features": tensor_from_rgb_image(normalized_image)} 
Example 8
Project: multi-embedding-cws   Author: wangjksjtu   File: pw_lstm_crf_train.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
Example 9
Project: multi-embedding-cws   Author: wangjksjtu   File: pw_lstm3_crf_train.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
Example 10
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_cnn_train.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
Example 11
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_train.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
Example 12
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_time_paper.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
Example 13
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_crf_train.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
Example 14
Project: multi-embedding-cws   Author: wangjksjtu   File: nopy_fc_lstm3_crf_train.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
Example 15
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm_crf_train_paper.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
Example 16
Project: multi-embedding-cws   Author: wangjksjtu   File: nowubi_fc_lstm3_crf_train.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
Example 17
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_train_paper.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
Example 18
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_train.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
Example 19
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm3_crf_train.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
Example 20
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm_crf_train.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
Example 21
Project: multi-embedding-cws   Author: wangjksjtu   File: nowubi_share_lstm3_crf_train.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
Example 22
Project: multi-embedding-cws   Author: wangjksjtu   File: nopy_share_lstm3_crf_train.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
Example 23
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_time.py    MIT License
def do_load_data(path):
    x = []
    y = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.rstrip()
        if not line:
            continue
        ss = line.split(" ")
        assert (len(ss) == (FLAGS.max_sentence_len * 2))
        lx = []
        ly = []
        for i in range(FLAGS.max_sentence_len):
            lx.append(int(ss[i]))
            ly.append(int(ss[i + FLAGS.max_sentence_len]))
        x.append(lx)
        y.append(ly)
    fp.close()
    return np.array(x), np.array(y) 
Example 24
Project: multi-embedding-cws   Author: wangjksjtu   File: baseline_crf_seg.py    MIT License
def TransRawData(test_data, vob_char_dict, MAX_LEN):
    inp = open(test_data, 'r')
    X_char = []

    for line in inp:
        ustr = line.decode("utf-8").strip()
        lX_char = []
        for char in ustr:
            if vob_char_dict.has_key(char):
                lX_char.append(vob_char_dict[char])
            else:
                lX_char.append(vob_char_dict[u"<UNK>"])

        for _ in xrange(len(ustr), MAX_LEN):
            lX_char.append(0)

        X_char.append(lX_char)

    inp.close()
    return np.array(X_char) 
Example 25
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: TripletDataLayer.py    BSD 2-Clause "Simplified" License
def get_next_minibatch(self):
        if self._prefetch:
            # get mini-batch from prefetcher
            batch = self._conn.recv()
        else:
            # generate using in-thread functions
            data = []
            p_data = []
            n_data = []
            label = []
            for i in range(self._batch_size):
                datum_ = self.get_a_datum()
                data.append(datum_[0])
                p_data.append(datum_[1])
                n_data.append(datum_[2])
                if len(datum_) == 4:
                    # datum and label / margin
                    label.append(datum_[-1])
            batch = [np.array(data),
                     np.array(p_data),
                     np.array(n_data)]
            if len(label):
                label = np.array(label).reshape(self._batch_size, 1, 1, 1)
                batch.append(label)
        return batch 
Example 26
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: TripletDataLayer.py    BSD 2-Clause "Simplified" License
def get_next_minibatch(self):
        # generate using in-thread functions
        data = []
        p_data = []
        n_data = []
        label = []
        for i in range(self._batch_size):
            datum_ = self.get_a_datum()
            # print(len(datum_), ":".join([str(x.shape) for x in datum_]))
            data.append(datum_[0])
            p_data.append(datum_[1])
            n_data.append(datum_[2])
            if len(datum_) == 4:
                # datum and label / margin
                label.append(datum_[-1])
        batch = [np.array(data),
                 np.array(p_data),
                 np.array(n_data)]
        if len(label):
            label = np.array(label).reshape(self._batch_size, 1, 1, 1)
            batch.append(label)
        return batch 
Example 27
Project: PEAKachu   Author: tbischler   File: consensus_peak.py    ISC License
def _get_coverage_for_replicon_peaks(self, replicon, lib_strand,
                                         pos_value_pairs, cons_values):
        for peak in self._replicon_peak_dict[replicon][lib_strand]:
            value_list = []
            peak_center = int((peak[0] + peak[1]) / 2)
            if self._consensus_length % 2 == 0:
                cons_start = peak_center - (
                    int(self._consensus_length / 2) - 1)
                cons_end = peak_center + int(self._consensus_length / 2)
            else:
                cons_start = peak_center - (
                    int(self._consensus_length / 2) - 1)
                cons_end = peak_center + (int(self._consensus_length / 2) + 1)
            for pos in range(cons_start, cons_end + 1):
                value_list.append(abs(pos_value_pairs.get(pos, 0.0)))
            cons_values += np.array(value_list) 
Example 28
Project: b2ac   Author: hbldh   File: matrix_algorithms.py    MIT License
def convert_to_Hessenberg_Givens_double(A):
    n, m = A.shape
    A = np.array(A, 'float')
    for i in xrange(m):
        for k in xrange(m - 1, i + 1, -1):
            c, s = Givens_rotation_double(A[k - 1, i], A[k, i])
            for j in xrange(m):
                tau_1 = A[k-1, j]
                tau_2 = A[k, j]
                A[k-1, j] = ((tau_1 * c) - (tau_2 * s))
                A[k, j] = ((tau_1 * s) + (tau_2 * c))
            for j in xrange(n):
                tau_1 = A[j, k-1]
                tau_2 = A[j, k]
                A[j, k-1] = ((tau_1 * c) - (tau_2 * s))
                A[j, k] = ((tau_1 * s) + (tau_2 * c))
    return np.triu(A, -1) 
Example 29
Project: b2ac   Author: hbldh   File: matrix_algorithms.py    MIT License
def convert_to_Hessenberg_Givens_int(A):
    m, n = A.shape
    A = np.array(A, 'int64')
    for i in xrange(m):
        for k in xrange(m - 1, i + 1, -1):
            c_n, s_n, denominator = Givens_rotation_int(A[k - 1, i], A[k, i])
            for j in xrange(m):
                tau_1 = A[k-1, j]
                tau_2 = A[k, j]
                A[k-1, j] = ((tau_1 * c_n) - (tau_2 * s_n)) // denominator
                A[k, j] = ((tau_1 * s_n) + (tau_2 * c_n)) // denominator
            for j in xrange(n):
                tau_1 = A[j, k-1]
                tau_2 = A[j, k]
                A[j, k-1] = ((tau_1 * c_n) - (tau_2 * s_n)) // denominator
                A[j, k] = ((tau_1 * s_n) + (tau_2 * c_n)) // denominator

    return np.triu(A, -1) 
Example 30
Project: b2ac   Author: hbldh   File: matrix_operations.py    MIT License
def matrix_add_symmetric(M, M_sym):
    """Add a regular matrix and a symmetric one.

    :param M: A [3x3] matrix to add with symmetric matrix.
    :type M: :py:class:`numpy.ndarray`
    :param M_sym: A [6x1] array to add with M.
    :type M_sym: :py:class:`numpy.ndarray`
    :return: The sum of the two matrices.
    :rtype: :py:class:`numpy.ndarray`

    """
    M[0, 0] += M_sym[0]
    M[0, 1] += M_sym[1]
    M[1, 0] += M_sym[1]
    M[0, 2] += M_sym[2]
    M[2, 0] += M_sym[2]

    M[1, 1] += M_sym[3]
    M[1, 2] += M_sym[4]
    M[2, 1] += M_sym[4]

    M[2, 2] += M_sym[5]

    return M 
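
The indexing above implies that the [6x1] symmetric operand is packed in upper-triangular row-major order, i.e. [M00, M01, M02, M11, M12, M22]. A quick equivalence check with made-up values (assumes matrix_add_symmetric from the example above is in scope):

import numpy as np

M = np.zeros((3, 3))
M_sym = np.array([4.0, 1.0, 0.5, 3.0, 0.2, 2.0])  # [M00, M01, M02, M11, M12, M22]
expected = np.array([[4.0, 1.0, 0.5],
                     [1.0, 3.0, 0.2],
                     [0.5, 0.2, 2.0]])
assert np.allclose(matrix_add_symmetric(M, M_sym), expected)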
Example 31
Project: b2ac   Author: hbldh   File: double.py    MIT License
def _calculate_M_and_T_double(points):
    """Part of the B2AC ellipse fitting algorithm, calculating the M and T
     matrices needed.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: Matrices M and T.
    :rtype: tuple

    """
    S = _calculate_scatter_matrix_double(points[:, 0], points[:, 1])
    S1 = S[:3, :3]
    S3 = np.array([S[3, 3], S[3, 4], S[3, 5], S[4, 4], S[4, 5], S[5, 5]])
    S3_inv = mo.inverse_symmetric_3by3_double(S3).reshape((3, 3))
    S2 = S[:3, 3:]
    T = -np.dot(S3_inv, S2.T)
    M_term_2 = np.dot(S2, T)
    M = S1 + M_term_2
    M[[0, 2], :] = M[[2, 0], :] / 2
    M[1, :] = -M[1, :]

    return M, T 
Example 32
Project: model-api-sequence   Author: evandowning   File: evaluation.py    GNU General Public License v3.0
def sequence_generator(fn,n):
    xSet = np.array([])
    ySet = np.array([])

    x = np.array([])
    y = np.array([])

    num = 0

    # Read in sample's sequences
    with open(fn, 'rb') as fr:
        for _ in range(n):
            t = pkl.load(fr)
            x = t[0]
            y = t[1]

            if len(xSet) == 0:
                xSet = x
                ySet = y
            else:
                xSet = np.vstack([xSet,x])
                ySet = np.append(ySet,y)

    return xSet,ySet 
Example 33
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License
def db(audio):
    if len(audio.shape) > 1:
        maxx = np.max(np.abs(audio), axis=1)
        return 20 * np.log10(maxx) if np.any(maxx != 0) else np.array([0])
    maxx = np.max(np.abs(audio))
    return 20 * np.log10(maxx) if maxx != 0 else np.array([0]) 
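
A small usage sketch for db() (my illustration; assumes the function above is in scope): a full-scale 16-bit signal peaks near 20*log10(2**15 - 1) ≈ 90.3 dB.

import numpy as np

tone = ((2**15 - 1) * np.sin(np.linspace(0, 2 * np.pi, 16000))).astype(np.float32)
print(db(tone))  # approximately 90.3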
Example 34
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License
def save_wav(audio, output_wav_file):
    wav.write(output_wav_file, 16000, np.array(np.clip(np.round(audio), -2**15, 2**15-1), dtype=np.int16))
    print('output dB', db(audio)) 
Example 35
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License
def setup_graph(self, input_audio_batch, target_phrase): 
        batch_size = input_audio_batch.shape[0]
        weird = (input_audio_batch.shape[1] - 1) // 320 
        logits_arg2 = np.tile(weird, batch_size)
        dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
        dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
        
        pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
        seq_len = np.tile(weird, batch_size).astype(np.int32)
        
        with tf.variable_scope('', reuse=tf.AUTO_REUSE):
            
            inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
            len_batch = tf.placeholder(tf.float32, name='b')
            arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape, name='c')
            arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape, name='d')
            arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape, name='e')
            len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')
            
            logits = get_logits(inputs, arg2_logits)
            target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
            ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32), inputs=logits, sequence_length=len_seq)
            decoded, _ = tf.nn.ctc_greedy_decoder(logits, arg2_logits, merge_repeated=True)
            
            sess = tf.Session()
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, "models/session_dump")
            
        func1 = lambda a, b, c, d, e, f: sess.run(ctcloss, 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        func2 = lambda a, b, c, d, e, f: sess.run([ctcloss, decoded], 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        return (func1, func2) 
Example 36
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License
def getctcloss(self, input_audio_batch, target_phrase, decode=False):
        batch_size = input_audio_batch.shape[0]
        weird = (input_audio_batch.shape[1] - 1) // 320 
        logits_arg2 = np.tile(weird, batch_size)
        dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
        dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
        
        pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
        seq_len = np.tile(weird, batch_size).astype(np.int32)

        if decode:
            return self.funcs[1](pass_in, batch_size, logits_arg2, dense_arg1, dense_arg2, seq_len)
        else:
            return self.funcs[0](pass_in, batch_size, logits_arg2, dense_arg1, dense_arg2, seq_len) 
Example 37
Project: Black-Box-Audio   Author: rtaori   File: tf_logits.py    MIT License
def compute_mfcc(audio, **kwargs):
    """
    Compute the MFCC for a given audio waveform. This is
    identical to how DeepSpeech does it, but does it all in
    TensorFlow so that we can differentiate through it.
    """

    batch_size, size = audio.get_shape().as_list()
    audio = tf.cast(audio, tf.float32)

    # 1. Pre-emphasizer, a high-pass filter
    audio = tf.concat((audio[:, :1], audio[:, 1:] - 0.97*audio[:, :-1], np.zeros((batch_size,1000),dtype=np.float32)), 1)

    # 2. windowing into frames of 320 samples, overlapping
    windowed = tf.stack([audio[:, i:i+400] for i in range(0,size-320,160)],1)

    # 3. Take the FFT to convert to frequency space
    ffted = tf.spectral.rfft(windowed, [512])
    ffted = 1.0 / 512 * tf.square(tf.abs(ffted))

    # 4. Compute the Mel windowing of the FFT
    energy = tf.reduce_sum(ffted,axis=2)+1e-30
    filters = np.load("filterbanks.npy").T
    feat = tf.matmul(ffted, np.array([filters]*batch_size,dtype=np.float32))+1e-30

    # 5. Take the DCT again, because why not
    feat = tf.log(feat)
    feat = tf.spectral.dct(feat, type=2, norm='ortho')[:,:,:26]

    # 6. Amplify high frequencies for some reason
    _,nframes,ncoeff = feat.get_shape().as_list()
    n = np.arange(ncoeff)
    lift = 1 + (22/2.)*np.sin(np.pi*n/22)
    feat = lift*feat
    width = feat.get_shape().as_list()[1]

    # 7. And now stick the energy next to the features
    feat = tf.concat((tf.reshape(tf.log(energy),(-1,width,1)), feat[:, :, 1:]), axis=2)
    
    return feat 
Example 38
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_seg_nowubi.py    MIT License
def TransRawData(test_data, vob_char_dict, vob_pinyin_dict, MAX_LEN):
    inp = open(test_data, 'r')
    X_char = []
    X_pinyin = []

    for line in inp:
        ustr = line.decode("utf-8").strip()
        lX_char = []
        lX_pinyin = []
        for char in ustr:
            if vob_char_dict.has_key(char):
                lX_char.append(vob_char_dict[char])
            else:
                lX_char.append(vob_char_dict[u"<UNK>"])
            if vob_pinyin_dict.has_key(char):
                lX_pinyin.append(vob_pinyin_dict[char])
            else:
                lX_pinyin.append(vob_pinyin_dict[u"<UNK>"])

        for _ in xrange(len(ustr), MAX_LEN):
            lX_char.append(0)
            lX_pinyin.append(0)

        X_char.append(lX_char)
        X_pinyin.append(lX_pinyin)

    inp.close()
    return np.array(X_char), np.array(X_pinyin) 
Example 39
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_seg_nopy.py    MIT License
def TransRawData(test_data, vob_char_dict, vob_pinyin_dict, vob_wubi_dict, MAX_LEN):
    inp = open(test_data, 'r')
    X_char = []
    X_pinyin = []
    X_wubi = []

    for line in inp:
        ustr = line.decode("utf-8").strip()
        lX_char = []
        lX_pinyin = []
        lX_wubi = []
        for char in ustr:
            if vob_char_dict.has_key(char):
                lX_char.append(vob_char_dict[char])
            else:
                lX_char.append(vob_char_dict[u"<UNK>"])
            if vob_pinyin_dict.has_key(char):
                lX_pinyin.append(vob_pinyin_dict[char])
            else:
                lX_pinyin.append(vob_pinyin_dict[u"<UNK>"])
            if vob_wubi_dict.has_key(char):
                lX_wubi.append(vob_wubi_dict[char])
            else:
                lX_wubi.append(vob_wubi_dict[u"<UNK>"])

        for _ in xrange(len(ustr), MAX_LEN):
            lX_char.append(0)
            lX_pinyin.append(0)
            lX_wubi.append(0)

        X_char.append(lX_char)
        X_pinyin.append(lX_pinyin)
        X_wubi.append(lX_wubi)

    inp.close()
    return np.array(X_char), np.array(X_pinyin), np.array(X_wubi) 
Example 40
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm_crf_seg_nowubi.py    MIT License
def TransRawData(test_data, vob_char_dict, vob_pinyin_dict, MAX_LEN):
    inp = open(test_data, 'r')
    X_char = []
    X_pinyin = []

    for line in inp:
        ustr = line.decode("utf-8").strip()
        lX_char = []
        lX_pinyin = []
        for char in ustr:
            if vob_char_dict.has_key(char):
                lX_char.append(vob_char_dict[char])
            else:
                lX_char.append(vob_char_dict[u"<UNK>"])
            if vob_pinyin_dict.has_key(char):
                lX_pinyin.append(vob_pinyin_dict[char])
            else:
                lX_pinyin.append(vob_pinyin_dict[u"<UNK>"])

        for _ in xrange(len(ustr), MAX_LEN):
            lX_char.append(0)
            lX_pinyin.append(0)

        X_char.append(lX_char)
        X_pinyin.append(lX_pinyin)

    inp.close()
    return np.array(X_char), np.array(X_pinyin) 
Example 41
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm_crf_seg.py    MIT License
def TransRawData(test_data, vob_char_dict, vob_pinyin_dict, vob_wubi_dict, MAX_LEN):
    inp = open(test_data, 'r')
    X_char = []
    X_pinyin = []
    X_wubi = []

    for line in inp:
        ustr = line.decode("utf-8").strip()
        lX_char = []
        lX_pinyin = []
        lX_wubi = []
        for char in ustr:
            if vob_char_dict.has_key(char):
                lX_char.append(vob_char_dict[char])
            else:
                lX_char.append(vob_char_dict[u"<UNK>"])
            if vob_pinyin_dict.has_key(char):
                lX_pinyin.append(vob_pinyin_dict[char])
            else:
                lX_pinyin.append(vob_pinyin_dict[u"<UNK>"])
            if vob_wubi_dict.has_key(char):
                lX_wubi.append(vob_wubi_dict[char])
            else:
                lX_wubi.append(vob_wubi_dict[u"<UNK>"])

        for _ in xrange(len(ustr), MAX_LEN):
            lX_char.append(0)
            lX_pinyin.append(0)
            lX_wubi.append(0)

        X_char.append(lX_char)
        X_pinyin.append(lX_pinyin)
        X_wubi.append(lX_wubi)

    inp.close()
    return np.array(X_char), np.array(X_pinyin), np.array(X_wubi) 
Example 42
Project: SyNEThesia   Author: RunOrVeith   File: live_viewer.py    MIT License
def __iter__(self):
        if self.stream is None or self.audio_controller is None:
            return
        else:
            for i in range(self.frames_per_buffer):
                data = self.stream.read(self.frames_per_buffer, exception_on_overflow=False)
                fmt = "<H"
                data = np.array(list(struct.iter_unpack(fmt, data)))
                yield self.feature_extractor(data) 
Example 43
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: TripletSampler.py    BSD 2-Clause "Simplified" License
def hard_negative_multilabel(self):
        """Hard Negative Sampling based on multilabel assumption

        Search for the negative sample with the largest distance (smallest
        similarity) to the anchor within self._k negative samples.
        """
        # During early iterations of sampling, use random sampling instead
        if self._iteration <= self._n:
            return self.random_multilabel()

        anchor_class_id, negative_class_id = np.random.choice(
            self._index.keys(), 2)
        anchor_id, positive_id = np.random.choice(
            self._index[anchor_class_id], 2)
        negative_ids = np.random.choice(
            self._index[negative_class_id], self._k)
        # calculate the negative with the smallest similarity to the anchor
        anchor_label = parse_label(self._labels[anchor_id])
        positive_label = parse_label(self._labels[positive_id])
        negative_labels = [parse_label(self._labels[negative_id]) for
                           negative_id in negative_ids]
        p_sim = intersect_sim(anchor_label, positive_label)
        n_sims = np.array(
            [intersect_sim(anchor_label, negative_label) for
             negative_label in negative_labels])
        min_sim_id = np.argmin(n_sims)
        negative_id = negative_ids[min_sim_id]
        n_sim = n_sims[min_sim_id]
        margin = p_sim - n_sim
        return (anchor_id, positive_id, negative_id, margin) 
Example 44
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: DataManager.py    BSD 2-Clause "Simplified" License
def load_all(self):
        """The function to load all data and labels

        Gives:
        data: the list of raw data, needs to be decompressed
              (e.g., raw JPEG string)
        labels: numpy array, with each element a string
        """
        start = time.time()
        print("Start Loading Data from BCF {}".format(
            'MEMORY' if self._bcf_mode == 'MEM' else 'FILE'))

        self._labels = np.loadtxt(self._label_fn).astype(str)

        if self._bcf.size() != self._labels.shape[0]:
            raise Exception("Number of samples in data"
                            "and labels are not equal")
        else:
            for idx in range(self._bcf.size()):
                datum_str = self._bcf.get(idx)
                self._data.append(datum_str)
        end = time.time()
        print("Loading {} samples Done: Time cost {} seconds".format(
            len(self._data), end - start))

        return self._data, self._labels 
Example 45
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: BasePythonDataLayer.py    BSD 2-Clause "Simplified" License
def get_next_minibatch(self):
        """Generate next mini-batch

        The return value is an array of numpy arrays: [data, label].
        The reshape function will be called based on the results of this
        function.

        Needs to be implemented in each subclass.
        """
        pass 
Example 46
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: TripletDataLayer.py    BSD 2-Clause "Simplified" License
def get_a_datum(self):
        """Get a datum:

        Sampling -> decode images -> stack numpy array
        """
        sample = self._sampler.sample()
        if self._compressed:
            datum_ = [
                extract_sample(self._data[id], self._mean, self._resize) for
                id in sample[:3]]
        else:
            datum_ = [self._data[id] for id in sample[:3]]
        if len(sample) == 4:
            datum_.append(sample[-1])
        return datum_ 
Example 47
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: TripletDataLayer.py    BSD 2-Clause "Simplified" License
def get_a_datum(self):
        """Get a datum:

        Sampling -> decode images -> stack numpy array
        """
        sample = self._sampler.sample()
        if self._compressed:
            datum_ = [
                extract_sample(self._data[id], self._mean, self._resize) for
                id in sample[:3]]
        else:
            datum_ = [self._data[id] for id in sample[:3]]
        if len(sample) == 4:
            datum_.append(sample[-1])
        return datum_ 
Example 48
Project: PEAKachu   Author: tbischler   File: coverage.py    ISC License
def _init_coverage_list(self, length):
        for strand in ["+", "-"]:
            self._coverages[strand] = np.array([0.0] * length) 
Example 49
Project: PEAKachu   Author: tbischler   File: tmm.py    ISC License
def calc_size_factors(self):
        # Convert pandas dataframe to R dataframe
        r_dge = r.DGEList(self.count_df)
        # Calculate normalization factors
        r_dge = r.calcNormFactors(r_dge, method="TMM")
        size_factors = (np.array(r_dge.rx2('samples')["lib.size"]) *
                        np.array(r_dge.rx2("samples")["norm.factors"]))
        # convert to pandas series
        size_factors = pd.Series(size_factors, index=self.count_df.columns)
        # adjust size factors so that the maximum is 1.0
        size_factors = size_factors/size_factors.max()
        return size_factors 
Example 50
Project: PEAKachu   Author: tbischler   File: count.py    ISC License
def count_reads_for_windows(self, replicon, strand, window_list):
        self._interval_tree = Intersecter()
        self._counts = np.array([0] * len(window_list))
        for ind, window in enumerate(window_list):
            self._interval_tree.add_interval(
                Interval(window[0],
                         window[1],
                         value=ind,
                         strand=strand))
        if self._paired_end:
            self._cache_read2(replicon)
        self._count_reads(replicon)
        return self._counts 
Example 51
Project: PEAKachu   Author: tbischler   File: count.py    ISC License
def count_reads_for_peaks(self, replicon, peak_list):
        self._interval_tree = Intersecter()
        self._counts = np.array([0] * len(peak_list))
        for ind, peak in enumerate(peak_list):
            self._interval_tree.add_interval(
                Interval(peak["peak_start"]-1,
                         peak["peak_end"],
                         value=ind,
                         strand=peak["peak_strand"]))
        if self._paired_end:
            self._cache_read2(replicon)
        self._count_reads(replicon)
        return self._counts 
Example 52
Project: b2ac   Author: hbldh   File: matrix_algorithms.py    MIT License
def QR_factorisation_Givens_double(A):

    n, m = A.shape
    R = np.array(A, dtype='float')
    Q = np.eye(n)
    for i in xrange(m - 1):
        for j in xrange(n - 1, i, -1):
            G = Givens_rotation_matrix_double(R[j - 1, i], R[j, i])
            R[(j - 1):(j + 1), :] = np.dot(G, R[(j - 1):(j + 1), :])
            Q[(j - 1):(j + 1), :] = np.dot(G, Q[(j - 1):(j + 1), :])
    return Q.T, np.triu(R) 
Example 53
Project: b2ac   Author: hbldh   File: matrix_algorithms.py    MIT License
def Givens_rotation_matrix_double(a, b):
    c, s = Givens_rotation_double(a, b)
    return np.array([[c, -s], [s, c]]) 
Example 54
Project: b2ac   Author: hbldh   File: matrix_operations.py    MIT License
def inverse_symmetric_3by3_double(M):
    """C style inverse of a symmetric, flattened 3 by 3 matrix, returning full matrix in
    floating-point, also in flattened format.

    :param M: The matrix to invert. Assumes an array of shape (6,).
    :type M: :py:class:`numpy.ndarray`
    :return: The inverse matrix, flattened.
    :rtype: :py:class:`numpy.ndarray`

    """

    determinant = 0
    adj_M = np.zeros((9,), dtype='float')

    # First row of adjugate matrix
    adj_M[0] = (M[3] * M[5] - (M[4] ** 2))  # Det #0
    adj_M[1] = -(M[1] * M[5] - M[4] * M[2])  # Det #1
    adj_M[2] = (M[1] * M[4] - M[3] * M[2])  # Det #2

    # Second row of adjugate matrix
    adj_M[3] = adj_M[1]
    adj_M[4] = (M[0] * M[5] - (M[2] ** 2))
    adj_M[5] = -(M[0] * M[4] - M[1] * M[2])

    # Third row of adjugate matrix
    adj_M[6] = adj_M[2]
    adj_M[7] = adj_M[5]
    adj_M[8] = (M[0] * M[3] - (M[1] ** 2))

    determinant += M[0] * adj_M[0]
    determinant += M[1] * adj_M[1]  # Using addition since minus is integrated in adjugate matrix.
    determinant += M[2] * adj_M[2]

    return adj_M / determinant 
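
A sanity check of the packed-symmetric inverse against numpy's general-purpose inverse (made-up values; assumes inverse_symmetric_3by3_double from the example above is in scope; the packing order implied by the indexing is [M00, M01, M02, M11, M12, M22]):

import numpy as np

M_packed = np.array([4.0, 1.0, 0.5, 3.0, 0.2, 2.0])
M_full = np.array([[4.0, 1.0, 0.5],
                   [1.0, 3.0, 0.2],
                   [0.5, 0.2, 2.0]])
assert np.allclose(inverse_symmetric_3by3_double(M_packed).reshape(3, 3),
                   np.linalg.inv(M_full))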
Example 55
Project: b2ac   Author: hbldh   File: matrix_operations.py    MIT License
def inverse_3by3_double(M):
    """C style inverse of a flattened 3 by 3 matrix, returning full matrix in
    floating-point, also in flattened format.

    :param M: The matrix to invert.
    :type M: :py:class:`numpy.ndarray`
    :return: The inverse matrix.
    :rtype: :py:class:`numpy.ndarray`

    """
    if len(M.shape) > 1:
        M = M.flatten()

    M = np.array(M, 'float')

    determinant = 0.
    adj_M = np.zeros((9,), 'float')

    # First row of adjugate matrix
    adj_M[0] = (M[4] * M[8] - M[7] * M[5])  # Det #0
    adj_M[1] = -(M[1] * M[8] - M[7] * M[2])
    adj_M[2] = (M[1] * M[5] - M[4] * M[2])

    # Second row of adjugate matrix
    adj_M[3] = -(M[3] * M[8] - M[6] * M[5])  # Det #1
    adj_M[4] = (M[0] * M[8] - M[6] * M[2])
    adj_M[5] = -(M[0] * M[5] - M[3] * M[2])

    # Third row of adjugate matrix
    adj_M[6] = (M[3] * M[7] - M[6] * M[4])  # Det #2
    adj_M[7] = -(M[0] * M[7] - M[6] * M[1])
    adj_M[8] = (M[0] * M[4] - M[3] * M[1])

    determinant += M[0] * adj_M[0]
    determinant += M[1] * adj_M[3]  # Using addition since minus is integrated in adjugate matrix.
    determinant += M[2] * adj_M[6]

    return (adj_M / determinant) 
Example 56
Project: b2ac   Author: hbldh   File: reference.py    MIT License
def fit_improved_B2AC(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.

    This version of the fitting uses float storage during calculations and performs the
    eigensolver on a float array.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    points = np.array(points, 'float')
    S = _calculate_scatter_matrix_py(points[:, 0], points[:, 1])
    S3 = S[3:, 3:]
    S3 = np.array([S3[0, 0], S3[0, 1], S3[0, 2], S3[1, 1], S3[1, 2], S3[2, 2]])
    S3_inv = inverse_symmetric_3by3_double(S3).reshape((3, 3))
    S2 = S[:3, 3:]
    T = -np.dot(S3_inv, S2.T)
    M = S[:3, :3] + np.dot(S2, T)
    inv_mat = np.array([[0, 0, 0.5], [0, -1, 0], [0.5, 0, 0]], 'float')
    M = inv_mat.dot(M)

    e_vals, e_vect = np.linalg.eig(M)

    try:
        elliptical_solution_index = np.where(((4 * e_vect[0, :] * e_vect[2, :]) - ((e_vect[1, :] ** 2))) > 0)[0][0]
    except IndexError:
        # No positive eigenvalues; the fit was not an ellipse.
        raise ArithmeticError("No elliptical solution found.")

    a = e_vect[:, elliptical_solution_index]
    if a[0] < 0:
        a = -a
    return np.concatenate((a, np.dot(T, a))) 
Example 57
Project: b2ac   Author: hbldh   File: reference.py    MIT License
def fit_improved_B2AC_int(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.

    This version of the fitting uses int64 storage during calculations and performs the
    eigensolver on an integer array.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    S = _calculate_scatter_matrix_c(points[:, 0], points[:, 1])
    S1 = np.array([S[0, 0], S[0, 1], S[0, 2], S[1, 1], S[1, 2], S[2, 2]])
    S3 = np.array([S[3, 3], S[3, 4], S[3, 5], S[4, 4], S[4, 5], S[5, 5]])
    adj_S3, det_S3 = inverse_symmetric_3by3_int(S3)
    S2 = S[:3, 3:]
    T_no_det = - np.dot(np.array(adj_S3.reshape((3, 3)), 'int64'), np.array(S2.T, 'int64'))
    M_term2 = np.dot(np.array(S2, 'int64'), T_no_det) // det_S3
    M = add_symmetric_matrix(M_term2, S1)
    M[[0, 2], :] /= 2
    M[1, :] = -M[1, :]

    e_vals, e_vect = np.linalg.eig(M)

    try:
        elliptical_solution_index = np.where(((4 * e_vect[0, :] * e_vect[2, :]) - ((e_vect[1, :] ** 2))) > 0)[0][0]
    except IndexError:
        # No positive eigenvalues; the fit was not an ellipse.
        raise ArithmeticError("No elliptical solution found.")
    a = e_vect[:, elliptical_solution_index]
    return np.concatenate((a, np.dot(T_no_det, a) / det_S3)) 
Example 58
Project: b2ac   Author: hbldh   File: int.py    MIT License
def fit_improved_B2AC_int(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.

    This version of the fitting uses int64 storage during calculations and performs the
    eigensolver on an integer array.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section coefficients array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    e_conds = []
    M, T_no_det, determinant_S3 = _calculate_M_and_T_int64(points)

    e_vals = sorted(QR_algorithm_shift_Givens_int(M)[0])

    a = None
    for ev_ind in [1, 2, 0]:
        # Find the eigenvector that matches this eigenvalue.
        eigenvector, e_norm = inverse_iteration_for_eigenvector_int(M, e_vals[ev_ind])
        # See if that eigenvector yields an elliptical solution.
        elliptical_condition = (4 * eigenvector[0] * eigenvector[2]) - (eigenvector[1] ** 2)
        e_conds.append(elliptical_condition)
        if elliptical_condition > 0:
            a = eigenvector
            break

    if a is None:
        raise ArithmeticError("No elliptical solution found.")

    conic_coefficients = np.concatenate((a, np.dot(T_no_det, a) // determinant_S3))
    return conic_coefficients 
Example 59
Project: b2ac   Author: hbldh   File: int.py    MIT License
def _calculate_M_and_T_int64(points):
    """Part of the B2AC ellipse fitting algorithm, calculating the M and T
     matrices needed.

     This integer implementation also returns the determinant of the
     scatter matrix, which hasn't been applied to the matrix T yet.

     M is exact in integer values, but is truncated towards zero compared
     to the double version.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: M, T undivided by determinant and the determinant.
    :rtype: tuple

    """
    S = _calculate_scatter_matrix_c(points[:, 0], points[:, 1])
    # Extract the symmetric parts of the S matrix.
    S1 = np.array([S[0, 0], S[0, 1], S[0, 2], S[1, 1], S[1, 2], S[2, 2]], dtype='int64')
    S3 = np.array([S[3, 3], S[3, 4], S[3, 5], S[4, 4], S[4, 5], S[5, 5]], dtype='int64')

    adj_S3, det_S3 = inverse_symmetric_3by3_int64(S3)
    S2 = S[:3, 3:]

    T_no_det = - np.dot(np.array(adj_S3.reshape((3, 3)), 'int64'), np.array(S2.T, 'int64'))
    T_no_det, det_S3 = scale_T_matrix(T_no_det, det_S3)
    M_term_2 = np.dot(np.array(S2, 'int64'), T_no_det) // det_S3
    M = matrix_add_symmetric(M_term_2, S1)
    M[[0, 2], :] //= 2
    M[1, :] = -M[1, :]

    return M, T_no_det, det_S3 
Example 60
Project: b2ac   Author: hbldh   File: point.py    MIT License
def __init__(self, point):
        """Constructor for B2ACEllipse"""
        super(B2ACPoint, self).__init__()
        if len(point) != 2:
            raise ValueError("Only 2D-points supported.")
        self.point = np.array(point, 'float') 
Example 61
Project: b2ac   Author: hbldh   File: polygon.py    MIT License
def __init__(self, points):
        """Constructor for B2ACPolygon"""
        super(B2ACPolygon, self).__init__()

        self.polygon_points = np.array(points, 'float')
        if self.polygon_points.shape[1] != 2:
            raise ValueError("Polygon must be entered as a [n x 2] array, i.e. a 2D polygon.") 
Example 62
Project: b2ac   Author: hbldh   File: polygon.py    MIT License
def get_center_point(self, use_centroid=True):
        """Returns a center of weight for the object.

        :param use_centroid: Uses a centroid finding method instead of pure mean of vertices.
        :type use_centroid: bool

        """
        if use_centroid:
            with warnings.catch_warnings(record=False) as w:
                # Suppress all warnings inside this block.
                warnings.simplefilter("ignore")

                pnt_array = self.get_closed_polygon()

                A = self._area_help_function()
                D = (pnt_array[:-1, 0] * pnt_array[1:, 1] -
                     pnt_array[1:, 0] * pnt_array[:-1, 1])

                c_x = ((pnt_array[:-1, 0] + pnt_array[1:, 0]) * D).sum() / (6 * A)
                c_y = ((pnt_array[:-1, 1] + pnt_array[1:, 1]) * D).sum() / (6 * A)

                if np.isnan(c_x) or np.isinf(c_x) or np.isnan(c_y) or np.isinf(c_y):
                    # If the centroid calculation fails (e.g. due to a zero-valued
                    # area), use the mean of the vertices as the center point instead.
                    return np.mean(self.get_open_polygon(), 0)
                else:
                    return np.array([c_x, c_y])
        else:
            return np.mean(self.get_open_polygon(), 0) 
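
The centroid branch above is the standard polygon-centroid formula with cross terms D_i = x_i*y_{i+1} - x_{i+1}*y_i and signed area A = sum(D)/2; a standalone check of the same arithmetic on a closed unit square (my illustration, not project code):

import numpy as np

pnt = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]])
D = pnt[:-1, 0] * pnt[1:, 1] - pnt[1:, 0] * pnt[:-1, 1]
A = D.sum() / 2                                # signed area; 1.0 for this square
c_x = ((pnt[:-1, 0] + pnt[1:, 0]) * D).sum() / (6 * A)
c_y = ((pnt[:-1, 1] + pnt[1:, 1]) * D).sum() / (6 * A)
print(c_x, c_y)                                # -> 0.5 0.5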
Example 63
Project: b2ac   Author: hbldh   File: polygon.py    MIT License
def get_closed_polygon(self):
        """Appends the first point to the end of point array, in order to "close" the polygon."""
        if not self.is_closed:
            return np.concatenate([self.polygon_points, [self.polygon_points[0, :]]])
        else:
            return self.polygon_points 
Example 64
Project: b2ac   Author: hbldh   File: inverse_iteration.py    MIT License
def inverse_iteration_for_eigenvector_double(A, eigenvalue, n_iterations=1):
    """Performs a series of inverse iteration steps with a known
    eigenvalue to produce its eigenvector.

    :param A: The 3x3 matrix to which the eigenvalue belongs.
    :type A: :py:class:`numpy.ndarray`
    :param eigenvalue: One eigenvalue of the matrix A.
    :type eigenvalue: float
    :param n_iterations: Number of iterations to perform the multiplication
     with the inverse. For an accurate eigenvalue, one iteration is enough
     for a ~1e-6 correct eigenvector. More than five is usually unnecessary.
    :type n_iterations: int
    :return: The eigenvector of this matrix and eigenvalue combination.
    :rtype: :py:class:`numpy.ndarray`

    """
    A = np.array(A, 'float')
    # Subtract the eigenvalue from the diagonal entries of the matrix.
    # N.B. Also slightly perturb the eigenvalue so the matrix will
    # not be so close to singular!
    for k in xrange(A.shape[0]):
        A[k, k] -= eigenvalue + 0.001
    # Obtain the inverse of the matrix.
    A_inv = mo.inverse_3by3_double(A).reshape((3, 3))
    # Instantiate the eigenvector to iterate with.
    eigenvector = np.ones((A.shape[0], ), 'float')
    eigenvector /= np.linalg.norm(eigenvector)
    # Perform the desired number of iterations.
    for k in xrange(n_iterations):
        eigenvector = np.dot(A_inv, eigenvector)
        eigenvector /= np.linalg.norm(eigenvector)

    if np.any(np.isnan(eigenvector)) or np.any(np.isinf(eigenvector)):
        print("Nan and/or Infs in eigenvector!")

    if (eigenvector[0] < 0) and (eigenvector[2] < 0):
        eigenvector = -eigenvector
    return eigenvector 
Example 65
Project: b2ac   Author: hbldh   File: inverse_iteration.py    MIT License
def inverse_iteration_for_eigenvector_int(A, eigenvalue):
    """Performs a series of inverse iteration steps with a known
    eigenvalue to produce its eigenvector.

    :param A: The 3x3 matrix to which the eigenvalue belongs.
    :type A: :py:class:`numpy.ndarray`
    :param eigenvalue: One approximate eigenvalue of the matrix A.
    :type eigenvalue: int
    :return: The eigenvector of this matrix and eigenvalue combination.
    :rtype: :py:class:`numpy.ndarray`

    """
    A = np.array(A, 'int64')

    # Subtract the eigenvalue from the diagonal entries of the matrix.
    for k in xrange(A.shape[0]):
        A[k, k] -= eigenvalue
    A, scale = fp.scale_64bit_matrix(A)

    # Obtain the inverse of the matrix.
    adj_A = mo.inverse_3by3_int64(A.flatten(), False)
    eigenvector = adj_A.reshape((3, 3)).sum(1)
    eigenvector, scale = fp.scale_64bit_vector(eigenvector)

    e_norm = int(np.sqrt((eigenvector ** 2).sum()))
    if (eigenvector[0] < 0) and (eigenvector[2] < 0):
        eigenvector = -eigenvector
    return eigenvector, e_norm 
Example 66
Project: b2ac   Author: hbldh   File: test_ext.py    MIT License
def setup(self):
        self.e = B2ACEllipse(center=(50.0, 75.0), radii=(50.0, 20.0), rotation_angle=0.707)
        self.points = np.array(self.e.polygonize(), 'int32') 
Example 67
Project: b2ac   Author: hbldh   File: test_double.py    MIT License
def setup(self):
        self.e = B2ACEllipse(center=(50.0, 75.0), radii=(50.0, 20.0), rotation_angle=0.707)
        self.points = np.array(self.e.polygonize(), 'int32')

#    def test_fit_numpy_version(self):
#        # Fails
#        output = fit.fit_improved_B2AC_numpy(self.points.copy())
#        e_fitted = B2ACEllipse(*output)
#        assert np.linalg.norm(self.e.center_point - e_fitted.center_point) < 1
#        assert np.linalg.norm(max(self.e.radii) - max(e_fitted.radii)) < 0.25
#        assert np.linalg.norm(min(self.e.radii) - min(e_fitted.radii)) < 0.25
#        assert overlap(self.e, e_fitted) > 0.98
#        assert overlap(e_fitted, self.e) > 0.98 
Example 68
Project: b2ac   Author: hbldh   File: test_int.py    MIT License 5 votes vote down vote up
def setup(self):
        self.e = B2ACEllipse(center=(50.0, 75.0), radii=(50.0, 20.0), rotation_angle=0.707)
        self.points = np.array(self.e.polygonize(), 'int32') 
Example 69
Project: meta-transfer-learning   Author: erfaneshrati   File: miniimagenet.py    MIT License 5 votes vote down vote up
def _read_image(self, name):
        if name in self._cache:
            return self._cache[name].astype('float32') / 0xff
        with open(os.path.join(self.dir_path, name), 'rb') as in_file:
            img = Image.open(in_file).resize((84, 84)).convert('RGB')
            self._cache[name] = np.array(img)
            return self._read_image(name) 
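The pattern here caches the raw uint8 pixels once and normalizes to [0, 1] floats on every read; the recursive call simply re-enters the cache-hit branch. The same idea without the recursion, as a minimal sketch (the PIL usage matches the example; the path argument is hypothetical):

import numpy as np
from PIL import Image

_cache = {}

def read_image(path):
    # Cache the raw uint8 array; normalize to [0, 1] float32 on each read.
    if path not in _cache:
        with open(path, 'rb') as in_file:
            img = Image.open(in_file).resize((84, 84)).convert('RGB')
            _cache[path] = np.array(img)  # uint8, shape (84, 84, 3)
    return _cache[path].astype('float32') / 0xff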
Example 70
Project: kicker-module   Author: EvanTheB   File: graph.py    GNU General Public License v3.0 5 votes vote down vote up
def graph_skill_topn(p, g, ladder, n):
    def get_names_skill(data):
        ret = []
        for l in data:
            ret.append((l.name, l.extra[1][1]))
        return ret

    def get_top_n(data):
        tup = sorted(data, key=lambda x: float(x[1]))
        names = [t[0] for t in tup[-n:]]
        return names

    graph_data = []
    for i in range(1, len(g) + 1):
        data = ladder.process(p, g[0:i])
        graph_data.append(get_names_skill(data))

    names = sorted(get_top_n(graph_data[-1]))
    graph_data = [sorted(x) for x in get_subset(graph_data, names)]

    ys = data_to_yarr(graph_data)
    for i in range(len(ys)):
        y = np.array(ys[i])
        plt.plot(range(len(y)), y, label=names[i])
    plt.legend(loc=3, ncol=max(1, len(names) // 2))  # ncol must be an integer
    pylab.savefig('graph_skill_topn.svg')
    # plt.show()
Example 71
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: DataManager.py    BSD 2-Clause "Simplified" License 4 votes vote down vote up
def load_all(self):
        """The function to load all data and labels

        Gives:
        data: the list of raw data, needs to be decompressed
              (e.g., raw JPEG string)
        labels: numpy array of strings, to support multiple labels
        """
        start = time.time()
        print("Start Loading Data from CSV File {}".format(
            self._source_fn))
        # split the csv on any run of spaces, tabs, or commas
        sep = r'[\s,]+'
        try:
            df = pd.read_csv(self._source_fn, sep=sep, engine='python',
                             header=self._header)
            print("Totally {} rows loaded to parse...".format(
                len(df.index)
            ))
            # parse df to get image file name and label
            for ln in df.iterrows():
                # for each row, the first column is file name, then labels
                fn_ = ln[1][0]
                if self._root:
                    fn_ = os.path.join(self._root, fn_)
                if not os.path.exists(fn_):
                    print("File {} does not exist, skip".format(fn_))
                    continue
                # read labels: the first column is image file name
                # and others are labels (one or more)
                label_ = ln[1][1:].values
                if len(label_) == 1:
                    label_ = label_[0]
                else:
                    label_ = ":".join([str(x) for x in label_.astype(int)])
                self._labels.append(str(label_))
                # open file as binary and read in
                with open(fn_, 'rb') as image_fp:
                    datum_str_ = image_fp.read()
                    self._data.append(datum_str_)
        except Exception:
            print("{}: {}".format(sys.exc_info()[1], fn_))
            raise Exception("Error in parsing input file")
        end = time.time()
        self._labels = np.array(self._labels)
        print("Loading {} samples Done: Time cost {} seconds".format(
            len(self._data), end - start))

        return self._data, self._labels 
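The flexible separator is the main trick above: passing a regular expression as sep forces pandas onto the slower python engine, but lets a single reader accept space-, tab-, or comma-delimited files. A self-contained sketch (the three-row file below is made up):

import io
import pandas as pd

# Three rows, three different delimiters, one regex separator.
text = "img0.jpg 3\nimg1.jpg\t5\nimg2.jpg,7\n"
df = pd.read_csv(io.StringIO(text), sep=r'[\s,]+', engine='python', header=None)
print(df[0].tolist())  # ['img0.jpg', 'img1.jpg', 'img2.jpg']
print(df[1].tolist())  # [3, 5, 7]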
Example 72
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: DataManager.py    BSD 2-Clause "Simplified" License 4 votes vote down vote up
def load_all(self):
        """The function to load all data and labels

        Gives:
        data: the list of raw data, needs to be decompressed
              (e.g., raw JPEG string)
        labels: 0-based labels, as a numpy array
        """
        start = time.time()
        print("Start Loading Data from CSV File {}".format(
            self._source_fn))
        try:
            db_ = lmdb.open(self._source_fn)
            data_cursor_ = db_.begin().cursor()
            if self._label_fn:
                label_db_ = lmdb.open(self._label_fn)
                label_cursor_ = label_db_.begin().cursor()
            # begin reading data
            if self._label_fn:
                label_cursor_.first()
            while data_cursor_.next():
                value_str = data_cursor_.value()
                datum_ = caffe_pb2.Datum()
                datum_.ParseFromString(value_str)
                self._data.append(datum_.data)
                if self._label_fn:
                    label_cursor_.next()
                    label_datum_ = caffe_pb2.Datum()
                    label_datum_.ParseFromString(label_cursor_.value())
                    label_ = caffe.io.datum_to_array(label_datum_)
                    label_ = ":".join([str(x) for x in label_.astype(int)])
                else:
                    label_ = str(datum_.label)
                self._labels.append(label_)
            # close all db
            db_.close()
            if self._label_fn:
                label_db_.close()
        except Exception:
            raise Exception("Error in parsing input file")
        end = time.time()
        self._labels = np.array(self._labels)
        print("Loading {} samples Done: Time cost {} seconds".format(
            len(self._data), end - start))

        return self._data, self._labels 
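One subtlety above: a fresh LMDB cursor is unpositioned, so the first data_cursor_.next() lands on the first record rather than skipping it. The more idiomatic py-lmdb pattern is to iterate the cursor directly; a minimal sketch, assuming an existing database at a hypothetical path:

import lmdb

env = lmdb.open('/path/to/data_lmdb', readonly=True, lock=False)  # hypothetical path
with env.begin() as txn:
    for key, value in txn.cursor():
        # Keys and values are bytes; the example above parses each value
        # into a caffe Datum protobuf with ParseFromString().
        print(key, len(value))
env.close()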
Example 73
Project: b2ac   Author: hbldh   File: matrix_algorithms.py    MIT License 4 votes vote down vote up
def convert_to_Hessenberg_double(A):
    """ Tridiagonalize a square matrix to upper Hessenberg form using Householder reflections.

    Costs 10/3 * n^3 + O(n^2) operations, where n is the size of the matrix.

    .. code-block:: matlab

        function A = mytridiag(A)

        [m,n] = size(A);
        if (m ~= n)
            error('Input matrix is not square.')
        end

        for k = 1:(m - 2)
            vk = A((k+1):m,k);
            vk(1) = vk(1) + sign(vk(1)) * norm(vk);
            vk = vk / norm(vk);
            A((k+1):m,k:m) = A((k+1):m,k:m) - ...
                                2 * vk * (vk' * A((k+1):m,k:m));
            A(1:m,(k+1):m) = A(1:m,(k+1):m) - ...
                               2 * (A(1:m,(k+1):m) * vk) * vk';
        end

        end

    :param A: The matrix to convert.
    :type A: :py:class:`numpy.ndarray`
    :return: The Hessenberg matrix.
    :rtype: :py:class:`numpy.ndarray`

    """
    m, n = A.shape
    A = np.array(A, 'float')

    for k in range(m - 1):
        vk = A[(k + 1):, k].copy()
        vk[0] += np.sign(vk[0]) * np.linalg.norm(vk, 2)
        vk /= np.linalg.norm(vk, 2)
        A[(k + 1):, k:] -= 2 * np.outer(vk, np.dot(vk, A[(k + 1):, k:]))
        A[:, (k + 1):] -= 2 * np.outer(np.dot(A[:, (k + 1):], vk), vk)

    return np.triu(A, -1) 
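Because each reflection is applied from both sides, the reduction is a similarity transform, so the output must share the input's characteristic polynomial while being zero below the first subdiagonal. A quick check, assuming convert_to_Hessenberg_double above is in scope (the random test matrix is arbitrary):

import numpy as np

A = np.random.RandomState(0).rand(5, 5)
H = convert_to_Hessenberg_double(A)

# Hessenberg structure: nothing below the first subdiagonal.
assert np.allclose(np.tril(H, -2), 0)
# Similarity transform: the characteristic polynomial is unchanged.
assert np.allclose(np.poly(A), np.poly(H))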
Example 74
Project: b2ac   Author: hbldh   File: matrix_operations.py    MIT License 4 votes vote down vote up
def inverse_symmetric_3by3_int64(M):
    """C style inverse of a symmetric, flattened 3 by 3 matrix, represented
    by the six unique values

    .. code-block:: python

        np.array([S[0, 0], S[0, 1], S[0, 2], S[1, 1], S[1, 2], S[2, 2]])

    Returns the flattened full adjugate matrix and the determinant, s.t.

    .. math::

        M^{-1} = \\frac{1}{\\det(M)} \\cdot \\text{adj}(M).


    For integer matrices, this then returns exact results by deferring
    the division.

    :param M: The matrix to invert. Assumes array with shape (6,).
    :type M: :py:class:`numpy.ndarray`
    :return: The adjugate flattened matrix and the determinant.
    :rtype: tuple

    """

    determinant = 0
    adj_M = np.zeros((9,), dtype='int64')

    # First row of adjugate matrix
    adj_M[0] = (M[3] * M[5] - (M[4] ** 2))  # Det #0
    adj_M[1] = -(M[1] * M[5] - M[4] * M[2])  # Det #1
    adj_M[2] = (M[1] * M[4] - M[3] * M[2])  # Det #2

    # Second row of adjugate matrix
    adj_M[3] = adj_M[1]
    adj_M[4] = (M[0] * M[5] - (M[2] ** 2))
    adj_M[5] = -(M[0] * M[4] - M[1] * M[2])

    # Third row of adjugate matrix
    adj_M[6] = adj_M[2]
    adj_M[7] = adj_M[5]
    adj_M[8] = (M[0] * M[3] - (M[1] ** 2))

    determinant += np.int64(M[0]) * np.int64(adj_M[0])
    determinant += np.int64(M[1]) * np.int64(adj_M[1])  # Using addition since minus is integrated in adjugate matrix.
    determinant += np.int64(M[2]) * np.int64(adj_M[2])

    return adj_M, determinant 
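Since M^-1 = adj(M) / det(M), multiplying the adjugate back onto the original matrix must give det(M) times the identity, exactly, in integer arithmetic; that is what lets the integer pipeline defer the division. A small check, assuming the function above is in scope (the symmetric test matrix is arbitrary):

import numpy as np

S = np.array([[5, 2, 1],
              [2, 6, 3],
              [1, 3, 7]], dtype='int64')
# The six unique values, in the layout the function expects.
M = np.array([S[0, 0], S[0, 1], S[0, 2], S[1, 1], S[1, 2], S[2, 2]])

adj_M, det = inverse_symmetric_3by3_int64(M)
adj_M = adj_M.reshape((3, 3))

assert det == 143  # matches int(round(np.linalg.det(S)))
assert np.array_equal(S.dot(adj_M), det * np.eye(3, dtype='int64'))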
Example 75
Project: b2ac   Author: hbldh   File: reference.py    MIT License 4 votes vote down vote up
def fit_improved_B2AC_numpy(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.

    This version of the fitting simply applies NumPy:s methods for calculating
    the conic section, modelled after the Matlab code in the paper:

    .. code-block:: matlab

        function a = fit_ellipse(x, y)

        D1 = [x .^ 2, x .* y, y .^ 2]; % quadratic part of the design matrix
        D2 = [x, y, ones(size(x))]; % linear part of the design matrix
        S1 = D1' * D1; % quadratic part of the scatter matrix
        S2 = D1' * D2; % combined part of the scatter matrix
        S3 = D2' * D2; % linear part of the scatter matrix
        T = - inv(S3) * S2'; % for getting a2 from a1
        M = S1 + S2 * T; % reduced scatter matrix
        M = [M(3, :) ./ 2; - M(2, :); M(1, :) ./ 2]; % premultiply by inv(C1)
        [evec, eval] = eig(M); % solve eigensystem
        cond = 4 * evec(1, :) .* evec(3, :) - evec(2, :) .^ 2; % evaluate a'Ca
        a1 = evec(:, find(cond > 0)); % eigenvector for min. pos. eigenvalue
        a = [a1; T * a1]; % ellipse coefficients

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    x = points[:, 0]
    y = points[:, 1]

    D1 = np.vstack([x ** 2, x * y, y ** 2]).T
    D2 = np.vstack([x, y, np.ones((len(x), ), dtype=x.dtype)]).T
    S1 = D1.T.dot(D1)
    S2 = D1.T.dot(D2)
    S3 = D2.T.dot(D2)
    T = -np.linalg.inv(S3).dot(S2.T)
    M = S1 + S2.dot(T)
    M = np.array([M[2, :] / 2, -M[1, :], M[0, :] / 2])
    _, evec = np.linalg.eig(M)
    # Eigenvectors are the columns of evec, so index rows here to evaluate
    # a'Ca per eigenvector (cf. evec(1, :) .* evec(3, :) in the Matlab code).
    cond = (4 * evec[0, :] * evec[2, :]) - (evec[1, :] ** 2)
    I = np.where(cond > 0)[0]
    a1 = evec[:, I[np.argmin(cond[I])]]
    return np.concatenate([a1, T.dot(a1)]) 
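A quick way to exercise the fit: sample points from a known ellipse and check that the returned conic coefficients (A, B, C, D, E, F) nearly annihilate A*x^2 + B*x*y + C*y^2 + D*x + E*y + F on the inputs. A minimal sketch, assuming the function above is in scope (the ellipse parameters are arbitrary):

import numpy as np

t = np.linspace(0, 2 * np.pi, 100, endpoint=False)
points = np.column_stack([5.0 + 3.0 * np.cos(t), 7.5 + 1.0 * np.sin(t)])

a = fit_improved_B2AC_numpy(points)
x, y = points[:, 0], points[:, 1]
residual = (a[0] * x ** 2 + a[1] * x * y + a[2] * y ** 2 +
            a[3] * x + a[4] * y + a[5])
assert np.max(np.abs(residual)) < 1e-6  # near-zero for noise-free input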
Example 76
Project: meta-transfer-learning   Author: erfaneshrati   File: reptile.py    MIT License 4 votes vote down vote up
def train_metatransfer_step(self,
                   dataset,
                   input_ph,
                   label_ph,
                   real_label,
                   minimize_op_metalearner,
                   minimize_op_classifier,
                   num_classes,
                   num_shots,
                   inner_batch_size,
                   inner_iters,
                   replacement,
                   meta_step_size,
                   meta_batch_size):
        beta = 0.1
        old_vars = self._model_state.export_variables()
        updates = []
        new_vars_classifier = []
        for _ in range(meta_batch_size):
            mini_dataset = _sample_mini_dataset(dataset, num_classes, num_shots, metatransfer=True)
            mini_batches = self._mini_batches(mini_dataset, inner_batch_size, inner_iters,
                                              replacement)
            minidataset_inputs = np.array([])
            minidataset_labels = np.array([])
            minidataset_real_labels = np.array([])
            for batch in mini_batches:
                inputs, labels, real_labels = zip(*batch)
                if len(minidataset_inputs) == 0:
                    minidataset_inputs = inputs
                    minidataset_labels = labels
                    minidataset_real_labels = real_labels
                else:
                    minidataset_inputs = np.append(minidataset_inputs, np.array(inputs),axis=0)
                    minidataset_labels = np.append(minidataset_labels, np.array(labels),axis=0)
                    minidataset_real_labels = np.append(minidataset_real_labels, np.array(real_labels),axis=0)
                last_backup = self._model_state.export_variables()
                if self._pre_step_op:
                    self.session.run(self._pre_step_op)
                self.session.run(minimize_op_metalearner, feed_dict={input_ph: inputs, label_ph: labels})
            updates.append(subtract_vars(self._model_state.export_variables(), last_backup))
            self._model_state.import_variables(old_vars)
            self.session.run(minimize_op_classifier, feed_dict={input_ph: minidataset_inputs, label_ph: minidataset_labels, real_label: minidataset_real_labels})
            new_vars_classifier.append(self._model_state.export_variables())
            self._model_state.import_variables(old_vars)

        update = average_vars(updates)
        new_vars_classifier = average_vars(new_vars_classifier)
        classifier_param_new = subtract_vars(
            old_vars, scale_vars(subtract_vars(old_vars, new_vars_classifier), beta))
        metalearner_param_new = subtract_vars(
            old_vars, scale_vars(
                subtract_vars(old_vars, add_vars(old_vars, scale_vars(update, meta_step_size))),
                1 - beta))
        self._model_state.import_variables(average_vars([metalearner_param_new, classifier_param_new]))
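The final update is an interpolation: the meta-learner step (weighted 1 - beta) and the classifier step (weighted beta) are each expressed as a move away from old_vars, then averaged. Stripped of the TensorFlow plumbing, the arithmetic reduces to the sketch below; plain NumPy arrays stand in for the variable lists, and all values are illustrative:

import numpy as np

beta, meta_step_size = 0.1, 0.25
old = np.array([1.0, 2.0, 3.0])              # current model parameters
update = np.array([0.4, -0.2, 0.0])          # averaged Reptile update direction
new_classifier = np.array([1.5, 1.5, 3.5])   # parameters after the classifier step

classifier_new = old - beta * (old - new_classifier)
metalearner_new = old - (1 - beta) * (old - (old + meta_step_size * update))
combined = 0.5 * (classifier_new + metalearner_new)
print(combined)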
Example 77
Project: model-api-sequence   Author: evandowning   File: rnn.py    GNU General Public License v3.0 4 votes vote down vote up
def sequence_generator(folder, sample, foldIDs, batchSize, task, convert):
    # We want to loop infinitely because we're training our data on multiple epochs in build_LSTM_model()
    while True:
        xSet = np.array([])
        ySet = np.array([])

        num = 0
        for i in foldIDs:
            x = np.array([])
            y = np.array([])

            # Extract sample's name and number of sequences
            fn = sample[i][0]
            numSeq = sample[i][1]

            # Read in sample's sequences
            path = os.path.join(folder,fn+'.pkl')
            with open(path, 'rb') as fr:
                for _ in range(numSeq):
                    t = pkl.load(fr)
                    x = t[0]
                    y = t[1]

                    # If this should be binary classification, convert labels > 0 to 1
                    if task == 'binary_classification':
                        if y > 0:
                            y = 1
                    elif task == 'multi_classification':
                        y = convert.index(y)

                    if len(xSet) == 0:
                        xSet = x
                        ySet = [y]
                    else:
                        xSet = np.vstack([xSet,x])
                        ySet = np.vstack([ySet,[y]])

                    # Increase count of number of sample features extracted
                    num += 1

                    # Batch size reached, yield data
                    if num % batchSize == 0:
                        # Here we convert our lists into Numpy arrays because
                        # Keras requires it as input for its fit_generator()
                        rv_x = xSet
                        rv_y = ySet

                        xSet = np.array([])
                        ySet = np.array([])

                        num = 0

                        yield (rv_x, rv_y)

        # Yield remaining set
        if len(xSet) > 0:
            yield (xSet, ySet)

# Builds LSTM model 
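A generator like this, which loops forever and yields (batch_x, batch_y) tuples, is exactly the contract Keras's fit_generator expects; steps_per_epoch then defines an epoch. A self-contained sketch of that wiring with a stand-in generator and a toy model (shapes and counts are made up):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

def dummy_generator(batch_size=8):
    # Stands in for sequence_generator: loops forever, yielding (x, y) batches.
    while True:
        x = np.random.rand(batch_size, 4)
        y = (x.sum(axis=1) > 2).astype('float32')
        yield x, y

model = Sequential([Dense(1, activation='sigmoid', input_shape=(4,))])
model.compile(optimizer='adam', loss='binary_crossentropy')
model.fit_generator(dummy_generator(), steps_per_epoch=10, epochs=2)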
Example 78
Project: model-api-sequence   Author: evandowning   File: lstm_cudnn.py    GNU General Public License v3.0 4 votes vote down vote up
def sequence_generator(folder, sample, foldIDs, batchSize, task, convert):
    # Identical, line for line, to the sequence_generator shown in Example 77 (rnn.py).
Example 79
Project: model-api-sequence   Author: evandowning   File: lstm.py    GNU General Public License v3.0 4 votes vote down vote up
def sequence_generator(folder, sample, foldIDs, batchSize, task, convert):
    # Identical, line for line, to the sequence_generator shown in Example 77 (rnn.py).
Example 80
Project: model-api-sequence   Author: evandowning   File: cnn.py    GNU General Public License v3.0 4 votes vote down vote up
def sequence_generator(folder, sample, foldIDs, batchSize, task, convert):
    # Identical, line for line, to the sequence_generator shown in Example 77 (rnn.py).