Python numpy.array() Examples

The following code examples show how to use numpy.array(). They are taken from open-source Python projects. You can vote up the examples you find helpful or vote down those you don't.

Example 1
Project: b2ac   Author: hbldh   File: ellipse.py    MIT License 8 votes vote down vote up
def polygonize(self, n=73):
    """Return an approximate polygon tracing the ellipse boundary.

    The last vertex equals the first one, so the polygon is closed.

    :param n: Number of vertices to generate. Default is 73
        (one vertex every 5 degrees).
    :type n: int
    :return: An [n x 2] array of boundary vertices.
    :rtype: :py:class:`numpy.ndarray`

    """
    angles = np.linspace(0, 2 * np.pi, num=n, endpoint=True)
    cos_t = np.cos(angles)
    sin_t = np.sin(angles)
    cos_r = np.cos(self.rotation_angle)
    sin_r = np.sin(self.rotation_angle)
    # Parametric ellipse rotated by rotation_angle, shifted to center_point.
    vertices = np.zeros((len(angles), 2), dtype='float')
    vertices[:, 0] = (self.center_point[0] +
                      self.radii[0] * cos_t * cos_r -
                      self.radii[1] * sin_t * sin_r)
    vertices[:, 1] = (self.center_point[1] +
                      self.radii[0] * cos_t * sin_r +
                      self.radii[1] * sin_t * cos_r)
    return vertices
Example 2
Project: b2ac   Author: hbldh   File: matrix_algorithms.py    MIT License 7 votes vote down vote up
def QR_factorisation_Householder_double(A):
    """Perform QR factorisation via Householder reflections, in double precision.

    :param A: The matrix to factorise.
    :type A: :py:class:`numpy.ndarray`
    :returns: The matrix Q and the matrix R, with ``Q.dot(R)`` reconstructing A.
    :rtype: tuple

    """
    A = np.array(A, 'float')  # work on a float copy; caller's matrix untouched

    n, m = A.shape
    V = np.zeros_like(A, 'float')  # one Householder vector per column
    # range() instead of Python-2-only xrange(); behaviour is identical.
    for k in range(n):
        V[k:, k] = A[k:, k].copy()
        # Shift by the signed norm to avoid cancellation when reflecting.
        V[k, k] += np.sign(V[k, k]) * np.linalg.norm(V[k:, k], 2)
        V[k:, k] /= np.linalg.norm(V[k:, k], 2)
        # Apply the reflector H_k = I - 2 v v^T to the trailing submatrix.
        A[k:, k:] -= 2 * np.outer(V[k:, k], np.dot(V[k:, k], A[k:, k:]))
    R = np.triu(A[:n, :n])

    # Accumulate Q by applying the reflectors in reverse order to the identity.
    Q = np.eye(m, n)
    for k in range((n - 1), -1, -1):
        Q[k:, k:] -= np.dot((2 * (np.outer(V[k:, k], V[k:, k]))), Q[k:, k:])
    return Q, R
Example 3
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License 6 votes vote down vote up
def __init__(self, input_wave_file, output_wave_file, target_phrase):
    """Configure attack hyper-parameters, load the source audio and build the graph."""
    # Genetic-search / optimisation hyper-parameters.
    self.pop_size = 100
    self.elite_size = 10
    self.mutation_p = 0.005
    self.noise_stdev = 40
    self.noise_threshold = 1
    self.mu = 0.9
    self.alpha = 0.001
    self.max_iters = 3000
    self.num_points_estimate = 100
    self.delta_for_gradient = 100
    self.delta_for_perturbation = 1e3
    # Initial population: pop_size identical copies of the input waveform.
    self.input_audio = load_wav(input_wave_file).astype(np.float32)
    self.pop = np.tile(np.expand_dims(self.input_audio, axis=0), (self.pop_size, 1))
    self.output_wave_file = output_wave_file
    self.target_phrase = target_phrase
    self.funcs = self.setup_graph(self.pop, np.array([toks.index(ch) for ch in target_phrase]))
Example 4
Project: iglovikov_segmentation   Author: ternaus   File: dataset.py    MIT License 6 votes vote down vote up
def __getitem__(self, idx):
    """Return the sample at *idx* as a dict with image id, features and optional pads."""
    image_path = self.image_paths[idx]
    image = load_rgb(image_path, lib=self.imread_library)

    # Apply the configured transformations (normalization etc.).
    normalized_image = self.transform(image=image)["image"]

    if self.factor is None:
        return {"image_id": image_path.stem, "features": tensor_from_rgb_image(normalized_image)}

    # Pad so the spatial dims are divisible by `factor`; report the padding.
    padded_image, pads = pad(normalized_image, factor=self.factor)
    return {
        "image_id": image_path.stem,
        "features": tensor_from_rgb_image(padded_image),
        "pads": np.array(pads),
    }
Example 5
Project: multi-embedding-cws   Author: wangjksjtu   File: pw_lstm_crf_train.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 6
Project: multi-embedding-cws   Author: wangjksjtu   File: pw_lstm3_crf_train.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 7
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_cnn_train.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 8
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_train.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 9
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm4_crf_train.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 10
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_time_paper.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 11
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_crf_train.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 12
Project: multi-embedding-cws   Author: wangjksjtu   File: nopy_fc_lstm3_crf_train.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 13
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm_crf_train_paper.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 14
Project: multi-embedding-cws   Author: wangjksjtu   File: nowubi_fc_lstm3_crf_train.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 15
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_train_paper.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 16
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_train.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 17
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm3_crf_train.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 18
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm_crf_train.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 19
Project: multi-embedding-cws   Author: wangjksjtu   File: nowubi_share_lstm3_crf_train.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 20
Project: multi-embedding-cws   Author: wangjksjtu   File: nopy_share_lstm3_crf_train.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 21
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_time.py    MIT License 6 votes vote down vote up
def do_load_data(path):
    """Load token-id / label pairs, one sentence per line of space-separated ints.

    Each non-empty line holds ``2 * FLAGS.max_sentence_len`` integers: the
    first half are token ids, the second half the per-token labels.

    :param path: path to the data file.
    :return: tuple ``(x, y)`` of int arrays shaped [num_lines, max_sentence_len].
    """
    x = []
    y = []
    # `with` guarantees the file is closed even if a malformed line raises.
    with open(path, "r") as fp:
        for line in fp:  # stream the file instead of slurping via readlines()
            line = line.rstrip()
            if not line:
                continue
            ss = line.split(" ")
            assert len(ss) == FLAGS.max_sentence_len * 2
            x.append([int(tok) for tok in ss[:FLAGS.max_sentence_len]])
            y.append([int(tok) for tok in ss[FLAGS.max_sentence_len:]])
    return np.array(x), np.array(y)
Example 22
Project: multi-embedding-cws   Author: wangjksjtu   File: baseline_crf_seg.py    MIT License 6 votes vote down vote up
def TransRawData(test_data, vob_char_dict, MAX_LEN):
    """Map each character of every line in *test_data* to its id, zero-padded to MAX_LEN.

    Unknown characters fall back to the dictionary's ``u"<UNK>"`` entry.
    Lines longer than MAX_LEN are kept as-is (no truncation), matching the
    previous behaviour.

    :param test_data: path to a UTF-8 text file, one sentence per line.
    :param vob_char_dict: mapping character -> int id, must contain u"<UNK>".
    :param MAX_LEN: target (padded) sequence length.
    :return: int array of shape [num_lines, MAX_LEN].
    """
    import io  # local import keeps this fix self-contained

    X_char = []
    # io.open decodes to unicode on both Python 2 and 3 (replaces the
    # Python-2-only str.decode); `with` closes the handle even on error.
    with io.open(test_data, encoding="utf-8") as inp:
        for line in inp:
            ustr = line.strip()
            # membership test replaces Python-2-only dict.has_key()
            row = [vob_char_dict[ch] if ch in vob_char_dict
                   else vob_char_dict[u"<UNK>"] for ch in ustr]
            row.extend([0] * max(MAX_LEN - len(ustr), 0))
            X_char.append(row)

    return np.array(X_char)
Example 23
Project: speed_estimation   Author: NeilNie   File: speed_predictor.py    MIT License 6 votes vote down vote up
def predict_speed(self, image):
    """Predict the speed for *image*, once enough frames have accumulated.

    Frames are buffered in ``self.inputs`` until ``configs.LENGTH`` of them
    are available; only then is a prediction returned (0.0 beforehand).

    :param image: input image (BGR/RGB array accepted by cv2.resize).
    :return: predicted value, or 0.0 while the buffer is still filling.
    """
    resized = cv2.resize(image, (configs.IMG_WIDTH, configs.IMG_HEIGHT))

    # Buffer the frame while we have fewer than LENGTH of them.
    if len(self.inputs) < configs.LENGTH:
        self.inputs.append(resized)

    # Exactly LENGTH frames: run the model, then slide the window by one.
    if len(self.inputs) == configs.LENGTH:
        predicted = self.model.model.predict(np.array([self.inputs]))[0][0]
        self.inputs.pop(0)
        return predicted

    if len(self.inputs) > configs.LENGTH:
        raise ValueError("Input length can't be longer than network input length")

    return 0.0
Example 24
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: BasePythonDataLayer.py    BSD 2-Clause "Simplified" License 6 votes vote down vote up
def setup(self, bottom, top):
    """Parse the layer's YAML parameter string and preload the dataset."""
    # NOTE(review): yaml.load on the param string — if this string can come
    # from an untrusted source, yaml.safe_load would be safer; confirm before changing.
    params = yaml.load(self.param_str)
    self._layer_params = params
    self._batch_size = int(params.get('batch_size', 256))  # default batch_size = 256
    self._resize = params.get('resize', -1)
    self._mean_file = params.get('mean_file', None)
    self._source_type = params.get('source_type', 'CSV')
    self._shuffle = params.get('shuffle', False)
    # Read the image mean (file or array) and pull all data into memory.
    self.set_mean()
    self.preload_db()
    self._compressed = self._layer_params.get('compressed', True)
    if not self._compressed:
        self.decompress_data()
Example 25
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: TripletDataLayer.py    BSD 2-Clause "Simplified" License 6 votes vote down vote up
def get_next_minibatch(self):
    """Return one triplet mini-batch: [anchor, positive, negative(, label)].

    With prefetching enabled the batch is received from the worker process;
    otherwise it is assembled in-thread from ``get_a_datum()`` samples.
    """
    if self._prefetch:
        # mini-batch was produced by the prefetcher process
        return self._conn.recv()

    anchors, positives, negatives, labels = [], [], [], []
    for _ in range(self._batch_size):
        datum = self.get_a_datum()
        anchors.append(datum[0])
        positives.append(datum[1])
        negatives.append(datum[2])
        if len(datum) == 4:
            # fourth element is the label / margin
            labels.append(datum[-1])

    batch = [np.array(anchors), np.array(positives), np.array(negatives)]
    if labels:
        batch.append(np.array(labels).reshape(self._batch_size, 1, 1, 1))
    return batch
Example 26
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: TripletDataLayer.py    BSD 2-Clause "Simplified" License 6 votes vote down vote up
def get_next_minibatch(self):
    """Assemble one triplet mini-batch in-thread: [anchor, positive, negative(, label)]."""
    anchors, positives, negatives, labels = [], [], [], []
    for _ in range(self._batch_size):
        datum = self.get_a_datum()
        anchors.append(datum[0])
        positives.append(datum[1])
        negatives.append(datum[2])
        if len(datum) == 4:
            # fourth element is the label / margin
            labels.append(datum[-1])

    batch = [np.array(anchors), np.array(positives), np.array(negatives)]
    if labels:
        batch.append(np.array(labels).reshape(self._batch_size, 1, 1, 1))
    return batch
Example 27
Project: PEAKachu   Author: tbischler   File: consensus_peak.py    ISC License 6 votes vote down vote up
def _get_coverage_for_replicon_peaks(self, replicon, lib_strand,
                                         pos_value_pairs, cons_values):
        """Accumulate per-position coverage around each peak center into cons_values.

        For every peak on (replicon, lib_strand), a window of
        ``self._consensus_length`` positions centered on the peak midpoint is
        read from pos_value_pairs and its absolute values are added, element-wise,
        into cons_values.

        :param replicon: key into self._replicon_peak_dict.
        :param lib_strand: strand key within that replicon's peak dict.
        :param pos_value_pairs: mapping position -> coverage value; missing
            positions count as 0.0.
        :param cons_values: accumulator; updated IN PLACE via ``+=``
            (presumably a numpy array of length consensus_length — confirm at caller).
        """
        for peak in self._replicon_peak_dict[replicon][lib_strand]:
            value_list = []
            # midpoint of the peak interval (peak[0], peak[1])
            peak_center = int((peak[0] + peak[1]) / 2)
            # both branches produce a window of exactly consensus_length positions
            if self._consensus_length % 2 == 0:
                cons_start = peak_center - (
                    int(self._consensus_length / 2) - 1)
                cons_end = peak_center + int(self._consensus_length / 2)
            else:
                cons_start = peak_center - (
                    int(self._consensus_length / 2) - 1)
                cons_end = peak_center + (int(self._consensus_length / 2) + 1)
            for pos in range(cons_start, cons_end + 1):
                value_list.append(abs(pos_value_pairs.get(pos, 0.0)))
            # in-place accumulation: the caller's array is the result
            cons_values += np.array(value_list)
Example 28
Project: b2ac   Author: hbldh   File: matrix_algorithms.py    MIT License 6 votes vote down vote up
def convert_to_Hessenberg_Givens_double(A):
    """Reduce a square matrix to upper Hessenberg form with Givens rotations, in doubles.

    :param A: Square matrix to reduce; left unmodified (a float copy is made).
    :type A: :py:class:`numpy.ndarray`
    :return: The Hessenberg form (upper triangle plus first subdiagonal).
    :rtype: :py:class:`numpy.ndarray`
    """
    n, m = A.shape
    A = np.array(A, 'float')  # float working copy
    # range() instead of Python-2-only xrange(); behaviour is identical.
    for i in range(m):
        # zero entries below the first subdiagonal of column i, bottom-up
        for k in range(m - 1, i + 1, -1):
            c, s = Givens_rotation_double(A[k - 1, i], A[k, i])
            # rotate rows k-1 and k
            for j in range(m):
                tau_1 = A[k-1, j]
                tau_2 = A[k, j]
                A[k-1, j] = ((tau_1 * c) - (tau_2 * s))
                A[k, j] = ((tau_1 * s) + (tau_2 * c))
            # rotate columns k-1 and k (similarity transform)
            for j in range(n):
                tau_1 = A[j, k-1]
                tau_2 = A[j, k]
                A[j, k-1] = ((tau_1 * c) - (tau_2 * s))
                A[j, k] = ((tau_1 * s) + (tau_2 * c))
    return np.triu(A, -1)
Example 29
Project: b2ac   Author: hbldh   File: matrix_algorithms.py    MIT License 6 votes vote down vote up
def convert_to_Hessenberg_Givens_int(A):
    """Reduce a square matrix to upper Hessenberg form with integer Givens rotations.

    Uses the scaled integer rotation (c_n, s_n, denominator) from
    Givens_rotation_int and floor division to stay in int64 arithmetic.

    :param A: Square matrix to reduce; left unmodified (an int64 copy is made).
    :type A: :py:class:`numpy.ndarray`
    :return: The Hessenberg form (upper triangle plus first subdiagonal).
    :rtype: :py:class:`numpy.ndarray`
    """
    m, n = A.shape
    A = np.array(A, 'int64')  # int64 working copy
    # range() instead of Python-2-only xrange(); behaviour is identical.
    for i in range(m):
        # zero entries below the first subdiagonal of column i, bottom-up
        for k in range(m - 1, i + 1, -1):
            c_n, s_n, denominator = Givens_rotation_int(A[k - 1, i], A[k, i])
            # rotate rows k-1 and k, rescaling by the shared denominator
            for j in range(m):
                tau_1 = A[k-1, j]
                tau_2 = A[k, j]
                A[k-1, j] = ((tau_1 * c_n) - (tau_2 * s_n)) // denominator
                A[k, j] = ((tau_1 * s_n) + (tau_2 * c_n)) // denominator
            # rotate columns k-1 and k (similarity transform)
            for j in range(n):
                tau_1 = A[j, k-1]
                tau_2 = A[j, k]
                A[j, k-1] = ((tau_1 * c_n) - (tau_2 * s_n)) // denominator
                A[j, k] = ((tau_1 * s_n) + (tau_2 * c_n)) // denominator

    return np.triu(A, -1)
Example 30
Project: b2ac   Author: hbldh   File: matrix_operations.py    MIT License 6 votes vote down vote up
def matrix_add_symmetric(M, M_sym):
    """Add a packed symmetric matrix to a regular [3x3] matrix, in place.

    :param M: A [3x3] matrix to add with the symmetric matrix; modified in place.
    :type M: :py:class:`numpy.ndarray`
    :param M_sym: A [6x1] array holding the upper triangle of a symmetric
        [3x3] matrix, row by row.
    :type M_sym: :py:class:`numpy.ndarray`
    :return: The sum of the two matrices (the updated M).
    :rtype: :py:class:`numpy.ndarray`

    """
    # (row, col, packed index); off-diagonal packed entries are applied to
    # both mirrored positions so the full symmetric matrix is added.
    for row, col, k in ((0, 0, 0), (0, 1, 1), (1, 0, 1),
                        (0, 2, 2), (2, 0, 2), (1, 1, 3),
                        (1, 2, 4), (2, 1, 4), (2, 2, 5)):
        M[row, col] += M_sym[k]

    return M
Example 31
Project: b2ac   Author: hbldh   File: double.py    MIT License 6 votes vote down vote up
def _calculate_M_and_T_double(points):
    """Part of the B2AC ellipse fitting algorithm, calculating the M and T
     matrices needed.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: Matrices M and T.
    :rtype: tuple

    """
    # 6x6 scatter matrix of the design vectors; computed by a module helper.
    S = _calculate_scatter_matrix_double(points[:, 0], points[:, 1])
    # upper-left 3x3 block (quadratic terms)
    S1 = S[:3, :3]
    # lower-right 3x3 block is symmetric; pack its 6 unique entries for the
    # symmetric-inverse helper
    S3 = np.array([S[3, 3], S[3, 4], S[3, 5], S[4, 4], S[4, 5], S[5, 5]])
    S3_inv = mo.inverse_symmetric_3by3_double(S3).reshape((3, 3))
    # upper-right 3x3 block (cross terms)
    S2 = S[:3, 3:]
    T = -np.dot(S3_inv, S2.T)
    M_term_2 = np.dot(S2, T)
    M = S1 + M_term_2
    # premultiply by the inverse constraint matrix C^-1:
    # swap rows 0 and 2 (halved) and negate row 1
    M[[0, 2], :] = M[[2, 0], :] / 2
    M[1, :] = -M[1, :]

    return M, T
Example 32
Project: model-api-sequence   Author: evandowning   File: evaluation.py    GNU General Public License v3.0 6 votes vote down vote up
def sequence_generator(fn, n):
    """Read ``n`` pickled ``(x, y)`` samples from *fn* and stack them.

    :param fn: path to a pickle file holding at least ``n`` sequential
        ``(x, y)`` tuples (one ``pkl.load`` per sample).
    :param n: number of samples to read.
    :return: ``(xSet, ySet)``. With several samples, xSet is the row-stack
        (``np.vstack``) of the x's and ySet the flattened concatenation of the
        y's; with exactly one sample they are returned as-is; with n == 0 both
        are empty arrays — all matching the original incremental behaviour.
    """
    xs = []
    ys = []
    # Read in each sample's sequences.
    with open(fn, 'rb') as fr:
        for _ in range(n):  # original looped `for e in enumerate(range(n))`; only the count matters
            t = pkl.load(fr)
            xs.append(t[0])
            ys.append(t[1])

    if not xs:
        return np.array([]), np.array([])

    # Stack once at the end: O(total) instead of the original O(n^2)
    # incremental vstack/append.
    xSet = xs[0] if len(xs) == 1 else np.vstack(xs)
    ySet = ys[0] if len(ys) == 1 else np.concatenate([np.ravel(a) for a in ys])
    return xSet, ySet
Example 33
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License 5 votes vote down vote up
def db(audio):
    """Return the peak level of *audio* as 20*log10(max |sample|).

    A 2-D batch yields one value per row; an all-zero signal yields
    ``np.array([0])`` instead of -inf.
    """
    if audio.ndim > 1:
        peaks = np.abs(audio).max(axis=1)
        return 20 * np.log10(peaks) if np.any(peaks != 0) else np.array([0])
    peak = np.abs(audio).max()
    return np.array([0]) if peak == 0 else 20 * np.log10(peak)
Example 34
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License 5 votes vote down vote up
def save_wav(audio, output_wav_file):
    wav.write(output_wav_file, 16000, np.array(np.clip(np.round(audio), -2**15, 2**15-1), dtype=np.int16))
    print('output dB', db(audio)) 
Example 35
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License 5 votes vote down vote up
def setup_graph(self, input_audio_batch, target_phrase): 
        """Build the TF1 CTC-loss graph once and return two session-bound closures.

        :param input_audio_batch: [batch x samples] array of candidate audio,
            used only to fix placeholder shapes here.
        :param target_phrase: 1-D int array of target token indices.
        :return: tuple ``(func1, func2)``; both take the six placeholder feeds,
            func1 returns the CTC loss, func2 returns (loss, greedy decoding).
        """
        batch_size = input_audio_batch.shape[0]
        # frame count derived from a 320-sample hop
        # NOTE(review): presumably matches the acoustic model's frame rate — confirm
        weird = (input_audio_batch.shape[1] - 1) // 320 
        logits_arg2 = np.tile(weird, batch_size)
        # target phrase replicated per batch element, plus its length per element
        dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
        dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
        
        # clip to the int16 sample range; shapes below are taken from these arrays
        pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
        seq_len = np.tile(weird, batch_size).astype(np.int32)
        
        with tf.variable_scope('', reuse=tf.AUTO_REUSE):
            
            inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
            len_batch = tf.placeholder(tf.float32, name='b')
            arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape, name='c')
            arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape, name='d')
            arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape, name='e')
            len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')
            
            logits = get_logits(inputs, arg2_logits)
            target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
            ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32), inputs=logits, sequence_length=len_seq)
            decoded, _ = tf.nn.ctc_greedy_decoder(logits, arg2_logits, merge_repeated=True)
            
            # session created here is captured by the closures below and
            # restored from a fixed checkpoint path
            sess = tf.Session()
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, "models/session_dump")
            
        func1 = lambda a, b, c, d, e, f: sess.run(ctcloss, 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        func2 = lambda a, b, c, d, e, f: sess.run([ctcloss, decoded], 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        return (func1, func2) 
Example 36
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License 5 votes vote down vote up
def getctcloss(self, input_audio_batch, target_phrase, decode=False):
    """Run the prepared CTC graph on a batch of candidate audio.

    :param input_audio_batch: [batch x samples] array of audio candidates.
    :param target_phrase: 1-D int array of target token indices.
    :param decode: when True, also return the greedy decoding
        (dispatches to ``self.funcs[1]`` instead of ``self.funcs[0]``).
    """
    batch_size = input_audio_batch.shape[0]
    frame_count = (input_audio_batch.shape[1] - 1) // 320
    logits_arg2 = np.tile(frame_count, batch_size)
    # target phrase replicated per batch element, plus its length per element
    dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
    dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)

    # clip to the int16 sample range before feeding the graph
    pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
    seq_len = np.tile(frame_count, batch_size).astype(np.int32)

    func = self.funcs[1] if decode else self.funcs[0]
    return func(pass_in, batch_size, logits_arg2, dense_arg1, dense_arg2, seq_len)
Example 37
Project: Black-Box-Audio   Author: rtaori   File: tf_logits.py    MIT License 5 votes vote down vote up
def compute_mfcc(audio, **kwargs):
    """
    Compute the MFCC for a given audio waveform. This is
    identical to how DeepSpeech does it, but does it all in
    TensorFlow so that we can differentiate through it.

    :param audio: [batch_size x samples] tensor of audio; cast to float32 here.
    :param kwargs: unused; kept for call-site compatibility.
    :return: [batch x frames x 26] tensor; channel 0 is the log frame energy,
        channels 1..25 are the liftered DCT coefficients.
    """

    batch_size, size = audio.get_shape().as_list()
    audio = tf.cast(audio, tf.float32)

    # 1. Pre-emphasizer, a high-pass filter
    #    (also zero-pads each signal with 1000 samples at the end)
    audio = tf.concat((audio[:, :1], audio[:, 1:] - 0.97*audio[:, :-1], np.zeros((batch_size,1000),dtype=np.float32)), 1)

    # 2. windowing into frames of 320 samples, overlapping
    #    (400-sample windows taken every 160 samples)
    windowed = tf.stack([audio[:, i:i+400] for i in range(0,size-320,160)],1)

    # 3. Take the FFT to convert to frequency space
    ffted = tf.spectral.rfft(windowed, [512])
    ffted = 1.0 / 512 * tf.square(tf.abs(ffted))

    # 4. Compute the Mel windowing of the FFT
    #    (1e-30 guards the upcoming logs against log(0))
    energy = tf.reduce_sum(ffted,axis=2)+1e-30
    filters = np.load("filterbanks.npy").T
    feat = tf.matmul(ffted, np.array([filters]*batch_size,dtype=np.float32))+1e-30

    # 5. Take the DCT again, because why not
    feat = tf.log(feat)
    feat = tf.spectral.dct(feat, type=2, norm='ortho')[:,:,:26]

    # 6. Amplify high frequencies for some reason
    #    (standard cepstral liftering with L = 22)
    _,nframes,ncoeff = feat.get_shape().as_list()
    n = np.arange(ncoeff)
    lift = 1 + (22/2.)*np.sin(np.pi*n/22)
    feat = lift*feat
    width = feat.get_shape().as_list()[1]

    # 7. And now stick the energy next to the features
    feat = tf.concat((tf.reshape(tf.log(energy),(-1,width,1)), feat[:, :, 1:]), axis=2)
    
    return feat 
Example 38
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_seg_nowubi.py    MIT License 5 votes vote down vote up
def TransRawData(test_data, vob_char_dict, vob_pinyin_dict, MAX_LEN):
    """Map each character of every line to char and pinyin ids, zero-padded to MAX_LEN.

    Unknown characters fall back to each dictionary's ``u"<UNK>"`` entry.
    Lines longer than MAX_LEN are kept as-is (no truncation), matching the
    previous behaviour.

    :param test_data: path to a UTF-8 text file, one sentence per line.
    :param vob_char_dict: mapping character -> char id, must contain u"<UNK>".
    :param vob_pinyin_dict: mapping character -> pinyin id, must contain u"<UNK>".
    :param MAX_LEN: target (padded) sequence length.
    :return: tuple of two int arrays, each [num_lines x MAX_LEN].
    """
    import io  # local import keeps this fix self-contained

    X_char = []
    X_pinyin = []
    # io.open decodes to unicode on both Python 2 and 3 (replaces the
    # Python-2-only str.decode); `with` closes the handle even on error.
    with io.open(test_data, encoding="utf-8") as inp:
        for line in inp:
            ustr = line.strip()
            # membership test replaces Python-2-only dict.has_key()
            lX_char = [vob_char_dict[ch] if ch in vob_char_dict
                       else vob_char_dict[u"<UNK>"] for ch in ustr]
            lX_pinyin = [vob_pinyin_dict[ch] if ch in vob_pinyin_dict
                         else vob_pinyin_dict[u"<UNK>"] for ch in ustr]
            pad = [0] * max(MAX_LEN - len(ustr), 0)
            X_char.append(lX_char + pad)
            X_pinyin.append(lX_pinyin + pad)

    return np.array(X_char), np.array(X_pinyin)
Example 39
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_seg_nopy.py    MIT License 5 votes vote down vote up
def TransRawData(test_data, vob_char_dict, vob_pinyin_dict, vob_wubi_dict, MAX_LEN):
    """Map each character of every line to char/pinyin/wubi ids, zero-padded to MAX_LEN.

    Unknown characters fall back to each dictionary's ``u"<UNK>"`` entry.
    Lines longer than MAX_LEN are kept as-is (no truncation), matching the
    previous behaviour.

    :param test_data: path to a UTF-8 text file, one sentence per line.
    :param vob_char_dict: mapping character -> char id, must contain u"<UNK>".
    :param vob_pinyin_dict: mapping character -> pinyin id, must contain u"<UNK>".
    :param vob_wubi_dict: mapping character -> wubi id, must contain u"<UNK>".
    :param MAX_LEN: target (padded) sequence length.
    :return: tuple of three int arrays, each [num_lines x MAX_LEN].
    """
    import io  # local import keeps this fix self-contained

    X_char = []
    X_pinyin = []
    X_wubi = []
    # io.open decodes to unicode on both Python 2 and 3 (replaces the
    # Python-2-only str.decode); `with` closes the handle even on error.
    with io.open(test_data, encoding="utf-8") as inp:
        for line in inp:
            ustr = line.strip()
            # membership test replaces Python-2-only dict.has_key()
            lX_char = [vob_char_dict[ch] if ch in vob_char_dict
                       else vob_char_dict[u"<UNK>"] for ch in ustr]
            lX_pinyin = [vob_pinyin_dict[ch] if ch in vob_pinyin_dict
                         else vob_pinyin_dict[u"<UNK>"] for ch in ustr]
            lX_wubi = [vob_wubi_dict[ch] if ch in vob_wubi_dict
                       else vob_wubi_dict[u"<UNK>"] for ch in ustr]
            pad = [0] * max(MAX_LEN - len(ustr), 0)
            X_char.append(lX_char + pad)
            X_pinyin.append(lX_pinyin + pad)
            X_wubi.append(lX_wubi + pad)

    return np.array(X_char), np.array(X_pinyin), np.array(X_wubi)
Example 40
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm_crf_seg_nowubi.py    MIT License 5 votes vote down vote up
def TransRawData(test_data, vob_char_dict, vob_pinyin_dict, MAX_LEN):
    """Convert raw text into padded char/pinyin index matrices.

    Each input line is mapped character-by-character through the char and
    pinyin vocabularies (unknown characters fall back to the u"<UNK>"
    entry) and right-padded with 0 up to MAX_LEN.

    :param test_data: path to a UTF-8 text file, one sentence per line.
    :param vob_char_dict: dict mapping characters to char ids; must contain u"<UNK>".
    :param vob_pinyin_dict: dict mapping characters to pinyin ids; must contain u"<UNK>".
    :param MAX_LEN: fixed sequence length; lines are assumed to be at most this long.
    :return: tuple of two numpy arrays, each of shape (num_lines, MAX_LEN).
    """
    import io  # local import: io.open decodes text on both Python 2 and 3

    X_char = []
    X_pinyin = []

    # io.open with an explicit encoding replaces the Python-2-only pattern
    # of reading raw bytes and calling line.decode("utf-8"); the "with"
    # block also guarantees the file is closed even if an exception occurs.
    with io.open(test_data, 'r', encoding='utf-8') as inp:
        for line in inp:
            ustr = line.strip()
            lX_char = []
            lX_pinyin = []
            for char in ustr:
                # dict.has_key() was removed in Python 3; "in" works on both.
                if char in vob_char_dict:
                    lX_char.append(vob_char_dict[char])
                else:
                    lX_char.append(vob_char_dict[u"<UNK>"])
                if char in vob_pinyin_dict:
                    lX_pinyin.append(vob_pinyin_dict[char])
                else:
                    lX_pinyin.append(vob_pinyin_dict[u"<UNK>"])

            # Right-pad every sequence with 0 up to MAX_LEN (xrange -> range).
            for _ in range(len(ustr), MAX_LEN):
                lX_char.append(0)
                lX_pinyin.append(0)

            X_char.append(lX_char)
            X_pinyin.append(lX_pinyin)

    return np.array(X_char), np.array(X_pinyin)
Example 41
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm_crf_seg.py    MIT License 5 votes vote down vote up
def TransRawData(test_data, vob_char_dict, vob_pinyin_dict, vob_wubi_dict, MAX_LEN):
    """Convert raw text into padded index matrices for three vocabularies.

    Each input line is mapped character-by-character through the char,
    pinyin and wubi vocabularies (unknown characters fall back to the
    u"<UNK>" entry) and right-padded with 0 up to MAX_LEN.

    :param test_data: path to a UTF-8 text file, one sentence per line.
    :param vob_char_dict: dict mapping characters to char ids; must contain u"<UNK>".
    :param vob_pinyin_dict: dict mapping characters to pinyin ids; must contain u"<UNK>".
    :param vob_wubi_dict: dict mapping characters to wubi ids; must contain u"<UNK>".
    :param MAX_LEN: fixed sequence length; lines are assumed to be at most this long.
    :return: tuple of three numpy arrays, each of shape (num_lines, MAX_LEN).
    """
    import io  # local import: io.open decodes text on both Python 2 and 3

    X_char = []
    X_pinyin = []
    X_wubi = []

    # io.open with an explicit encoding replaces the Python-2-only pattern
    # of reading raw bytes and calling line.decode("utf-8"); the "with"
    # block also guarantees the file is closed even if an exception occurs.
    with io.open(test_data, 'r', encoding='utf-8') as inp:
        for line in inp:
            ustr = line.strip()
            lX_char = []
            lX_pinyin = []
            lX_wubi = []
            for char in ustr:
                # dict.has_key() was removed in Python 3; "in" works on both.
                if char in vob_char_dict:
                    lX_char.append(vob_char_dict[char])
                else:
                    lX_char.append(vob_char_dict[u"<UNK>"])
                if char in vob_pinyin_dict:
                    lX_pinyin.append(vob_pinyin_dict[char])
                else:
                    lX_pinyin.append(vob_pinyin_dict[u"<UNK>"])
                if char in vob_wubi_dict:
                    lX_wubi.append(vob_wubi_dict[char])
                else:
                    lX_wubi.append(vob_wubi_dict[u"<UNK>"])

            # Right-pad every sequence with 0 up to MAX_LEN (xrange -> range).
            for _ in range(len(ustr), MAX_LEN):
                lX_char.append(0)
                lX_pinyin.append(0)
                lX_wubi.append(0)

            X_char.append(lX_char)
            X_pinyin.append(lX_pinyin)
            X_wubi.append(lX_wubi)

    return np.array(X_char), np.array(X_pinyin), np.array(X_wubi)
Example 42
Project: SyNEThesia   Author: RunOrVeith   File: live_viewer.py    MIT License 5 votes vote down vote up
def __iter__(self):
        """Yield feature-extractor outputs for successive audio buffers.

        Yields nothing when no stream or audio controller is attached.
        """
        # Guard clause: an unconfigured viewer produces an empty iterator.
        if self.stream is None or self.audio_controller is None:
            return
        # NOTE(review): "<H" parses unsigned 16-bit samples; audio capture is
        # often signed ("<h") -- confirm against the actual stream format.
        fmt = "<H"
        for _ in range(self.frames_per_buffer):
            raw = self.stream.read(self.frames_per_buffer, exception_on_overflow=False)
            samples = np.array(list(struct.iter_unpack(fmt, raw)))
            yield self.feature_extractor(samples)
Example 43
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: TripletSampler.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def hard_negative_multilabel(self):
        """Hard negative sampling based on the multilabel assumption.

        Searches, among self._k candidate negatives, for the sample with the
        largest distance (smallest similarity) to the anchor.

        :return: tuple (anchor_id, positive_id, negative_id, margin) where
                 margin = sim(anchor, positive) - sim(anchor, hardest negative).
        """
        # During early iterations of sampling, use random sampling instead:
        # before self._n iterations the model's notion of distance is noisy.
        if self._iteration <= self._n:
            return self.random_multilabel()

        # np.random.choice requires a 1-d array-like; a dict keys() view
        # raises ValueError on Python 3, so materialize it as a list first.
        anchor_class_id, negative_class_id = np.random.choice(
            list(self._index.keys()), 2)
        anchor_id, positive_id = np.random.choice(
            self._index[anchor_class_id], 2)
        negative_ids = np.random.choice(
            self._index[negative_class_id], self._k)
        # Calculate similarities and keep the least similar negative.
        anchor_label = parse_label(self._labels[anchor_id])
        positive_label = parse_label(self._labels[positive_id])
        negative_labels = [parse_label(self._labels[negative_id]) for
                           negative_id in negative_ids]
        p_sim = intersect_sim(anchor_label, positive_label)
        n_sims = np.array(
            [intersect_sim(anchor_label, negative_label) for
             negative_label in negative_labels])
        min_sim_id = np.argmin(n_sims)
        negative_id = negative_ids[min_sim_id]
        n_sim = n_sims[min_sim_id]
        margin = p_sim - n_sim
        return (anchor_id, positive_id, negative_id, margin)
Example 44
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: DataManager.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def load_all(self):
        """Load all raw data strings and their labels.

        Give:
        data: the list of raw data, needs to be decompressed
              (e.g., raw JPEG string)
        labels: numpy array, with each element is a string

        :raises Exception: if the number of samples and labels differ.
        """
        start = time.time()
        print("Start Loading Data from BCF {}".format(
            'MEMORY' if self._bcf_mode == 'MEM' else 'FILE'))

        self._labels = np.loadtxt(self._label_fn).astype(str)

        if self._bcf.size() != self._labels.shape[0]:
            # Fixed message: the adjacent literals previously concatenated
            # without a space ("...in dataand labels...").
            raise Exception("Number of samples in data "
                            "and labels are not equal")
        # Guard passed: pull every raw datum out of the BCF container.
        for idx in range(self._bcf.size()):
            datum_str = self._bcf.get(idx)
            self._data.append(datum_str)
        end = time.time()
        print("Loading {} samples Done: Time cost {} seconds".format(
            len(self._data), end - start))

        return self._data, self._labels
Example 45
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: BasePythonDataLayer.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def get_next_minibatch(self):
        """Generate the next mini-batch.

        The return value is an array of numpy arrays: [data, label].
        The reshape function will be called based on the results of this
        function.

        Subclasses must implement this; the base implementation is a no-op
        placeholder that returns None.
        """
Example 46
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: TripletDataLayer.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def get_a_datum(self):
        """Get a single datum.

        Sampling -> decode images -> stack numpy array
        """
        sample = self._sampler.sample()
        triplet = sample[:3]
        # "idx" instead of "id": avoids shadowing the builtin.
        if self._compressed:
            datum_ = [extract_sample(self._data[idx], self._mean, self._resize)
                      for idx in triplet]
        else:
            datum_ = [self._data[idx] for idx in triplet]
        # A fourth element in the sample is the margin; carry it through.
        if len(sample) == 4:
            datum_.append(sample[-1])
        return datum_
Example 47
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: TripletDataLayer.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def get_a_datum(self):
        """Get a single datum.

        Sampling -> decode images -> stack numpy array
        """
        sample = self._sampler.sample()
        datum_ = []
        # First three sampled ids form the (anchor, positive, negative) data.
        for member_id in sample[:3]:
            if self._compressed:
                datum_.append(
                    extract_sample(self._data[member_id], self._mean, self._resize))
            else:
                datum_.append(self._data[member_id])
        # A fourth element in the sample is the margin; carry it through.
        if len(sample) == 4:
            datum_.append(sample[-1])
        return datum_
Example 48
Project: PEAKachu   Author: tbischler   File: coverage.py    ISC License 5 votes vote down vote up
def _init_coverage_list(self, length):
        """Initialize a zeroed coverage array for both strands.

        :param length: number of positions to cover per strand.
        """
        for strand in ["+", "-"]:
            # np.zeros allocates the float64 array directly instead of first
            # building a Python list of `length` zeros; dtype is unchanged.
            self._coverages[strand] = np.zeros(length)
Example 49
Project: PEAKachu   Author: tbischler   File: tmm.py    ISC License 5 votes vote down vote up
def calc_size_factors(self):
        """Compute TMM-based size factors, scaled so the largest equals 1.0.

        Converts the pandas count dataframe into an edgeR DGEList, runs TMM
        normalization, and returns the effective library sizes as a pandas
        Series indexed like the count dataframe's columns.
        """
        # Convert the pandas dataframe to an R DGEList object.
        dge_list = r.DGEList(self.count_df)
        # Calculate TMM normalization factors via edgeR.
        dge_list = r.calcNormFactors(dge_list, method="TMM")
        # Effective size factor = library size * normalization factor.
        lib_sizes = np.array(dge_list.rx2("samples")["lib.size"])
        norm_factors = np.array(dge_list.rx2("samples")["norm.factors"])
        factors = pd.Series(lib_sizes * norm_factors,
                            index=self.count_df.columns)
        # Rescale so the maximum size factor is 1.0.
        return factors / factors.max()
Example 50
Project: PEAKachu   Author: tbischler   File: count.py    ISC License 5 votes vote down vote up
def count_reads_for_windows(self, replicon, strand, window_list):
        """Count reads overlapping each window on the given replicon/strand.

        :param replicon: replicon identifier forwarded to the read counters.
        :param strand: strand label ("+" or "-") stored on each interval.
        :param window_list: sequence of (start, end) windows.
        :return: numpy integer array with one read count per window.
        """
        self._interval_tree = Intersecter()
        # np.zeros allocates the counter array directly (and keeps an integer
        # dtype even for an empty window list, unlike np.array([0] * 0)).
        self._counts = np.zeros(len(window_list), dtype=int)
        for ind, window in enumerate(window_list):
            self._interval_tree.add_interval(
                Interval(window[0],
                         window[1],
                         value=ind,
                         strand=strand))
        if self._paired_end:
            # Paired-end mode needs mate reads cached before counting.
            self._cache_read2(replicon)
        self._count_reads(replicon)
        return self._counts