Python numpy.empty() Examples

The following code examples show how to use numpy.empty(). They are taken from open-source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: ieml   Author: IEMLdev   File: _test_tables.py    GNU General Public License v3.0 6 votes vote down vote up
def test_additive_script_layer_0(self):
        """The additive layer-0 script "I:" must expand into a single 1x6 table."""
        script = sc("I:")
        tables = script.tables

        expected_rows = [self.parser.parse("I:")]
        expected_tabs = [self.parser.parse("I:")]

        # Expected cell contents, in table order.
        expected_cells = np.empty(6, dtype=object)
        for slot, literal in enumerate(("E:", "U:", "A:", "S:", "B:", "T:")):
            expected_cells[slot] = self.parser.parse(literal)

        headers = list(tables[0].headers.values())[0]

        self.assertEqual(len(tables), 1, "Correct number of tables generated")
        self.assertTrue(tables[0].cells.shape == expected_cells.shape, "Table has the correct shape")
        self.assertEqual(headers[0], expected_rows, "Row headers are generated correctly")
        self.assertEqual(list(tables[0].headers), expected_tabs, "Tab headers are generated correctly")
        self.assertTrue((tables[0].cells == expected_cells).all(), "Cells are generated correctly")
        self.assertTrue(tables[0].paradigm == script, "Table has correct paradigm")
Example 2
Project: ieml   Author: IEMLdev   File: _test_tables.py    GNU General Public License v3.0 6 votes vote down vote up
def test_1d_multiplicative_script(self):
        """A 1-D multiplicative paradigm must produce one two-cell table."""
        script = self.parser.parse("E:S:O:.")
        tables = script.tables

        expected_rows = [self.parser.parse("E:S:O:.")]
        expected_cols = []
        expected_tabs = [self.parser.parse("E:S:O:.")]

        expected_cells = np.empty(2, dtype=object)
        for slot, literal in enumerate(("E:S:U:.", "E:S:A:.")):
            expected_cells[slot] = self.parser.parse(literal)

        headers = list(tables[0].headers.values())[0]

        self.assertEqual(len(tables), 1, "Correct number of tables generated")
        self.assertTrue(tables[0].cells.shape == expected_cells.shape, "Table has the correct shape")
        self.assertEqual(headers[0], expected_rows, "Row headers are generated correctly")
        self.assertTrue((headers[1] == expected_cols), "Column headers are generated correctly")
        self.assertEqual(list(tables[0].headers), expected_tabs, "Tab headers are generated correctly")
        self.assertTrue((tables[0].cells == expected_cells).all(), "Cells are generated correctly")
        self.assertTrue(tables[0].paradigm == script, "Table has correct paradigm")
Example 3
Project: ieml   Author: IEMLdev   File: _test_tables.py    GNU General Public License v3.0 6 votes vote down vote up
def test_row_of_paradigm(self):
        """A 2-variable paradigm must expand into a single 2x2 table."""
        script = self.parser.parse("t.i.-s.i.-'u.S:.-U:.-'O:O:.-',")
        tables = script.tables

        expected_rows = [self.parser.parse("t.i.-s.i.-'u.S:.-U:.-'U:O:.-',"),
                         self.parser.parse("t.i.-s.i.-'u.S:.-U:.-'A:O:.-',")]
        expected_cols = [self.parser.parse("t.i.-s.i.-'u.S:.-U:.-'O:U:.-',"),
                         self.parser.parse("t.i.-s.i.-'u.S:.-U:.-'O:A:.-',")]
        expected_tabs = [script]

        # Expected cell contents, laid out row-major.
        cell_literals = (("t.i.-s.i.-'u.S:.-U:.-'wo.-',", "t.i.-s.i.-'u.S:.-U:.-'wa.-',"),
                         ("t.i.-s.i.-'u.S:.-U:.-'wu.-',", "t.i.-s.i.-'u.S:.-U:.-'we.-',"))
        expected_cells = np.empty((2, 2), dtype=object)
        for r, row_literals in enumerate(cell_literals):
            for c, literal in enumerate(row_literals):
                expected_cells[r][c] = self.parser.parse(literal)

        headers = list(tables[0].headers.values())[0]

        self.assertEqual(len(tables), 1, "Correct number of tables generated")
        self.assertTrue(tables[0].cells.shape == expected_cells.shape, "Table has the correct shape")
        self.assertEqual(headers[0], expected_rows, "Row headers are generated correctly")
        self.assertTrue((headers[1] == expected_cols), "Column headers are generated correctly")
        self.assertEqual(list(tables[0].headers), expected_tabs, "Tab headers are generated correctly")
        self.assertTrue((tables[0].cells == expected_cells).all(), "Cells are generated correctly")
        self.assertTrue(tables[0].paradigm == script, "Table has correct paradigm")
Example 4
Project: fbpconv_tf   Author: panakino   File: unet.py    GNU General Public License v3.0 6 votes vote down vote up
def predict(self, model_path, x_test):
        """
        Uses the model to create a prediction for the given data

        :param model_path: path to the model checkpoint to restore
        :param x_test: Data to predict on. Shape [n, nx, ny, channels]
        :returns prediction: The unet prediction Shape [n, px, py, labels] (px=nx-self.offset/2)
        """

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            # Initialize variables
            sess.run(init)

            # Restore model weights from previously saved model
            self.restore(sess, model_path)

            # The graph expects a label tensor in the feed dict even at
            # inference time; an uninitialized buffer of the right shape is
            # used -- NOTE(review): assumes self.predicter never reads self.y,
            # verify against the graph definition.  keep_prob=1.0 disables
            # dropout for inference.
            y_dummy = np.empty((x_test.shape[0], x_test.shape[1], x_test.shape[2], self.n_class))
            prediction = sess.run(self.predicter, feed_dict={self.x: x_test, self.y: y_dummy, self.keep_prob: 1.})

        return prediction
Example 5
Project: models   Author: kipoi   File: dataloader_m.py    MIT License 6 votes vote down vote up
def map_values(values, pos, target_pos, dtype=None, nan=dat.CPG_NAN):
    """Project `values`, observed at positions `pos`, onto `target_pos`.

    Positions in `target_pos` that are absent from `pos` receive `nan`.
    """
    assert len(values) == len(pos)
    assert np.all(pos == np.sort(pos))
    assert np.all(target_pos == np.sort(target_pos))

    values, pos, target_pos = (a.ravel() for a in (values, pos, target_pos))

    # Keep only observations whose position occurs in the target grid.
    covered = np.in1d(pos, target_pos)
    pos = pos[covered]
    values = values[covered]

    out_dtype = dtype if dtype else values.dtype
    mapped = np.empty(len(target_pos), dtype=out_dtype)
    mapped.fill(nan)

    hits = np.in1d(target_pos, pos).nonzero()[0]
    assert len(hits) == len(values)
    assert np.all(target_pos[hits] == pos)
    mapped[hits] = values
    return mapped
Example 6
Project: skylab   Author: coenders   File: data.py    GNU General Public License v3.0 6 votes vote down vote up
def exp(N=100):
    r"""Create uniformly distributed data on sphere.

    :param N: number of pseudo-events to generate
    :returns: structured array with fields ra, sinDec, sigma, logE
    """
    # Spectral index for the simulated (pareto) energy distribution.
    g = 3.7

    # BUG FIX: the np.float alias was deprecated in NumPy 1.20 and removed
    # in 1.24; np.float64 is exactly what the alias resolved to.
    arr = np.empty((N, ), dtype=[("ra", np.float64), ("sinDec", np.float64),
                                 ("sigma", np.float64), ("logE", np.float64)])

    arr["ra"] = np.random.uniform(0., 2.*np.pi, N)
    arr["sinDec"] = np.random.uniform(-1., 1., N)

    # Energy proxy: log10 of a pareto-distributed variable.
    E = np.log10(np.random.pareto(g, size=N) + 1)
    # NOTE(review): mrs, mrs_min, log_sig and logE_res are module-level
    # constants defined elsewhere in this file -- assumed in scope here.
    arr["sigma"] = np.random.lognormal(mean=np.log((mrs - mrs_min) * np.exp(-np.log(10)*E) + mrs_min),
                                       sigma=log_sig)
    arr["logE"] = E + logE_res * np.random.normal(size=N)

    return arr
Example 7
Project: core   Author: lifemapper   File: mcpa.py    GNU General Public License v3.0 6 votes vote down vote up
def _beta_helper(mtx1, mtx2, weights):
    """This helper function avoids creating large temporary matrices

    Computes out[i, j] = sum_n mtx1[n, i] * weights[n] * mtx2[n, j].

    Args:
        mtx1 (numpy array): A (n [sites] by i [predictors]) standardized matrix.
        mtx2 (numpy array): A (n [sites] by k) standardized matrix.
        weights (numpy array): A (n [sites]) array of site weights

    Returns:
        numpy array: A fully populated (i by k) matrix.
    """
    _, num_predictors = mtx1.shape
    _, num_k = mtx2.shape
    out_mtx = np.empty((num_predictors, num_k))
    for i in range(num_predictors):
        # Hoist the weighted predictor column out of the inner loop.
        weighted_col = mtx1[:, i] * weights
        # BUG FIX: the inner loop previously ran over range(i, num_k), so
        # out_mtx[i, j] for j < i kept whatever uninitialized garbage
        # np.empty() allocated.  Fill every entry.
        for j in range(num_k):
            out_mtx[i, j] = np.sum(weighted_col * mtx2[:, j])
    return out_mtx

# ............................................................................. 
Example 8
Project: core   Author: lifemapper   File: geotools.py    GNU General Public License v3.0 6 votes vote down vote up
def _cycleRow(self, scanline, arrtype, left, center, right):
      """
      @summary: Shift the values in a row to the right, so that the first 
                column in the row is shifted to the center.  Used for data in
                which a row begins with 0 degree longitude and ends with 360 
                degrees longitude (instead of -180 to +180) 
      @param scanline: Original row to shift.
      @param arrtype: Numpy datatype for scanline values
      @param left: Leftmost column index
      @param center: Center column index
      @param right: Rightmost column index
      @return: New row of length self.xsize with [center:right] first,
               followed by [left:center]
      """
      newline = numpy.empty((self.xsize), dtype=arrtype)
      # Slice assignment replaces the original per-element copy loops:
      # the [center:right] span moves to the front, [left:center] follows.
      nRight = right - center
      newline[:nRight] = scanline[center:right]
      newline[nRight:nRight + (center - left)] = scanline[left:center]
      return newline
   
   # ............................................................................ 
Example 9
Project: fuku-ml   Author: fukuball   File: Utility.py    MIT License 6 votes vote down vote up
def random_projection(X):
    """Randomly project X onto a lower-dimensional space.

    A (d x d') sparse projection matrix is drawn entry-wise: -1 with
    probability 0.1, +1 with probability 0.1, 0 otherwise.  The target
    dimension d' is drawn uniformly from [2, d].
    """
    n_features = X.shape[1]
    n_target = random.randint(2, n_features)

    lo_cut = 0.1
    hi_cut = 0.9

    proj = np.empty((n_features, n_target))
    for r in range(n_features):
        for c in range(n_target):
            # One uniform draw per entry, mapped onto {-1, 0, +1}.
            u = random.random()
            if u < lo_cut:
                proj[r][c] = -1.0
            elif u >= hi_cut:
                proj[r][c] = 1.0
            else:
                proj[r][c] = 0.0

    return np.inner(X, proj.T)
Example 10
Project: Scene-Understanding   Author: foamliu   File: data_generator.py    MIT License 6 votes vote down vote up
def __getitem__(self, idx):
        """Build batch `idx`: preprocessed RGB images and one-hot masks."""
        start = idx * batch_size
        n_samples = min(batch_size, len(self.ids) - start)

        X = np.empty((n_samples, img_rows, img_cols, 3), dtype=np.float32)
        Y = np.empty((n_samples, img_rows, img_cols, num_classes), dtype=np.float32)

        for offset in range(n_samples):
            sample_id = self.ids[start + offset]
            image = get_image(self.names[sample_id])
            category = get_category(sample_id)
            image, category = random_crop(image, category)

            # OpenCV loads BGR; the model consumes RGB.
            X[offset] = cv.cvtColor(image, cv.COLOR_BGR2RGB)
            Y[offset] = to_categorical(category, num_classes)

        return preprocess_input(X), Y
Example 11
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: utils.py    Apache License 2.0 6 votes vote down vote up
def sample_categorical(prob, rng):
    """Draw one sample from each row-wise categorical distribution.

    Parameters
    ----------
    prob : numpy.ndarray
      (batch_num, category_num); each row is one distribution.
    rng : numpy.random.RandomState

    Returns
    -------
    numpy.ndarray
      (batch_num,) float32 array of sampled category indices.
    """
    # Inverse-CDF sampling: place one uniform draw per row inside that
    # row's cumulative distribution.  The clip keeps the result inside
    # [0, category_num - 1] even for degenerate rows.
    upper = prob.shape[1] - 0.5
    draws = [numpy.searchsorted(numpy.cumsum(row), rng.rand()).clip(min=0.0, max=upper)
             for row in prob]
    return numpy.asarray(draws, dtype=numpy.float32)
Example 12
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: ndarray.py    Apache License 2.0 6 votes vote down vote up
def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
    """Return a new handle with specified shape and context.

    Empty handle is only used to hold results.

    Parameters
    ----------
    shape : tuple of int
        Shape of the array the handle describes.
    ctx : Context
        Device context supplying device_typeid and device_id.
    delay_alloc : bool
        If True, defer the actual memory allocation until first use.
    dtype : numpy dtype, optional
        Element type, translated to the MXNet dtype enum below.

    Returns
    -------
    handle
        A new empty `NDArray` handle.
    """
    hdl = NDArrayHandle()
    # Every argument crosses the C FFI boundary, so each is converted to
    # the exact ctypes type MXNDArrayCreateEx expects; check_call raises
    # on a non-zero C return code.
    check_call(_LIB.MXNDArrayCreateEx(
        c_array_buf(mx_uint, native_array('I', shape)),
        mx_uint(len(shape)),
        ctypes.c_int(ctx.device_typeid),
        ctypes.c_int(ctx.device_id),
        ctypes.c_int(int(delay_alloc)),
        ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
        ctypes.byref(hdl)))
    return hdl
Example 13
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: ndarray.py    Apache License 2.0 6 votes vote down vote up
def asnumpy(self):
        """Returns a ``numpy.ndarray`` object with value copied from this array.

        Examples
        --------
        >>> x = mx.nd.ones((2,3))
        >>> y = x.asnumpy()
        >>> type(y)
        <type 'numpy.ndarray'>
        >>> y
        array([[ 1.,  1.,  1.],
               [ 1.,  1.,  1.]], dtype=float32)
        >>> z = mx.nd.ones((2,3), dtype='int32')
        >>> z.asnumpy()
        array([[1, 1, 1],
               [1, 1, 1]], dtype=int32)
        """
        # Uninitialized host buffer of matching shape/dtype; the C call
        # below overwrites every element.
        data = np.empty(self.shape, dtype=self.dtype)
        # Synchronous device-to-CPU copy directly into numpy's buffer.
        check_call(_LIB.MXNDArraySyncCopyToCPU(
            self.handle,
            data.ctypes.data_as(ctypes.c_void_p),
            ctypes.c_size_t(data.size)))
        return data
Example 14
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 6 votes vote down vote up
def check_softmax_with_shape(shape, xpu, preserve_shape=False):
    """Check SoftmaxOutput forward/backward on `xpu` against np_softmax.

    :param shape: shape of the input and of the (soft) label
    :param xpu: device context to bind on
    :param preserve_shape: forwarded to mx.symbol.SoftmaxOutput
    """
    # bind with label
    X = mx.symbol.Variable('X')
    L = mx.symbol.Variable('L')
    Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
    x = mx.random.uniform(-1, 1, shape, ctx=xpu)
    l = mx.random.uniform(-1, 1, shape, ctx=xpu)
    # Labels are themselves a probability distribution (soft labels).
    l[:] = np_softmax(l.asnumpy())
    grad = mx.nd.empty(shape, ctx = xpu)
    exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
    exec1.forward(is_train=True)
    out = exec1.outputs[0].asnumpy()
    # Non-zero atol required by test_softmax with seed 781663739
    rtol = 1e-4
    atol = 1e-6
    assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol)
    exec1.backward()
    # Softmax cross-entropy input gradient: softmax(x) - label.
    assert_almost_equal(grad.asnumpy(), np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol)
Example 15
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 6 votes vote down vote up
def test_scalarop():
    """Check a compound scalar-arithmetic symbol forward and backward."""
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)*5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:]=3

    # Mix of scalar add/mul/div/sub plus a comparison; (data != 0)
    # contributes a constant 1 here since every input element is 5.
    test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))

    npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
    npout = 2/npout_1

    check_symbolic_forward(test, [data_tmp], [npout])

    # Analytic gradient: d/dx [2/(c - 2x/5)] = 2*(2/5)/(c - 2x/5)^2,
    # scaled by the head gradient of 2 supplied below.
    npout_grad = 2.*2/5
    npout_grad = 2*npout_grad /(npout_1 *npout_1 )

    check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
Example 16
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 6 votes vote down vote up
def test_shape_array():
    """shape_array forward returns the input's shape; its gradient is zero."""
    for ndim in range(1, 6):
        shape = rand_shape_nd(ndim)
        x = mx.sym.var('x')
        y = mx.sym.shape_array(x)
        xa = mx.nd.array(np.random.ranf(shape))
        xg = mx.nd.empty(xa.shape)
        ya = np.shape(xa)
        yg = mx.nd.ones(ya)
        exe = y.bind(ctx=default_context(), args={'x': xa},
                     args_grad={'x': xg})
        exe.forward(is_train=True)
        exe.backward([yg])
        yo = exe.outputs[0].asnumpy()
        # BUG FIX: the result of same() was previously discarded, so a wrong
        # forward result could never fail this test.
        assert same(yo, ya)
        # shape_array is not differentiable w.r.t. its input.
        assert_almost_equal(xg.asnumpy(), np.zeros_like(xg.asnumpy()))
Example 17
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 6 votes vote down vote up
def test_binary_op_duplicate_input():
    """Binding one symbol on both sides of '*': y = x*x, dy/dx = 2x."""
    var_x = mx.symbol.Variable('data')
    shape = (3, 4)
    base_vals = np.full(shape, 5.0)
    nd_input = mx.nd.array(base_vals)
    nd_grad = mx.nd.empty(shape)
    nd_grad[:] = 3
    head_grad = mx.nd.empty(shape)
    head_grad[:] = 1
    squared = var_x * var_x
    executor = squared.bind(default_context(), args=[nd_input], args_grad=[nd_grad])
    executor.forward(is_train=True)
    assert_almost_equal(executor.outputs[0].asnumpy(), base_vals * base_vals)
    executor.backward(head_grad)
    # Both occurrences of the input contribute: d(x*x)/dx = 2x.
    assert_almost_equal(nd_grad.asnumpy(), 2.0 * base_vals)
Example 18
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 6 votes vote down vote up
def test_sign():
    """Check forward of mx.sym.sign and its (identically zero) gradient."""
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:] = 5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:] = 3

    test = mx.sym.sign(data)
    exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0].asnumpy()
    assert_almost_equal(out, np.sign(data_tmp))

    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    # sign() is flat almost everywhere, so the input gradient is zero no
    # matter what head gradient flows in.  (The original code computed
    # out_grad.asnumpy() into npout_grad and immediately overwrote it with
    # 0 -- that dead store is removed here.)
    npout_grad = 0
    exe_test.backward(out_grad)
    assert_almost_equal(arr_grad.asnumpy(), npout_grad)
Example 19
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 6 votes vote down vote up
def test_rsqrt_cos_sin():
    """Check forward/backward of the composite rsqrt(x) + cos(x) + sin(x)."""
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:]=5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:]=3

    test =  mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
    exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0].asnumpy()
    # Reference forward value: rsqrt(x) == 1/sqrt(x).
    npout =  1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
    assert_almost_equal(out, npout)

    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2;
    npout_grad = out_grad.asnumpy()
    # Chain rule with head gradient 2:
    #   d/dx rsqrt(x) = -1/(2*x*sqrt(x)),  d/dx cos = -sin,  d/dx sin = cos.
    npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
    exe_test.backward(out_grad)
    assert_almost_equal(arr_grad.asnumpy(), npout_grad)
Example 20
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 6 votes vote down vote up
def test_abs():
    """Check forward/backward of mx.sym.abs."""
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:]=5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:]=3

    test = mx.sym.abs(data)
    exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0].asnumpy()
    npout = abs(data_tmp)
    assert_almost_equal(out, npout)

    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2;
    npout_grad = out_grad.asnumpy()
    # d/dx |x| = sign(x); scale by the head gradient (2 everywhere).
    npout_grad = npout_grad * np.sign(data_tmp)
    exe_test.backward(out_grad)
    assert_almost_equal(arr_grad.asnumpy(), npout_grad)
Example 21
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 6 votes vote down vote up
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
    """Compare mx.symbol.Pad against np.pad and numerically check its gradient.

    :param shape: input tensor shape
    :param xpu: device context to bind on
    :param pad_width: flat sequence of (before, after) pads, two per axis
    :param mode: padding mode understood by both mx Pad and np.pad
    :param dtype: element type for the input and gradient buffers
    """
    # bind with label
    X = mx.symbol.Variable('X', dtype=dtype)
    Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
    x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
    # numpy result
    # Regroup the flat pad_width into np.pad's ((before, after), ...) pairs.
    pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
    np_out = np.pad(x.asnumpy(), pad_grouped, mode)
    # mxnet result
    grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
    exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
    exec1.forward(is_train=True)
    out = exec1.outputs[0].asnumpy()
    # compare numpy + mxnet
    assert_almost_equal(out, np_out)
    # grad check
    check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
Example 22
Project: speed_estimation   Author: NeilNie   File: helper.py    MIT License 5 votes vote down vote up
def udacity_val_gen(data, batch_size):

    """
    Generate training image give image paths and associated steering angles

    Yields (images, labels) forever:
      images: (batch_size, LENGTH, IMG_HEIGHT, IMG_WIDTH, 3) int32 frame sequences
      labels: (batch_size,) steering angles taken at each sequence's end frame
    """

    # Buffers are reused across yields -- a consumer must copy them if it
    # keeps references between iterations.
    images = np.empty([batch_size, configs.LENGTH, configs.IMG_HEIGHT, configs.IMG_WIDTH, 3], dtype=np.int32)
    labels = np.empty([batch_size])

    while True:

        c = 0

        for index in np.random.permutation(data.shape[0]):

            imgs = np.empty([configs.LENGTH, configs.IMG_HEIGHT, configs.IMG_WIDTH, 3], dtype=np.int32)

            # Clamp the frame window so [start, end) stays inside the dataset.
            if index < configs.LENGTH:
                start = 0
                end = configs.LENGTH
            elif index + configs.LENGTH >= len(data):
                start = len(data) - configs.LENGTH - 1
                end = len(data) - 1
            else:
                start = index
                end = index + configs.LENGTH

            # NOTE(review): hard-coded absolute dataset path -- assumes this
            # exact directory layout on the training machine.
            for i in range(start, end):
                center_path = '/home/neil/dataset/steering/test/center/' + str(data['frame_id'].loc[i]) + ".jpg"
                image = load_image(center_path)
                imgs[i - start] = image

            images[c] = imgs
            labels[c] = data['steering_angle'].loc[end]

            c += 1

            if c == batch_size:
                break

        yield images, labels
Example 23
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: anchor_target_layer.py    MIT License 5 votes vote down vote up
def _unmap(data, count, inds, fill=0):
  """Scatter `data` back into a float32 array whose leading dimension is
  `count`; positions not listed in `inds` receive `fill`."""
  if len(data.shape) == 1:
    unmapped = np.full((count,), fill, dtype=np.float32)
    unmapped[inds] = data
  else:
    unmapped = np.full((count,) + data.shape[1:], fill, dtype=np.float32)
    unmapped[inds, :] = data
  return unmapped
Example 24
Project: cgp-cnn   Author: sg-nm   File: cgp.py    MIT License 5 votes vote down vote up
def __init__(self, net_info):
        # Network topology description (node / input / output counts).
        self.net_info = net_info
        # Genotype: one row per node (hidden + output), max_in_num + 1 genes
        # per row -- presumably one function gene plus connection genes;
        # confirm against init_gene().
        self.gene = np.zeros((self.net_info.node_num + self.net_info.out_num, self.net_info.max_in_num + 1)).astype(int)
        # is_active[i]: whether node i contributes to an output.  Contents
        # are uninitialized (np.empty) until init_gene() runs.
        self.is_active = np.empty(self.net_info.node_num + self.net_info.out_num).astype(bool)
        # Fitness of this individual; None until evaluated.
        self.eval = None
        self.init_gene()
Example 25
Project: cgp-cnn   Author: sg-nm   File: cgp.py    MIT License 5 votes vote down vote up
def evolution(self, max_eval=100, mutation_rate=0.01, log_file='./log.txt'):
        """Run a (1 + lambda) evolution strategy over the CGP population.

        :param max_eval: stop once this many evaluations have been spent
        :param mutation_rate: per-gene mutation probability for offspring
        :param log_file: CSV file receiving one full log row per generation
        """
        with open(log_file, 'w') as fw:
            writer = csv.writer(fw, lineterminator='\n')

            # eval_flag[i] marks whether child i actually mutated and
            # therefore needs re-evaluation.
            eval_flag = np.empty(self.lam)

            # Evaluate the initial parent (pop[0]).
            self._evaluation([self.pop[0]], np.array([True]))
            print(self._log_data(net_info_type='active_only'))

            while self.num_eval < max_eval:
                self.num_gen += 1

                # reproduction: children are mutated copies of the parent
                for i in range(self.lam):
                    self.pop[i+1].copy(self.pop[0])    # copy a parent
                    eval_flag[i] = self.pop[i+1].mutation(mutation_rate)    # mutation

                # evaluation and selection: the parent is replaced when a
                # child is at least as fit (>=, so neutral drift is allowed)
                evaluations = self._evaluation(self.pop[1:], eval_flag=eval_flag)
                best_arg = evaluations.argmax()
                if evaluations[best_arg] >= self.pop[0].eval:
                    self.pop[0].copy(self.pop[best_arg+1])

                # display and save log
                if eval_flag.sum() > 0:
                    print(self._log_data(net_info_type='active_only'))
                    writer.writerow(self._log_data(net_info_type='full'))

    # Modified CGP (used for GECCO 2017 paper):
    #   At each iteration:
    #     - Generate lambda individuals in which at least one active node changes (i.e., forced mutation)
    #     - Mutate the best individual with neutral mutation (unchanging the active nodes)
    #         if the best individual is not updated. 
Example 26
Project: cgp-cnn   Author: sg-nm   File: cgp.py    MIT License 5 votes vote down vote up
def modified_evolution(self, max_eval=100, mutation_rate=0.01, log_file='./log.txt'):
        """(1 + lambda) ES variant with forced mutation.

        Children are re-mutated until at least one active node changes and
        the active-node count stays within the configured bounds; when no
        child strictly improves on the parent, the parent receives a
        neutral mutation instead.

        :param max_eval: evaluation budget
        :param mutation_rate: per-gene mutation probability
        :param log_file: CSV file receiving one full log row per generation
        """
        with open(log_file, 'w') as fw:
            writer = csv.writer(fw, lineterminator='\n')

            # eval_flag[i]: child i's phenotype changed and needs evaluating.
            eval_flag = np.empty(self.lam)

            # Re-mutate the initial parent until its active-node count is
            # inside [min_active_num, max_active_num].
            active_num = self.pop[0].count_active_node()
            while active_num < self.pop[0].net_info.min_active_num or active_num > self.pop[0].net_info.max_active_num:
                self.pop[0].mutation(1.0)
                active_num = self.pop[0].count_active_node()
            self._evaluation([self.pop[0]], np.array([True]))
            print(self._log_data(net_info_type='active_only'))

            while self.num_eval < max_eval:
                self.num_gen += 1

                # reproduction
                for i in range(self.lam):
                    eval_flag[i] = False
                    self.pop[i + 1].copy(self.pop[0])  # copy a parent
                    active_num = self.pop[i + 1].count_active_node()

                    # forced mutation: retry until an active node changed and
                    # the active-node count is within bounds
                    while not eval_flag[i] or active_num < self.pop[i + 1].net_info.min_active_num \
                            or active_num > self.pop[i + 1].net_info.max_active_num:
                        self.pop[i + 1].copy(self.pop[0])  # copy a parent
                        eval_flag[i] = self.pop[i + 1].mutation(mutation_rate)  # mutation
                        active_num = self.pop[i + 1].count_active_node()

                # evaluation and selection: strict improvement replaces the
                # parent; otherwise the parent drifts via neutral mutation
                evaluations = self._evaluation(self.pop[1:], eval_flag=eval_flag)
                best_arg = evaluations.argmax()
                if evaluations[best_arg] > self.pop[0].eval:
                    self.pop[0].copy(self.pop[best_arg + 1])
                else:
                    self.pop[0].neutral_mutation(mutation_rate)  # neutral mutation

                # display and save log
                print(self._log_data(net_info_type='active_only'))
                writer.writerow(self._log_data(net_info_type='full'))
Example 27
Project: FRIDA   Author: LCAV   File: mkl_fft.py    MIT License 5 votes vote down vote up
def cce2full(A):
    """Rebuild a full 2-D FFT spectrum from its CCE half-spectrum.

    Only square transforms are handled: `A` holds the non-redundant columns
    (as produced for real input) of an (N x N) spectrum; the missing columns
    are reconstructed from conjugate symmetry.
    """
    n_rows = A.shape[0]
    n_kept = n_rows // 2 + 1

    full = np.empty((n_rows, n_rows), dtype=A.dtype)
    full[:, :n_kept] = A

    # Conjugate symmetry F[-k, -l] = conj(F[k, l]): rot90(..., 2) reverses
    # both axes of the interior block in one shot.
    full[1:, n_kept:] = np.rot90(A[1:, 1:-1], 2).conj()

    # The first row (k = 0) pairs with itself, so mirror it separately.
    full[0, n_kept:] = A[0, -2:0:-1].conj()

    return full
Example 28
Project: SmartPiCam   Author: robodhhb   File: smartPiCamContr.py    MIT License 5 votes vote down vote up
def takePhoto(self):
        """Capture one RGB frame and crop it to the detector's input size.

        :returns: numpy uint8 array of shape (H-4, W-4, 3) in RGB order.
        """
        # Buffer is (rows, cols, 3); cameraResolution is (width, height).
        picData = np.empty((self.cameraResolution[1],
                            self.cameraResolution[0], 3),
                            dtype=np.uint8)
        self.cam.capture(picData, format= 'rgb', use_video_port=self.useVideoPort) #24bit rgb format
        # Coco-Model requires 300 x 300 resolution
        # Remove last 4 rows and last 4 columns (all 3 channels kept)
        picData= picData[:-4, :-4]
        return picData
    
    # Function to read labels from text files. 
Example 29
Project: ieml   Author: IEMLdev   File: _test_tables.py    GNU General Public License v3.0 5 votes vote down vote up
def test_2d_multiplicative_script(self):
        """A 2-D multiplicative paradigm must expand into one 3x3 table."""
        script = self.parser.parse("M:.E:A:M:.-")
        tables = script.tables

        triads = ("S:", "B:", "T:")
        expected_rows = [self.parser.parse(p + ".E:A:M:.-") for p in triads]
        expected_cols = [self.parser.parse("M:.E:A:" + p + ".-") for p in triads]
        expected_tabs = [script]

        # Cells substitute a triad for both the row and column variable.
        expected_cells = np.empty((3, 3), dtype=object)
        for r, row_p in enumerate(triads):
            for c, col_p in enumerate(triads):
                expected_cells[r][c] = self.parser.parse(row_p + ".E:A:" + col_p + ".-")

        headers = list(tables[0].headers.values())[0]

        self.assertEqual(len(tables), 1, "Correct number of tables generated")
        self.assertTrue(tables[0].cells.shape == expected_cells.shape, "Table has the correct shape")
        self.assertEqual(headers[0], expected_rows, "Row headers are generated correctly")
        self.assertTrue((headers[1] == expected_cols), "Column headers are generated correctly")
        self.assertEqual(list(tables[0].headers), expected_tabs, "Tab headers are generated correctly")
        self.assertTrue((tables[0].cells == expected_cells).all(), "Cells are generated correctly")
        self.assertTrue(tables[0].paradigm == script, "Table has correct paradigm")
Example 30
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def extract_quaternion(R):
    """Convert a 3x3 rotation matrix `R` to a quaternion.

    Component order appears to be [w, x, y, z] (see the large-trace branch);
    NOTE(review): confirm against callers.  The two-branch scheme keeps the
    conversion numerically stable: the direct sqrt(1 + trace) formula is
    used unless the trace is small.
    """
    d = np.diagonal(R)
    t = np.sum(d)  # trace of R
    if t + 1 < 0.25:
        # Small trace: sqrt(1 + t) would be ill-conditioned, so solve for
        # the dominant vector component first, chosen via the largest
        # diagonal entry of the symmetric part.
        symmetric_mat = R + R.T
        asymmetric_mat = R - R.T
        symmetric_diag = np.diagonal(symmetric_mat)
        i_max = np.argmax(symmetric_diag)
        q = np.empty(4)
        if i_max == 0:
            q[1] = np.sqrt(symmetric_diag[0] - t + 1) / 2
            normalizer = 1 / q[1]
            q[2] = symmetric_mat[1, 0] / 4 * normalizer
            q[3] = symmetric_mat[2, 0] / 4 * normalizer
            q[0] = asymmetric_mat[2, 1] / 4 * normalizer
        elif i_max == 1:
            q[2] = np.sqrt(symmetric_diag[1] - t + 1) / 2
            normalizer = 1 / q[2]
            q[1] = symmetric_mat[1, 0] / 4 * normalizer
            q[3] = symmetric_mat[2, 1] / 4 * normalizer
            q[0] = asymmetric_mat[0, 2] / 4 * normalizer
        elif i_max == 2:
            q[3] = np.sqrt(symmetric_diag[2] - t + 1) / 2
            normalizer = 1 / q[3]
            q[1] = symmetric_mat[2, 0] / 4 * normalizer
            q[2] = symmetric_mat[1, 2] / 4 * normalizer
            q[0] = asymmetric_mat[1, 0] / 4 * normalizer
    else:
        # Large trace: direct formula, w = sqrt(1 + t) / 2.
        r = np.sqrt(1+t)
        s = 0.5 / r
        q = np.array([0.5*r, (R[2, 1] - R[1, 2])*s, (R[0, 2] - R[2, 0])*s, (R[1, 0] - R[0, 1])*s])

    return q
Example 31
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: minibatch.py    MIT License 5 votes vote down vote up
def get_minibatch(roidb, num_classes):
    """Given a roidb, construct a minibatch sampled from it.

    :param roidb: list with exactly one roidb entry (single-image batches)
    :param num_classes: unused here; kept for interface compatibility
    :returns: blobs dict with 'data', 'gt_boxes' and 'im_info'
    """
    num_images = len(roidb)
    # Sample random scales to use for each image in this batch
    random_scale_inds = npr.randint(0, high=len(cfg.FLAGS2["scales"]),
                                    size=num_images)
    assert (cfg.FLAGS.batch_size % num_images == 0), 'num_images ({}) must divide BATCH_SIZE ({})'.format(num_images, cfg.FLAGS.batch_size)

    # Get the input image blob, formatted for caffe
    im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)

    blobs = {'data': im_blob}

    assert len(im_scales) == 1, "Single batch only"
    assert len(roidb) == 1, "Single batch only"

    # gt boxes: (x1, y1, x2, y2, cls)
    if cfg.FLAGS.use_all_gt:
        # Include all ground truth boxes
        gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
    else:
        # For the COCO ground truth boxes, exclude the ones that are ''iscrowd''.
        # BUG FIX: '&' binds tighter than '!=', so the original expression
        # parsed as gt_classes != (0 & np.all(...)) and never applied the
        # crowd filter; parenthesize the comparison explicitly.
        gt_inds = np.where((roidb[0]['gt_classes'] != 0) &
                           np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
    gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
    gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
    gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
    blobs['gt_boxes'] = gt_boxes
    # im_info: (height, width, scale) of the blob fed to the network.
    blobs['im_info'] = np.array(
        [[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],
        dtype=np.float32)

    return blobs
Example 32
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: anchor_target_layer.py    MIT License 5 votes vote down vote up
def _unmap(data, count, inds, fill=0):
    """Unmap a subset of items (data) back to the original set of items
    (of size count), writing `fill` at every position not listed in `inds`.
    """
    if len(data.shape) == 1:
        target_shape = (count,)
    else:
        target_shape = (count,) + data.shape[1:]
    # np.full == np.empty + fill, in one step.
    ret = np.full(target_shape, fill, dtype=np.float32)
    ret[inds] = data
    return ret
Example 33
Project: disentangling_conditional_gans   Author: zalandoresearch   File: tfutil.py    MIT License 5 votes vote down vote up
def _init_graph(self):
        """Build the reusable template graph for this network.

        Infers the input names from the build function's signature, chooses a
        unique TF scope, instantiates placeholders plus the build function
        inside that scope, and records input/output names, shapes, and the
        variable/trainable maps.
        """
        # Collect inputs: every positional build-func parameter without a
        # default value becomes a network input.
        self.input_names = []
        for param in inspect.signature(self._build_func).parameters.values():
            if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
                self.input_names.append(param.name)
        self.num_inputs = len(self.input_names)
        assert self.num_inputs >= 1

        # Choose name and scope.
        if self.name is None:
            self.name = self._build_func_name
        self.scope = tf.get_default_graph().unique_name(self.name.replace('/', '_'), mark_as_used=False)

        # Build template graph.
        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
            assert tf.get_variable_scope().name == self.scope
            with absolute_name_scope(self.scope): # ignore surrounding name_scope
                with tf.control_dependencies(None): # ignore surrounding control_dependencies
                    self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
                    out_expr = self._build_func(*self.input_templates, is_template_graph=True, **self.static_kwargs)

        # Collect outputs: the build func may return a single expression or a
        # tuple of expressions.
        assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)
        self.output_templates = [out_expr] if is_tf_expression(out_expr) else list(out_expr)
        # Strip the scope prefix and tensor index (e.g. "scope/name:0" -> "name").
        self.output_names = [t.name.split('/')[-1].split(':')[0] for t in self.output_templates]
        self.num_outputs = len(self.output_templates)
        assert self.num_outputs >= 1

        # Populate remaining fields.
        self.input_shapes   = [shape_to_list(t.shape) for t in self.input_templates]
        self.output_shapes  = [shape_to_list(t.shape) for t in self.output_templates]
        self.input_shape    = self.input_shapes[0]
        self.output_shape   = self.output_shapes[0]
        self.vars           = OrderedDict([(self.get_var_localname(var), var) for var in tf.global_variables(self.scope + '/')])
        self.trainables     = OrderedDict([(self.get_var_localname(var), var) for var in tf.trainable_variables(self.scope + '/')])

    # Run initializers for all variables defined by this network.
Example 34
Project: Electrolyte_Analysis_FTIR   Author: Samuel-Buteau   File: Constant_run.py    MIT License 5 votes vote down vote up
def get(self, n):
        """
        will return a list of n random numbers in self.GetFresh_list,
        reshuffling the pool once it has been fully consumed
        - Samuel Buteau, October 2018
        """
        # Oversized requests are split in half and served recursively.
        if n >= self.get_fresh_count:
            first_half = self.get(int(n / 2))
            second_half = self.get(n - int(n / 2))
            return numpy.concatenate((first_half, second_half))

        batch = numpy.empty([n], dtype=numpy.int32)

        # Serve as much as possible from the current cursor position.
        available = self.get_fresh_count - self.get_fresh_pos
        served = min(n, available)
        for k in range(served):
            batch[k] = self.GetFresh_list[self.get_fresh_pos + k]
        self.get_fresh_pos += served

        # If the pool was exhausted, wrap the cursor and reshuffle.
        if self.get_fresh_pos >= self.get_fresh_count:
            self.get_fresh_pos -= self.get_fresh_count
            numpy.random.shuffle(self.GetFresh_list)

        # Serve whatever is still owed from the freshly shuffled pool.
        owed = max(0, n - served)
        if owed > 0:
            for k in range(owed):
                batch[served + k] = self.GetFresh_list[k]
            self.get_fresh_pos = owed

        return batch
Example 35
Project: mmdetection   Author: open-mmlab   File: transforms.py    Apache License 2.0 5 votes vote down vote up
def _pad_masks(self, results):
        """Pad every mask field in `results` to results['pad_shape'][:2] and
        stack the padded masks into one (N, H, W) array per field."""
        target_hw = results['pad_shape'][:2]
        for field in results.get('mask_fields', []):
            padded = []
            for mask in results[field]:
                padded.append(mmcv.impad(mask, target_hw, pad_val=self.pad_val))
            if not padded:
                # No masks for this field: keep an empty (0, H, W) uint8 array.
                results[field] = np.empty((0, ) + target_hw, dtype=np.uint8)
            else:
                results[field] = np.stack(padded, axis=0)
Example 36
Project: models   Author: kipoi   File: dataloader.py    MIT License 5 votes vote down vote up
def nans(shape, dtype=float):
    """Return an array of the given shape and dtype filled with NaN."""
    return np.full(shape, np.nan, dtype=dtype)
Example 37
Project: skylab   Author: coenders   File: utils.py    GNU General Public License v3.0 5 votes vote down vote up
def __init__(self, eta, df, loc=0., scale=1.):
        """Store the mixture weight `eta` and per-tail chi2 parameters.

        `params` holds one (df, loc, scale) row for each of the two tails;
        scalar arguments are broadcast to both rows.
        """
        params = np.empty(shape=(2, 3))
        for col, value in enumerate((df, loc, scale)):
            params[:, col] = value

        self.eta = eta
        self.params = params

        # Pre-build the frozen chi2 distributions, one per tail.
        self._chi2 = tuple(scipy.stats.chi2(*p) for p in params)

        # Fit diagnostics, filled in later.
        self.eta_err = np.nan
        self.ks = (np.nan, np.nan)
Example 38
Project: skylab   Author: coenders   File: utils.py    GNU General Public License v3.0 5 votes vote down vote up
def __init__(self, df=np.nan, left=None, right=None):
        """Store per-tail degrees of freedom and auxiliary parameter dicts.

        Args:
            df: scalar (or pair) broadcast into the two-entry df array.
            left, right: optional parameter dicts for the two tails.

        BUG FIX: the original signature used mutable default arguments
        (`left={}, right={}`), so all instances created without explicit
        dicts shared the *same* dict objects; use None sentinels instead.
        """
        self.df = np.empty(2)
        self.df[:] = df
        self.others = ({} if left is None else left,
                       {} if right is None else right)
Example 39
Project: core   Author: lifemapper   File: geotools.py    GNU General Public License v3.0 5 votes vote down vote up
def getArray(self, bandnum, doFlip=False, doShift=False):
      """
      @summary: Read the dataset into numpy array  
      @param bandnum: The band number to read.
      @param doFlip: True if data begins at the southern edge of the region
      @param doShift: True if the leftmost edge of the data should be shifted 
             to the center (and right half shifted around to the beginning) 
      @return: numpy array of shape (ysize, xsize) holding the band data
      """
      # NOTE(review): dir() with no arguments at function scope lists *local*
      # names, so the module-level import 'numpy' will typically NOT appear
      # here and this guard may always raise -- confirm intent.
      if 'numpy' in dir():
         inds = gdal.Open(self.dlocation, gdalconst.GA_ReadOnly)
         inband = inds.GetRasterBand(bandnum)
         arrtype = self._getNumpyType(self.gdalBandType)
         outArr = numpy.empty([self.ysize, self.xsize], dtype=arrtype)
         
         # Copy the raster one scanline at a time.
         for row in range(self.ysize):
            scanline = inband.ReadAsArray(0, row, self.xsize, 1, self.xsize, 1)
            
            if doShift:
               # NOTE(review): self.xsize/2 is a float under Python 3;
               # _cycleRow presumably expects an int index -- confirm.
               scanline = self._cycleRow(scanline, arrtype, 0, self.xsize/2, 
                                         self.xsize)
            if doFlip:
               # Write southern-origin data north-up by mirroring the row index.
               newrow = self.ysize-row-1
            else:
               newrow = row
   
            outArr[newrow] = scanline
   
         inds = None   # drop the reference so GDAL can close the dataset
         return outArr
      else:
         raise LMError('numpy missing - unable to getArray')
      
# ............................................. 
Example 40
Project: mmi-tagger   Author: karlstratos   File: evaluate.py    MIT License 5 votes vote down vote up
def compute_v_measure(tseqs, zseqs):
    """Compute the V-measure (scaled to 0-100) between gold tag sequences
    `tseqs` and induced label sequences `zseqs`.

    Builds the joint distribution B over (tag, label) pairs, derives
    homogeneity h and completeness c from the conditional entropies, and
    returns their harmonic mean * 100. Degenerate single-cluster sides are
    defined as 1.
    """
    t2i = {}
    z2i = {}
    cocount = Counter()
    num_instances = 0
    for tseq, zseq in zip(tseqs, zseqs):
        for t, z in zip(tseq, zseq):
            num_instances += 1
            t2i.setdefault(t, len(t2i))
            z2i.setdefault(z, len(z2i))
            cocount[(t2i[t], z2i[z])] += 1

    num_t = len(t2i)
    num_z = len(z2i)

    # Joint distribution over (tag, label) index pairs.
    B = np.empty([num_t, num_z])
    for i in range(num_t):
        for j in range(num_z):
            B[i, j] = cocount[(i, j)] / num_instances

    # Marginals and marginal entropies.
    p_T = np.sum(B, axis=1)
    p_Z = np.sum(B, axis=0)
    H_T = sum([- p_T[i] * np.log2(p_T[i]) for i in range(num_t)])
    H_Z = sum([- p_Z[j] * np.log2(p_Z[j]) for j in range(num_z)])

    # Conditional entropies H(T|Z) and H(Z|T), skipping zero cells.
    H_T_given_Z = 0
    for j in range(num_z):
        for i in range(num_t):
            if B[i, j] > 0.0:
                H_T_given_Z -= B[i, j] * (np.log2(B[i, j]) - np.log2(p_Z[j]))

    H_Z_given_T = 0
    for j in range(num_t):
        for i in range(num_z):
            if B[j, i] > 0.0:
                H_Z_given_T -= B[j, i] * (np.log2(B[j, i]) - np.log2(p_T[j]))

    h = 1 if num_t == 1 else 1 - H_T_given_Z / H_T
    c = 1 if num_z == 1 else 1 - H_Z_given_T / H_Z

    return 2 * h * c / (h + c) * 100.0
Example 41
Project: AnisotropicMultiStreamCNN   Author: AnnekeMeyer   File: data_generation.py    MIT License 5 votes vote down vote up
def __data_generation(self, list_IDs_temp):
        """Generate one batch of augmented multi-view volumes.

        For each sample ID, loads the transversal/sagittal/coronal ROIs and
        the ground-truth segmentation from NRRD files, augments all four
        jointly, crops them to the network input size, and stacks them into
        the batch arrays.

        @param list_IDs_temp: sample IDs (subdirectory names) for this batch.
        @return: ([X_tra, X_cor, X_sag], [Y]) -- three single-channel image
                 batches plus the uint8 label batch.
        """
        # Batch arrays: each view is low-resolution (volumeSize_slices) along
        # its own axis and high-resolution (4x) along the other two; the
        # ground truth Y is high-resolution along all three axes.
        X_tra = np.empty([self.batch_size, self.volumeSize_slices, 4*self.volumeSize_slices, 4*self.volumeSize_slices,self.n_channels])
        X_sag = np.empty([self.batch_size, 4*self.volumeSize_slices,4*self.volumeSize_slices,self.volumeSize_slices, self.n_channels])
        X_cor = np.empty([self.batch_size, 4*self.volumeSize_slices,self.volumeSize_slices,4*self.volumeSize_slices, self.n_channels])
        Y = np.empty([self.batch_size, 4*self.volumeSize_slices,4*self.volumeSize_slices,4*self.volumeSize_slices,1], dtype=np.uint8)


        # Generate data
        for i, ID in enumerate(list_IDs_temp):
            # load sample
            roi_tra = sitk.ReadImage(self.data_dir + '/' + ID + '/roi_tra.nrrd')
            roi_cor = sitk.ReadImage(self.data_dir + '/' + ID + '/roi_cor.nrrd')
            roi_sag = sitk.ReadImage(self.data_dir + '/' + ID + '/roi_sag.nrrd')
            roi_GT = sitk.ReadImage(self.data_dir + '/' + ID + '/roi_GT.nrrd')

            # augment sample (all views transformed consistently)
            augm_tra, augm_sag, augm_cor, augm_GT = dataAugmentation.augmentImages(roi_tra, roi_sag, roi_cor, roi_GT)

            # crop ROIS to input size of network
            # get size of uncropped ROI; a/b are the symmetric crop margins
            # for the high-/low-resolution axes respectively
            inPlaneSize = roi_tra.GetSize()[0]
            a = int((inPlaneSize - 4*self.volumeSize_slices) / 2)
            b = int((inPlaneSize/4 - self.volumeSize_slices) / 2)
            augm_tra = utils.cropImage(augm_tra, [a, a, b], [a, a, b])
            augm_cor = utils.cropImage(augm_cor, [a, b, a], [a, b, a])
            augm_sag = utils.cropImage(augm_sag, [b, a, a], [b, a, a])
            augm_GT = utils.cropImage(augm_GT, [a, a, a], [a, a, a])


            # store augmented sample (single channel per view)
            X_tra[i, :, :, :, 0] = sitk.GetArrayFromImage(augm_tra)
            X_cor[i, :, :, :, 0] = sitk.GetArrayFromImage(augm_cor)
            X_sag[i, :, :, :, 0] = sitk.GetArrayFromImage(augm_sag)
            Y[i, :, :, :, 0] = sitk.GetArrayFromImage(augm_GT)


        return [X_tra, X_cor,X_sag] , [Y]
Example 42
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: atari_game.py    Apache License 2.0 5 votes vote down vote up
def __init__(self,
                 rom_path=_default_rom_path,
                 frame_skip=4, history_length=4,
                 resize_mode='scale', resized_rows=84, resized_cols=84, crop_offset=8,
                 display_screen=False, max_null_op=30,
                 replay_memory_size=1000000,
                 replay_start_size=100,
                 death_end_episode=True):
        """Create an Atari game environment backed by ALE.

        Args:
            rom_path: path of the Atari ROM to load.
            frame_skip: number of ALE frames each chosen action repeats for.
            history_length: number of recent frames stacked into one state.
            resize_mode, resized_rows, resized_cols, crop_offset: screen
                preprocessing parameters.
            display_screen: if True, ALE renders the game window.
            max_null_op: maximum number of null ops at episode start.
            replay_memory_size: capacity of the replay buffer.
            replay_start_size: minimum buffer fill before sampling.
            death_end_episode: if True, losing a life ends the episode.
        """
        super(AtariGame, self).__init__()
        self.rng = get_numpy_rng()
        self.ale = ale_load_from_rom(rom_path=rom_path, display_screen=display_screen)
        self.start_lives = self.ale.lives()
        self.action_set = self.ale.getMinimalActionSet()
        self.resize_mode = resize_mode
        self.resized_rows = resized_rows
        self.resized_cols = resized_cols
        self.crop_offset = crop_offset
        self.frame_skip = frame_skip
        self.history_length = history_length
        self.max_null_op = max_null_op
        self.death_end_episode = death_end_episode
        # Two raw screen buffers are kept. NOTE(review): the buffer is shaped
        # (length, dims[1], dims[0]), which assumes getScreenDims() returns
        # (width, height) -- confirm against the ALE API.
        self.screen_buffer_length = 2
        self.screen_buffer = numpy.empty((self.screen_buffer_length,
                                          self.ale.getScreenDims()[1], self.ale.getScreenDims()[0]),
                                         dtype='uint8')
        self.replay_memory = ReplayMemory(state_dim=(resized_rows, resized_cols),
                                          history_length=history_length,
                                          memory_size=replay_memory_size,
                                          replay_start_size=replay_start_size)
        self.start()
Example 43
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: replay_memory.py    Apache License 2.0 5 votes vote down vote up
def sample_last(self, batch_size, states, offset):
        """Fill `states[offset:offset+batch_size]` with the most recent valid
        transitions and return the matching (actions, rewards, terminate_flags).

        Walks backwards from the newest frame, skipping any window whose
        history crosses an episode boundary (a terminal flag inside it).
        """
        assert self.size >= batch_size and self.replay_start_size >= self.history_length
        assert 0 <= self.size <= self.memory_size
        assert 0 <= self.top <= self.memory_size
        if self.size <= self.replay_start_size:
            raise ValueError("Size of the effective samples of the ReplayMemory must be "
                             "bigger than start_size! Currently, size=%d, start_size=%d"
                             % (self.size, self.replay_start_size))

        actions = numpy.empty((batch_size,) + self.action_dim, dtype=self.actions.dtype)
        rewards = numpy.empty(batch_size, dtype='float32')
        terminate_flags = numpy.empty(batch_size, dtype='bool')

        filled = 0
        start = self.top - self.history_length - 1
        while filled < batch_size:
            window = numpy.arange(start, start + self.history_length + 1)
            last = start + self.history_length
            # Reject windows whose history contains a terminal frame.
            crosses_episode = numpy.any(
                self.terminate_flags.take(window[0:self.history_length], mode='wrap'))
            if not crosses_episode:
                states[filled + offset] = self.states.take(window, axis=0, mode='wrap')
                actions[filled] = self.actions.take(last, axis=0, mode='wrap')
                rewards[filled] = self.rewards.take(last, mode='wrap')
                terminate_flags[filled] = self.terminate_flags.take(last, mode='wrap')
                filled += 1
            start -= 1
        return actions, rewards, terminate_flags
Example 44
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: replay_memory.py    Apache License 2.0 5 votes vote down vote up
def sample_mix(self, batch_size, states, offset, current_index):
        """Sample `batch_size` transitions: the first window is taken at a
        fixed position relative to the newest frame, subsequent windows at
        random positions drawn from self.rng.

        Fills `states[offset:offset+batch_size]` in place and returns the
        matching (actions, rewards, terminate_flags). Windows whose history
        contains a terminal flag are rejected and re-drawn (by stepping back
        one index). Note: results are stochastic via self.rng.randint.
        """
        assert self.size >= batch_size and self.replay_start_size >= self.history_length
        assert(0 <= self.size <= self.memory_size)
        assert(0 <= self.top <= self.memory_size)
        if self.size <= self.replay_start_size:
            raise ValueError("Size of the effective samples of the ReplayMemory must be bigger than "
                             "start_size! Currently, size=%d, start_size=%d"
                             %(self.size, self.replay_start_size))
        actions = numpy.empty((batch_size,) + self.action_dim, dtype=self.actions.dtype)
        rewards = numpy.empty(batch_size, dtype='float32')
        terminate_flags = numpy.empty(batch_size, dtype='bool')
        counter = 0
        # Deterministic starting window; later iterations jump to random ids.
        first_index = self.top - self.history_length + current_index
        thisid = first_index
        while counter < batch_size:
            # Window of history_length+1 consecutive frames ending at end_index.
            full_indices = numpy.arange(thisid, thisid + self.history_length+1)
            end_index = thisid + self.history_length
            if numpy.any(self.terminate_flags.take(full_indices[0:self.history_length], mode='wrap')):
                # Check if terminates in the middle of the sample!
                thisid -= 1
                continue
            states[counter+offset] = self.states.take(full_indices, axis=0, mode='wrap')
            actions[counter] = self.actions.take(end_index, axis=0, mode='wrap')
            rewards[counter] = self.rewards.take(end_index, mode='wrap')
            terminate_flags[counter] = self.terminate_flags.take(end_index, mode='wrap')
            counter += 1
            thisid = self.rng.randint(low=self.top - self.size, high=self.top - self.history_length-1)
        return actions, rewards, terminate_flags
Example 45
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: image.py    Apache License 2.0 5 votes vote down vote up
def get_image(roi_rec, short, max_size, mean, std):
    """
    read, resize, transform image, return im_tensor, im_info, gt_boxes
    roi_rec should have keys: ["image", "boxes", "gt_classes", "flipped"]
    0 --- x (width, second dim of im)
    |
    y (height, first dim of im)
    """
    im = imdecode(roi_rec['image'])
    if roi_rec["flipped"]:
        # horizontal flip: reverse the width axis
        im = im[:, ::-1, :]
    im, im_scale = resize(im, short, max_size)
    height, width = im.shape[:2]
    im_info = np.array([height, width, im_scale], dtype=np.float32)
    im_tensor = transform(im, mean, std)

    # gt boxes: (x1, y1, x2, y2, cls)
    gt_classes = roi_rec['gt_classes']
    if gt_classes.size == 0:
        gt_boxes = np.empty((0, 5), dtype=np.float32)
    else:
        # keep foreground boxes and scale them into the resized image
        keep = np.where(gt_classes != 0)[0]
        gt_boxes = np.empty((len(keep), 5), dtype=np.float32)
        gt_boxes[:, 0:4] = roi_rec['boxes'][keep, :] * im_scale
        gt_boxes[:, 4] = gt_classes[keep]

    return im_tensor, im_info, gt_boxes
Example 46
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: ndarray.py    Apache License 2.0 5 votes vote down vote up
def _new_empty_handle():
    """Returns a new empty handle.

    Empty handle can be used to hold a result.

    Returns
    -------
    handle
        A new empty `NDArray` handle.
    """
    hdl = NDArrayHandle()
    # check_call raises if the C API reports a non-zero status.
    check_call(_LIB.MXNDArrayCreateNone(ctypes.byref(hdl)))
    return hdl
Example 47
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: ndarray.py    Apache License 2.0 5 votes vote down vote up
def astype(self, dtype, copy=True):
        """Cast this array to `dtype`, returning a copy (or `self` when allowed).

        Parameters
        ----------
        dtype : numpy.dtype or str
            The type of the returned array.
        copy : bool
            Default `True`. When `False` and `dtype` already matches this
            array's dtype, the array itself is returned instead of a copy.

        Returns
        -------
        NDArray, CSRNDArray or RowSparseNDArray
            A newly allocated array of the requested dtype on the same
            context, or `self` when `copy=False` and no cast is needed.

        Examples
        --------
        >>> x = mx.nd.zeros((2,3), dtype='float32')
        >>> y = x.astype('int32')
        >>> y.dtype
        <type 'numpy.int32'>
        """
        same_dtype = np.dtype(dtype) == self.dtype
        if same_dtype and not copy:
            # Caller explicitly allowed aliasing; no cast is required.
            return self

        out = empty(self.shape, ctx=self.context, dtype=dtype)
        self.copyto(out)
        return out
Example 48
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: ndarray.py    Apache License 2.0 5 votes vote down vote up
def ones(shape, ctx=None, dtype=None, **kwargs):
    """Returns a new array filled with all ones, with the given shape and type.

    Parameters
    ----------
    shape : int or tuple of int or list of int
        The shape of the empty array.
    ctx : Context, optional
        An optional device context.
        Defaults to the current default context (``mxnet.context.current_context()``).
    dtype : str or numpy.dtype, optional
        An optional value type (default is `float32`).
    out : NDArray, optional
        The output NDArray (default is `None`).

    Returns
    -------
    NDArray
        A new array of the specified shape filled with all ones.

    Examples
    --------
    >>> mx.nd.ones(1).asnumpy()
    array([ 1.], dtype=float32)
    >>> mx.nd.ones((1,2), mx.gpu(0))
    <NDArray 1x2 @gpu(0)>
    >>> mx.nd.ones((1,2), dtype='float16').asnumpy()
    array([[ 1.,  1.]], dtype=float16)
    """
    # pylint: disable= unused-argument
    # Fall back to the current context / real type when unspecified.
    if ctx is None:
        ctx = current_context()
    if dtype is None:
        dtype = mx_real_t
    # pylint: disable= no-member, protected-access
    return _internal._ones(shape=shape, ctx=ctx, dtype=dtype, **kwargs)
    # pylint: enable= no-member, protected-access
Example 49
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: ndarray.py    Apache License 2.0 5 votes vote down vote up
def array(source_array, ctx=None, dtype=None):
    """Creates an array from any object exposing the array interface.

    Parameters
    ----------
    source_array : array_like
        An object exposing the array interface, an object whose `__array__`
        method returns an array, or any (nested) sequence.
    ctx : Context, optional
        Device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        The data type of the output array. The default dtype is ``source_array.dtype``
        if `source_array` is an `NDArray`, `float32` otherwise.

    Returns
    -------
    NDArray
        An `NDArray` with the same contents as the `source_array`.

    Raises
    ------
    TypeError
        If `source_array` cannot be converted to a numpy array.
    """
    if isinstance(source_array, NDArray):
        dtype = source_array.dtype if dtype is None else dtype
    else:
        dtype = mx_real_t if dtype is None else dtype
        if not isinstance(source_array, np.ndarray):
            # BUG FIX: the original bare `except:` also swallowed
            # SystemExit/KeyboardInterrupt and discarded the cause; catch
            # Exception only and chain it onto the TypeError.
            try:
                source_array = np.array(source_array, dtype=dtype)
            except Exception as exc:
                raise TypeError('source_array must be array like object') from exc
    arr = empty(source_array.shape, ctx, dtype)
    arr[:] = source_array
    return arr
Example 50
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: ndarray.py    Apache License 2.0 5 votes vote down vote up
def zeros(shape, ctx=None, dtype=None, **kwargs):
    """Returns a new array filled with all zeros, with the given shape and type.

    Parameters
    ----------
    shape : int or tuple of int
        The shape of the empty array.
    ctx : Context, optional
        An optional device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        An optional value type (default is `float32`).
    out : NDArray, optional
        The output NDArray (default is `None`).

    Returns
    -------
    NDArray
        A created array

    Examples
    --------
    >>> mx.nd.zeros(1).asnumpy()
    array([ 0.], dtype=float32)
    >>> mx.nd.zeros((1,2), mx.gpu(0))
    <NDArray 1x2 @gpu(0)>
    >>> mx.nd.zeros((1,2), mx.gpu(0), 'float16').asnumpy()
    array([[ 0.,  0.]], dtype=float16)
    """
    # pylint: disable= unused-argument
    # Fall back to the current context / real type when unspecified.
    if ctx is None:
        ctx = current_context()
    if dtype is None:
        dtype = mx_real_t
    # pylint: disable= no-member, protected-access
    return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, **kwargs)
    # pylint: enable= no-member, protected-access
Example 51
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: ndarray.py    Apache License 2.0 5 votes vote down vote up
def empty(shape, ctx=None, dtype=None):
    """Returns a new array of given shape and type, without initializing entries.

    Parameters
    ----------
    shape : int or tuple of int
        The shape of the empty array.
    ctx : Context, optional
        An optional device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        An optional value type (default is `float32`).

    Returns
    -------
    NDArray
        A created array.

    """
    # Normalize the arguments, then allocate an uninitialized handle.
    shape = (shape, ) if isinstance(shape, int) else shape
    ctx = current_context() if ctx is None else ctx
    dtype = mx_real_t if dtype is None else dtype
    return NDArray(handle=_new_alloc_handle(shape, ctx, False, dtype))


# pylint: disable= redefined-builtin 
Example 52
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: mxnet_predict.py    Apache License 2.0 5 votes vote down vote up
def get_output(self, index):
        """Get the index-th output.

        Parameters
        ----------
        index : int
            The index of output.

        Returns
        -------
        out : numpy array.
            The output array.
        """
        # First query the output shape from the C predictor...
        shape_ptr = ctypes.POINTER(mx_uint)()
        rank = mx_uint()
        _check_call(_LIB.MXPredGetOutputShape(
            self.handle, index,
            ctypes.byref(shape_ptr),
            ctypes.byref(rank)))
        out_shape = tuple(shape_ptr[:rank.value])
        # ...then copy the output directly into a freshly allocated buffer.
        out = np.empty(out_shape, dtype=np.float32)
        _check_call(_LIB.MXPredGetOutput(
            self.handle, mx_uint(index),
            out.ctypes.data_as(mx_float_p),
            mx_uint(out.size)))
        return out
Example 53
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def check_softmax_with_ignore_label(xpu):
    """Check SoftmaxOutput's `ignore_label` behavior on device `xpu`.

    Runs a forward/backward pass with random labels, then re-runs with the
    first half of the labels set to the ignore value (0) and verifies that
    the corresponding gradients vanish while the rest are unchanged.
    """
    X = mx.symbol.Variable('X')
    L = mx.symbol.Variable('L')
    Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)

    shape = (20, 10)
    x = mx.nd.empty(shape, ctx = xpu)
    l = mx.nd.empty((shape[0],), ctx = xpu)
    x_np = np.random.rand(*shape)
    # Labels drawn from [0, shape[1]-2]; 0 doubles as the ignore label later.
    l_np = np.random.randint(0, shape[1]-1, (shape[0],))
    x[:] = x_np
    l[:] = l_np

    grad = mx.nd.empty(shape, ctx = xpu)

    exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
    exec1.forward(is_train=True)
    exec1.backward()

    # Reference gradient with no ignored labels.
    grad0 = grad.asnumpy()

    # Mark the first half of the batch as ignored.
    for i in range(int(shape[0]/2)):
        l_np[i] = 0
    l[:] = l_np

    exec1.forward(is_train=True)
    exec1.backward()
    grad1 = grad.asnumpy()

    # Ignored rows get (near-)zero gradient; remaining rows are unaffected.
    assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
    assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):])
Example 54
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_round_ceil_floor():
    """Check that mx round/ceil/floor (summed) match their numpy equivalents.

    Fix: removed the dead locals `arr_grad` (allocated and filled but never
    read -- this test is forward-only and binds no gradient buffers).
    """
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    # Constant input with a non-trivial fractional part.
    data_tmp = np.ones(shape)
    data_tmp[:]=5.543
    arr_data = mx.nd.array(data_tmp)

    test = mx.sym.round(data) + mx.sym.ceil(data) +  mx.sym.floor(data)
    exe_test = test.bind(default_context(), args=[arr_data])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0].asnumpy()
    npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
    assert_almost_equal(out, npout)
Example 55
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_maximum_minimum():
    """Check forward and backward of mx.sym.maximum/minimum against numpy.

    Uses constant inputs (2 and 3) so the element-wise argmax/argmin masks
    are deterministic, then verifies the gradients route through the
    selected operand only.
    """
    data1 = mx.symbol.Variable('data')
    data2 = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp1 = np.random.rand(3,4)
    data_tmp2 = np.random.rand(3,4)
    # Overwrite with constants so expected gradients are exact.
    data_tmp1[:] = 2
    data_tmp2[:] = 3

    arr_data1 = mx.nd.array(data_tmp1)
    arr_data2 = mx.nd.array(data_tmp2)

    arr_grad1 = mx.nd.empty(shape)
    arr_grad2 = mx.nd.empty(shape)

    test =  mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2);
    exe_test = test.bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0].asnumpy()
    npout =  np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
    assert_almost_equal(out, npout)

    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    exe_test.backward(out_grad)

    # Expected gradients: each operand receives the upstream gradient where
    # it wins the max (mask1) or the min (mask2).
    npout_grad = np.ones(shape)
    npout_grad[:] = 2
    mask1 = (data_tmp1 > data_tmp2).astype('float')
    mask2 = (data_tmp1 < data_tmp2).astype('float')
    npout_grad1 = npout_grad * mask1 + npout_grad * mask2
    npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)

    assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
    assert_almost_equal(arr_grad2.asnumpy(), npout_grad2)
Example 56
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
    """configure A: input --> conv --> deconv --> output.
       the convolution and deconvolution use matching parameters, which ensures
       the output shape equals the input shape, and conv/deconv share the same
       weights;
       If the input value of forward() and backward() is the same, then
       the output value of them should also be the same;
    """
    assert input_shape[1] == num_filter
    data = mx.sym.Variable(name="data")
    conv = mx.sym.Convolution(
        data=data, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias = "true", name = "conv")
    deconv = mx.sym.Deconvolution(
        data=conv, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias = "true", name = "deconv")

    arg_names = deconv.list_arguments()
    arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
    input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
    # Feed the same tensor forward and as the output gradient (see docstring).
    out_grad = input_data
    args = {}
    args["data"] = input_data
    # Tie conv and deconv to one shared weight tensor.
    args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
        (num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
    args_grad = [mx.nd.empty(s) for s in arg_shapes]

    exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
    exe.forward(is_train=True)
    out = exe.outputs[0].asnumpy()
    exe.backward(out_grad)
    assert_almost_equal(out, args_grad[0].asnumpy(), rtol=1E-3, atol=1e-3)

    # Second pass: with grad_req="add" the new gradients must accumulate
    # onto the pre-existing random contents instead of overwriting them.
    args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
    args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
    exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
    exe.forward(is_train=True)
    out = exe.outputs[0].asnumpy()
    exe.backward(out_grad)
    assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
Example 57
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
    """Check the backward pass of a (possibly broadcasting) binary symbol.

    For each of `sample_num` random inputs from `gen_data(i)`, computes
    reference input gradients with `baseline(out_grad, a, b)`, reduces them
    back onto the original operand shapes (summing over broadcast axes),
    and compares with the gradients produced by binding and running `symbol`
    (which expects argument names 'a' and 'b').
    """
    sample_num = 200
    for i in range(sample_num):
        d = gen_data(i)
        # Random upstream gradient with the broadcasted output shape.
        out = np.random.random((d[0] + d[1]).shape)

        def reduce_op(shape, x):
            # Sum x over every axis where its shape disagrees with the
            # target shape (i.e. the axes that were broadcast), keepdims.
            if shape == x.shape:
                return x
            keepdims_shape = list(x.shape)
            for i in range(len(shape)):
                if x.shape[i] != shape[i]:
                    keepdims_shape[i] = 1
                    x = np.sum(x, axis=i).reshape(keepdims_shape)
            return x

        baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
        x_1 = reduce_op(d[0].shape, baseline_grad1)
        x_2 = reduce_op(d[1].shape, baseline_grad2)
        y_1 = mx.nd.empty(d[0].shape)
        y_2 = mx.nd.empty(d[1].shape)
        y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
                        args_grad=[y_1, y_2])
        y.forward(is_train=True)
        y.backward([mx.nd.array(out)])
        assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
        assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
Example 58
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_broadcast():
    """Test broadcast_axis / broadcast_to / broadcast_like forward and backward."""
    sample_num = 200
    for i in range(sample_num):
        # Generate random data that has ndim between 1-5 and all the shape dims between 1-5
        ndim = np.random.randint(1, 6)
        target_shape = np.random.randint(1, 6, size=(ndim,))
        # Random subset of axes to broadcast along (set() deduplicates).
        axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
        shape = target_shape.copy()
        size = tuple([shape[ele] for ele in axis])
        # The input has extent 1 on every broadcast axis.
        for ele in axis:
            shape[ele] = 1
        a = mx.symbol.Variable('a')
        sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
        sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
        sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
        def test_broadcasting_ele(sym_bcast):
            dat_npy = np.random.rand(*shape)
            groundtruth = dat_npy
            grad_nd = mx.nd.empty(shape)
            outgrad_npy = np.random.rand(*target_shape)
            # Backward of a broadcast is a sum-reduction over the broadcast axes.
            grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
                                          numpy_reduce_func=np.sum)
            net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
                                                 args_grad={'a': grad_nd})
            net.forward(is_train=True)
            assert (net.outputs[0].shape == target_shape).all()
            # groundtruth broadcasts against the output inside the comparison.
            assert_almost_equal(net.outputs[0].asnumpy(), groundtruth, rtol=1e-4)
            net.backward(out_grads=mx.nd.array(outgrad_npy))
            assert_almost_equal(grad_nd.asnumpy(), grad_groundtruth, rtol=1e-4)
        test_broadcasting_ele(sym_bcast_axis)
        test_broadcasting_ele(sym_bcast_to)
        test_broadcasting_ele(sym_bcast_like)
Example 59
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_slice_axis():
    """Test slice_axis forward/backward, including negative begin/end indices,
    end=None, and gradient accumulation via grad_req='add'."""
    for ndim in range(1, 6):
        shape = np.random.randint(1, 11, size=(ndim,))
        for t in range(ndim):
            d = shape[t]
            b = random.randint(0, d-1)
            e = random.randint(b+1, d)
            # Randomly exercise the open-ended (end=None) and the
            # negative-index forms of the slice bounds.
            if np.random.rand() > 0.6:
                e = None
            else:
                if e < d and np.random.rand() > 0.5:
                    e = e - d
            if np.random.rand() > 0.5:
                b = b - d
            # Equivalent numpy slice tuple: full range on every axis but t.
            idx = []
            for i in range(ndim):
                idx.append(slice(0, shape[i]))
            idx[t] = slice(b, e)

            X = mx.symbol.Variable('X')
            x = mx.nd.array(np.random.normal(size=shape))
            Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)

            xgrad = mx.nd.empty(x.shape)
            exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
            exec1.forward(is_train=True)
            y = exec1.outputs[0]
            assert_allclose(x.asnumpy()[idx], y.asnumpy())
            exec1.backward([y])
            # Expected gradient: the head gradient scattered back into the
            # sliced region, zero elsewhere.
            xx = x.asnumpy()
            xx[:] = 0.0
            xx[idx] = x.asnumpy()[idx]
            assert_allclose(xx, xgrad.asnumpy())
            # Second pass: grad_req='add' must accumulate onto existing values.
            x_grad_npy = np.random.normal(size=x.shape)
            xgrad = mx.nd.array(x_grad_npy)
            exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
            exec2.forward(is_train=True)
            exec2.backward([exec2.outputs[0]])
            xx = np.zeros(shape=x.shape, dtype=np.float32)
            xx[idx] = x.asnumpy()[idx]
            assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
Example 60
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_support_vector_machine_l1_svm():
    """Check forward/backward of SVMOutput with the linear (L1) hinge loss."""
    ctx = default_context()
    batch, num_classes = 20, 10
    shape = (batch, num_classes)

    data_sym = mx.symbol.Variable('X')
    label_sym = mx.symbol.Variable('L')
    svm_sym = mx.symbol.SVMOutput(data=data_sym, label=label_sym, use_linear=True)

    data_nd = mx.nd.empty(shape, ctx=ctx)
    label_nd = mx.nd.empty((batch,), ctx=ctx)
    data_np = np.random.rand(*shape)
    label_np = np.random.randint(0, num_classes, (batch,))
    data_nd[:] = data_np
    label_nd[:] = label_np

    grad_nd = mx.nd.empty(shape, ctx=ctx)
    executor = svm_sym.bind(ctx, args=[data_nd, label_nd], args_grad={'X': grad_nd})
    executor.forward(is_train=True)

    # SVMOutput is an identity transform on the forward pass.
    assert_almost_equal(data_np, executor.outputs[0].asnumpy())

    executor.backward()

    # Sign matrix: +1 at the true class of each row, -1 everywhere else.
    sign = np.array(np.equal(label_np.reshape(batch, 1), range(num_classes)),
                    dtype=np.float32) * 2 - 1
    # L1 hinge: d/dx max(0, 1 - s*x) = -s where the margin is violated.
    expected_grad = (-1) * sign * np.greater(1 - sign * data_np, 0)

    assert_almost_equal(expected_grad, grad_nd.asnumpy())
Example 61
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_support_vector_machine_l2_svm():
    """Check forward/backward of SVMOutput with the squared (L2) hinge loss."""
    ctx = default_context()
    batch, num_classes = 20, 10
    shape = (batch, num_classes)

    data_sym = mx.symbol.Variable('X')
    label_sym = mx.symbol.Variable('L')
    svm_sym = mx.symbol.SVMOutput(data=data_sym, label=label_sym)

    data_nd = mx.nd.empty(shape, ctx=ctx)
    label_nd = mx.nd.empty((batch,), ctx=ctx)
    data_np = np.random.rand(*shape).astype(np.float32)
    label_np = np.random.randint(0, num_classes, (batch,))
    data_nd[:] = data_np
    label_nd[:] = label_np

    grad_nd = mx.nd.empty(shape, ctx=ctx)
    executor = svm_sym.bind(ctx, args=[data_nd, label_nd], args_grad={'X': grad_nd})
    executor.forward(is_train=True)

    # SVMOutput passes the data through unchanged on the forward pass.
    assert_almost_equal(data_np, executor.outputs[0].asnumpy())

    executor.backward()

    # Sign matrix: +1 at the true class of each row, -1 everywhere else.
    sign = np.array(np.equal(label_np.reshape(batch, 1), range(num_classes)),
                    dtype=np.float32) * 2 - 1
    # L2 hinge: d/dx max(0, 1 - s*x)^2 = -2*s*max(0, 1 - s*x).
    expected_grad = ((-2) * sign * np.maximum(1 - sign * data_np, 0)).astype(np.float32)
    assert_almost_equal(expected_grad, grad_nd.asnumpy())


# Seed set because the test is not robust enough to operate on random data 
Example 62
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
    """Compare an mxnet unary op against numpy forward/backward references.

    :param name: op name (kept for call-site readability; unused here)
    :param forward_mxnet_call: callable(symbol) -> symbol building the op
    :param forward_numpy_call: callable(ndarray) -> ndarray numpy forward
    :param backward_numpy_call: callable(ndarray) -> ndarray d(out)/d(in)
    :param data_init: constant fill value for the input
    :param grad_init: constant fill value for the head gradient
    """
    shape = (3, 4)
    var = mx.symbol.Variable('data')

    np_input = np.full(shape, data_init)
    nd_input = mx.nd.array(np_input)
    nd_grad = mx.nd.empty(shape)
    nd_grad[:] = 3

    executor = forward_mxnet_call(var).bind(
        default_context(), args=[nd_input], args_grad=[nd_grad])
    executor.forward(is_train=True)
    assert_almost_equal(executor.outputs[0].asnumpy(), forward_numpy_call(np_input))

    head_grad = mx.nd.empty(shape)
    head_grad[:] = grad_init
    # Chain rule: input grad = head grad * local derivative.
    expected_grad = head_grad.asnumpy() * backward_numpy_call(np_input)
    executor.backward(head_grad)
    assert_almost_equal(nd_grad.asnumpy(), expected_grad)
Example 63
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_adaptive_avg_pool_op():
    """Test mx.nd.contrib.AdaptiveAvgPooling2D against a pure-numpy reference."""
    def py_adaptive_avg_pool(x, height, width):
        # 2D per frame adaptive avg pool
        def adaptive_avg_pool_frame(x, y):
            # Fill y in place: each output cell averages the input window
            # [floor(oh*H/OH), ceil((oh+1)*H/OH)) x [floor(ow*W/OW), ceil((ow+1)*W/OW)).
            isizeH, isizeW = x.shape
            osizeH, osizeW = y.shape
            for oh in range(osizeH):
                istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
                iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
                kH = iendH - istartH
                for ow in range(osizeW):
                    istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
                    iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
                    kW = iendW - istartW
                    xsum = 0
                    for ih in range(kH):
                        for iw in range(kW):
                            xsum += x[istartH+ih][istartW+iw]
                    y[oh][ow] = xsum / kH / kW

        # Pool each (batch, channel) frame independently; x is NCHW.
        B,C,_,_ = x.shape
        y = np.empty([B,C,height, width], dtype=x.dtype)
        for b in range(B):
            for c in range(C):
                adaptive_avg_pool_frame(x[b][c], y[b][c])
        return y
    def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
        # output_size accepts either a scalar (square) or an (H, W) tuple.
        x = mx.nd.random.uniform(shape=shape)
        if output_width is None:
            y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
            npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
        else:
            y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
            npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
        assert_almost_equal(y.asnumpy(), npy)
    shape = (2, 2, 10, 10)
    # Cover every square and rectangular output size from 1x1 to 10x10.
    for i in range(1, 11):
        check_adaptive_avg_pool_op(shape, i)
        for j in range(1, 11):
            check_adaptive_avg_pool_op(shape, i, j)
Example 64
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_bilinear_resize_op():
    """Test mx.nd.contrib.BilinearResize2D against a pure-numpy reference."""
    def py_bilinear_resize(x, outputHeight, outputWidth):
        # Reference NCHW bilinear interpolation written with explicit loops.
        batch, channel, inputHeight, inputWidth = x.shape
        if outputHeight == inputHeight and outputWidth == inputWidth:
            return x
        y = np.empty([batch, channel, outputHeight, outputWidth])
        # Grid-step ratios (align-corners convention: endpoints map exactly).
        rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
        rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
        for h2 in range(outputHeight):
            h1r = 1.0 * h2 * rheight
            h1 = int(np.floor(h1r))
            h1lambda = h1r - h1
            h1p = 1 if h1 < (inputHeight - 1) else 0
            for w2 in range(outputWidth):
                w1r = 1.0 * w2 * rwidth
                w1 = int(np.floor(w1r))
                w1lambda = w1r - w1
                # Bug fix: the column-neighbour bound must be checked against
                # inputWidth, not inputHeight. The old check only happened to
                # work because every tested input was square.
                w1p = 1 if w1 < (inputWidth - 1) else 0
                for b in range(batch):
                    for c in range(channel):
                        y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
                            w1lambda*x[b][c][h1][w1+w1p]) + \
                            h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
                            w1lambda*x[b][c][h1+h1p][w1+w1p])
        return y
    def check_bilinear_resize_op(shape, height, width):
        x = mx.nd.random.uniform(shape=shape)
        y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
        assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
    shape = (2, 2, 10, 10)
    check_bilinear_resize_op(shape, 5, 5)
    check_bilinear_resize_op(shape, 10, 10)
    check_bilinear_resize_op(shape, 15, 15)
    check_bilinear_resize_op(shape, 3, 7)
    check_bilinear_resize_op(shape, 13, 17)
    # Non-square input exercises the corrected width bound above.
    shape = (2, 2, 10, 12)
    check_bilinear_resize_op(shape, 5, 5)
    check_bilinear_resize_op(shape, 13, 17)
Example 65
Project: spacesense   Author: spacesense-ai   File: test_utils.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_get_NDVI():
    """
        Test the get_NDVI function
    """

    """Test shape == 1  : one Pixel"""
    # bands 1 and 2 hold 2 and 3, so NDVI = (2 - 3) / (2 + 3) = -1/5.
    data = np.array([1, 2, 3])
    assert get_NDVI(data, 1, 2) == -1 / 5

    """Test shape == 2  : one line"""

    data = np.ndarray((3, 3))
    data[:, 0] = 1
    data[:, 1] = 2
    data[:, 2] = 3

    assert np.all(get_NDVI(data, 1, 2) == -1 / 5)

    """Test shape == 3  : one image"""

    data = np.ndarray((3, 2, 3))
    data[:, :, 0] = 1
    data[:, :, 1] = 2
    data[:, :, 2] = 3

    # For an image the spatial dimensions are preserved in the result.
    ndvi = get_NDVI(data, 1, 2)
    assert ndvi.shape == (3, 2)
    assert np.all(ndvi == -1 / 5)

    """ test edge case : data too large"""
    # A >3-dimensional array must be rejected.
    # NOTE(review): called without band indices here -- presumably get_NDVI
    # has defaults for them; confirm against its signature.
    with pytest.raises(RuntimeError):
        get_NDVI(np.empty((4, 5, 6, 7, 8, 9)))
Example 66
Project: speed_estimation   Author: NeilNie   File: helper.py    MIT License 4 votes vote down vote up
def comma_batch_generator(data, batch_size, augment):

    """
    Generate training batches of frame sequences and speed labels for the
    comma.ai speed-challenge data.

    :param data         : (numpy.array) the loaded data (converted to list from pandas format);
                          row layout appears to be [_, image filename, speed] -- TODO confirm
    :param batch_size   : (int) batch size for training
    :param augment      : (boolean) whether to apply random augmentation

    :rtype: Iterator[images, labels] image sequences for training and
    the corresponding speed labels

    """

    # Batch buffers are reused across yields; consumers must copy if needed.
    images = np.empty([batch_size, configs.LENGTH, configs.IMG_HEIGHT, configs.IMG_WIDTH, configs.CHANNELS], dtype=np.int32)
    labels = np.empty([batch_size])

    while True:

        c = 0

        for index in np.random.permutation(data.shape[0]):

            imgs = np.empty([configs.LENGTH, configs.IMG_HEIGHT, configs.IMG_WIDTH, configs.CHANNELS], dtype=np.int32)

            # Clamp the frame window [start, end) so it stays inside the data.
            if index < configs.LENGTH:
                start = 0
                end = configs.LENGTH
            elif index + configs.LENGTH >= len(data):
                start = len(data) - configs.LENGTH - 1
                end = len(data) - 1
            else:
                start = index
                end = index + configs.LENGTH

            for i in range(start, end):
                center_path = "/home/neil/dataset/speedchallenge/data/train/" + str(data[i][1])
                image = load_image(center_path)
                imgs[i - start] = image

            # augmentation if needed
            if augment and bool(random.getrandbits(1)):
                imgs = augument(imgs)

            # Label: speed of the frame just past the window.
            angle = data[end][2]

            images[c] = imgs
            labels[c] = angle

            c += 1

            if c == batch_size:
                break

        yield images, labels
Example 67
Project: speed_estimation   Author: NeilNie   File: helper.py    MIT License 4 votes vote down vote up
def comma_accel_batch_generator(data, batch_size, augment=False):

    """
    Generate training batches of frame sequences and acceleration labels for
    the comma.ai speed-challenge data.

    :param data         : (numpy.array) the loaded data (converted to list from pandas format);
                          row layout appears to be [_, image filename, speed] -- TODO confirm
    :param batch_size   : (int) batch size for training
    :param augment      : (boolean) whether to apply random augmentation

    :rtype: Iterator[images, labels] image sequences for training and
    the corresponding acceleration labels

    """

    # Batch buffers are reused across yields; consumers must copy if needed.
    images = np.empty([batch_size, configs.LENGTH, configs.IMG_HEIGHT, configs.IMG_WIDTH, configs.CHANNELS], dtype=np.int32)
    labels = np.empty([batch_size])

    while True:

        c = 0

        for index in np.random.permutation(data.shape[0]):

            imgs = np.empty([configs.LENGTH, configs.IMG_HEIGHT, configs.IMG_WIDTH, configs.CHANNELS], dtype=np.int32)

            # Clamp the frame window [start, end) so it stays inside the data.
            if index < configs.LENGTH:
                start = 0
                end = configs.LENGTH
            elif index + configs.LENGTH >= len(data):
                start = len(data) - configs.LENGTH - 1
                end = len(data) - 1
            else:
                start = index
                end = index + configs.LENGTH

            for i in range(start, end):
                center_path = "/home/neil/dataset/speedchallenge/data/train/" + str(data[i][1])
                image = load_image(center_path)
                imgs[i - start] = image

            # augmentation if needed (not recommended)
            if augment and bool(random.getrandbits(1)):
                imgs = augument(imgs)
            speed = data[end][2]
            pre_speed = data[end-1][2]

            images[c] = imgs
            # Finite-difference acceleration; 20 is the capture frame rate.
            labels[c] = (speed - pre_speed) * 20 # frame rate
            c += 1

            if c == batch_size:
                break

        yield images, labels
Example 68
Project: speed_estimation   Author: NeilNie   File: helper.py    MIT License 4 votes vote down vote up
def comma_flow_accel_batch_gen(batch_size, data):
    """
    Generate training batches of dense optical-flow sequences and
    acceleration labels for the comma.ai speed-challenge data.

       :param batch_size   : (int) batch size for training
       :param data         : (numpy.array) the loaded data (converted to list from pandas format);
                             row layout appears to be [_, image filename, speed] -- TODO confirm

       :rtype: Iterator[images, labels] flow sequences and the corresponding
       acceleration labels
    """

    # Each sample holds LENGTH flow fields (2 channels: dx, dy) computed from
    # LENGTH + 1 consecutive frames. Buffers are reused across yields.
    images = np.empty([batch_size, configs.LENGTH, configs.IMG_HEIGHT, configs.IMG_WIDTH, 2], dtype=np.int32)
    labels = np.empty([batch_size])

    while True:

        c = 0

        for index in np.random.permutation(data.shape[0]):

            imgs = np.empty([configs.LENGTH, configs.IMG_HEIGHT, configs.IMG_WIDTH, 2], dtype=np.int32)

            # Clamp the (LENGTH + 1)-frame window so it stays inside the data.
            if index < configs.LENGTH + 1:
                start = 0
                end = configs.LENGTH + 1
            elif index + configs.LENGTH + 1 >= len(data):
                start = len(data) - configs.LENGTH - 1
                end = len(data) - 1
            else:
                start = index
                end = index + configs.LENGTH + 1

            grays = []
            for i in range(start, end):
                path = "/home/neil/dataset/speedchallenge/data/train/" + str(data[i][1])
                gray = load_gray_image(path)
                grays.append(gray)

            # Farneback dense optical flow between consecutive grayscale frames.
            current = grays[0]
            for i in range(1, len(grays)):
                flow = cv2.calcOpticalFlowFarneback(current, grays[i], None, 0.5, 3, 15, 3, 5, 1.5, 0)
                current = grays[i]
                imgs[i - 1] = flow

            # Finite-difference acceleration; 20 is the capture frame rate.
            accel = (data[end][2] - data[end-1][2]) * 20
            images[c] = imgs
            labels[c] = accel
            c += 1

            if c == batch_size:
                break

        yield images, labels
Example 69
Project: speed_estimation   Author: NeilNie   File: helper.py    MIT License 4 votes vote down vote up
def comma_flow_batch_gen(data, batch_size):

    """
    Generate training batches of dense optical-flow sequences and speed
    labels for the comma.ai speed-challenge data.

    :param data         : (numpy.array) the loaded data (converted to list from pandas format);
                          row layout appears to be [_, image filename, speed] -- TODO confirm
    :param batch_size   : (int) batch size for training

    :rtype: Iterator[images, labels] flow sequences and the corresponding
    speed labels

    """

    # Each sample holds LENGTH flow fields (2 channels: dx, dy) computed from
    # LENGTH + 1 consecutive frames. Buffers are reused across yields.
    images = np.empty([batch_size, configs.LENGTH, configs.IMG_HEIGHT, configs.IMG_WIDTH, 2], dtype=np.int32)
    labels = np.empty([batch_size])

    while True:

        c = 0

        for index in np.random.permutation(data.shape[0]):

            imgs = np.empty([configs.LENGTH, configs.IMG_HEIGHT, configs.IMG_WIDTH, 2], dtype=np.int32)

            # Clamp the (LENGTH + 1)-frame window so it stays inside the data.
            if index < configs.LENGTH + 1:
                start = 0
                end = configs.LENGTH + 1
            elif index + configs.LENGTH + 1 >= len(data):
                start = len(data) - configs.LENGTH - 1
                end = len(data) - 1
            else:
                start = index
                end = index + configs.LENGTH + 1

            grays = []
            for i in range(start, end):
                path = "/home/neil/dataset/speedchallenge/data/train/" + str(data[i][1])
                gray = load_gray_image(path)
                grays.append(gray)

            # Farneback dense optical flow between consecutive grayscale frames.
            current = grays[0]
            for i in range(1, len(grays)):
                flow = cv2.calcOpticalFlowFarneback(current, grays[i], None, 0.5, 3, 15, 3, 5, 1.5, 0)
                current = grays[i]
                imgs[i-1] = flow

            # Label: speed at the last frame of the window.
            speed = data[end][2]
            images[c] = imgs
            labels[c] = speed

            c += 1

            if c == batch_size:
                break

        yield images, labels
Example 70
Project: speed_estimation   Author: NeilNie   File: helper.py    MIT License 4 votes vote down vote up
def udacity_train_gen(data, batch_size, augment):

    """
    Generate training batches of frame sequences and steering-angle labels
    from the Udacity data set.

    Args:
         data (numpy.array)  : the loaded data (converted to list from pandas format);
                               column 5 appears to be the center-image path and
                               column 8 the steering angle -- TODO confirm
         batch_size (int)    : batch size for training
         augment (boolean)   : whether to apply random augmentation

    Yields:
         images ([tensor])   : image sequences for training
         angles ([float])    : the corresponding steering angles


    """

    # Batch buffers are reused across yields; consumers must copy if needed.
    images = np.empty([batch_size, configs.LENGTH, configs.IMG_HEIGHT, configs.IMG_WIDTH, 3], dtype=np.int32)
    labels = np.empty([batch_size])

    while True:

        c = 0

        for index in np.random.permutation(data.shape[0]):

            imgs = np.empty([configs.LENGTH, configs.IMG_HEIGHT, configs.IMG_WIDTH, 3], dtype=np.int32)

            # Clamp the frame window [start, end) so it stays inside the data.
            if index < configs.LENGTH:
                start = 0
                end = configs.LENGTH
            elif index + configs.LENGTH >= len(data):
                start = len(data) - configs.LENGTH - 1
                end = len(data) - 1
            else:
                start = index
                end = index + configs.LENGTH

            for i in range(start, end):
                center_path = str(data[i][5])
                image = load_image(center_path)
                imgs[i - start] = image

            # augmentation if needed; here augument also perturbs the angle
            if augment and bool(random.getrandbits(1)):
                imgs, angle = augument(imgs, data[end][8])
            else:
                angle = data[end][8]

            images[c] = imgs
            labels[c] = angle

            c += 1

            if c == batch_size:
                break

        yield images, labels
Example 71
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: minibatch.py    MIT License 4 votes vote down vote up
def get_minibatch(roidb, num_classes):
  """Given a roidb, construct a minibatch sampled from it.

  Returns a blobs dict with 'data' (image blob), 'boxes' (deduplicated,
  scaled proposal boxes with a leading batch index), 'im_info' and 'labels'.
  Only single-image batches are supported.
  """
  num_images = len(roidb)
  # Sample random scales to use for each image in this batch
  random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
                  size=num_images)
  assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
    'num_images ({}) must divide BATCH_SIZE ({})'. \
    format(num_images, cfg.TRAIN.BATCH_SIZE)

  # Get the input image blob, formatted for caffe
  im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)

  blobs = {'data': im_blob}

  assert len(im_scales) == 1, "Single batch only"
  assert len(roidb) == 1, "Single batch only"
  
  # gt boxes: (x1, y1, x2, y2, cls)
  #if cfg.TRAIN.USE_ALL_GT:
    # Include all ground truth boxes
  #  gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
  #else:
    # For the COCO ground truth boxes, exclude the ones that are ''iscrowd'' 
  #  gt_inds = np.where(roidb[0]['gt_classes'] != 0 & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
  #gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
  #gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
  #gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
  # Scale proposal boxes to the resized image and prepend the batch index
  # (always 0: single-image batch).
  boxes = roidb[0]['boxes'] * im_scales[0]
  batch_ind = 0 * np.ones((boxes.shape[0], 1))
  boxes = np.hstack((batch_ind, boxes))
  # Deduplicate boxes that coincide on a 16-pixel grid: hash each box's
  # quantized coordinates into a single number and keep unique hashes.
  DEDUP_BOXES=1./16.
  if DEDUP_BOXES > 0:
    v = np.array([1,1e3, 1e6, 1e9, 1e12])
    hashes = np.round(boxes * DEDUP_BOXES).dot(v)
    _, index, inv_index = np.unique(hashes, return_index=True,
                                    return_inverse=True)
    boxes = boxes[index, :]
  
  blobs['boxes'] = boxes
  blobs['im_info'] = np.array(
    [im_blob.shape[1], im_blob.shape[2], im_scales[0]],
    dtype=np.float32)
  blobs['labels'] = roidb[0]['labels']

  return blobs
Example 72
Project: ieml   Author: IEMLdev   File: _test_tables.py    GNU General Public License v3.0 4 votes vote down vote up
def test_additive_script(self):
        """An additive script with two terms must yield two tables: a 1-D one
        for the O:B:. paradigm and a 3x2 one for the M:S:A:+S:. paradigm."""
        script = self.parser.parse("O:B:.+M:S:A:+S:.")
        tables = script.tables

        paradigm1 = self.parser.parse("O:B:.")
        paradigm2 = self.parser.parse("M:S:A:+S:.")
        row_headers_table1 = [self.parser.parse("O:B:.")]

        row_headers_table2 = [self.parser.parse("S:S:A:+S:."), self.parser.parse("B:S:A:+S:."), self.parser.parse("T:S:A:+S:.")]
        col_headers_table2 = [self.parser.parse("M:S:A:."), self.parser.parse("M:S:S:.")]

        # Expected cells of the first (1-D) table.
        table1_cells = np.empty(2, dtype="object")
        table1_cells[0] = self.parser.parse("U:B:.")
        table1_cells[1] = self.parser.parse("A:B:.")

        # Expected cells of the second (3 rows x 2 columns) table.
        table2_cells = np.empty((3, 2), dtype="object")
        table2_cells[0][0] = self.parser.parse("S:S:A:.")
        table2_cells[0][1] = self.parser.parse("S:S:S:.")

        table2_cells[1][0] = self.parser.parse("B:S:A:.")
        table2_cells[1][1] = self.parser.parse("B:S:S:.")

        table2_cells[2][0] = self.parser.parse("T:S:A:.")
        table2_cells[2][1] = self.parser.parse("T:S:S:.")

        self.assertEqual(len(tables), 2, "Correct number of tables generated")
        self.assertTrue(tables[0].cells.shape == table1_cells.shape, "First table has the correct shape")
        self.assertTrue(tables[1].cells.shape == table2_cells.shape, "Second table has the correct shape")

        row_col_h = list(tables[0].headers.values())[0]

        self.assertEqual(row_col_h[0], row_headers_table1, "Row headers are generated correctly")
        self.assertTrue(len(row_col_h[1]) == 0, "First table has no column headers")
        self.assertTrue(len(list(tables[0].headers)) == 1, "First table has no tab headers")

        self.assertTrue((tables[0].cells == table1_cells).all(), "Cells are generated correctly")

        row_col_h = list(tables[1].headers.values())[0]

        self.assertEqual(row_col_h[0], row_headers_table2, "Row headers are generated correctly")
        self.assertEqual(row_col_h[1], col_headers_table2, "Column headers are generated correctly")
        # NOTE(review): message says "Second table" but tables[0] is inspected
        # here -- this probably should be tables[1]; confirm intent.
        self.assertTrue(len(list(tables[0].headers)) == 1, "Second table has no tab headers")

        self.assertTrue((tables[1].cells == table2_cells).all(), "Cells are generated correctly")
        self.assertTrue(tables[0].paradigm == paradigm1, "First table has correct paradigm")
        self.assertTrue(tables[1].paradigm == paradigm2, "Second table has correct paradigm")
Example 73
Project: disentangling_conditional_gans   Author: zalandoresearch   File: tfutil.py    MIT License 4 votes vote down vote up
def run(self, *in_arrays,
        return_as_list  = False,    # True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
        print_progress  = False,    # Print progress to the console? Useful for very large input arrays.
        minibatch_size  = None,     # Maximum minibatch size to use, None = disable batching.
        num_gpus        = 1,        # Number of GPUs to use.
        out_mul         = 1.0,      # Multiplicative constant to apply to the output(s).
        out_add         = 0.0,      # Additive constant to apply to the output(s).
        out_shrink      = 1,        # Shrink the spatial dimensions of the output(s) by the given factor.
        out_dtype       = None,     # Convert the output to the specified data type.
        **dynamic_kwargs):          # Additional keyword arguments to pass into the network construction function.
        """Run this network on the given NumPy input arrays, batching over
        minibatches and GPUs, and return the outputs as NumPy arrays.

        The (possibly multi-GPU) evaluation graph for each combination of
        static options is built once and cached in self._run_cache.
        """
        assert len(in_arrays) == self.num_inputs
        num_items = in_arrays[0].shape[0]
        if minibatch_size is None:
            minibatch_size = num_items
        # Cache key covers every option that changes the built graph.
        key = str([list(sorted(dynamic_kwargs.items())), num_gpus, out_mul, out_add, out_shrink, out_dtype])

        # Build graph.
        if key not in self._run_cache:
            with absolute_name_scope(self.scope + '/Run'), tf.control_dependencies(None):
                # Split each input template evenly across GPUs.
                in_split = list(zip(*[tf.split(x, num_gpus) for x in self.input_templates]))
                out_split = []
                for gpu in range(num_gpus):
                    with tf.device('/gpu:%d' % gpu):
                        out_expr = self.get_output_for(*in_split[gpu], return_as_list=True, **dynamic_kwargs)
                        # Optional post-processing: scale, offset, pool, cast.
                        if out_mul != 1.0:
                            out_expr = [x * out_mul for x in out_expr]
                        if out_add != 0.0:
                            out_expr = [x + out_add for x in out_expr]
                        if out_shrink > 1:
                            ksize = [1, 1, out_shrink, out_shrink]
                            out_expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') for x in out_expr]
                        if out_dtype is not None:
                            if tf.as_dtype(out_dtype).is_integer:
                                out_expr = [tf.round(x) for x in out_expr]
                            out_expr = [tf.saturate_cast(x, out_dtype) for x in out_expr]
                        out_split.append(out_expr)
                # Re-concatenate the per-GPU outputs along the batch axis.
                self._run_cache[key] = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]

        # Run minibatches.
        out_expr = self._run_cache[key]
        out_arrays = [np.empty([num_items] + shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr]
        for mb_begin in range(0, num_items, minibatch_size):
            if print_progress:
                print('\r%d / %d' % (mb_begin, num_items), end='')
            mb_end = min(mb_begin + minibatch_size, num_items)
            mb_in = [src[mb_begin : mb_end] for src in in_arrays]
            mb_out = tf.get_default_session().run(out_expr, dict(zip(self.input_templates, mb_in)))
            for dst, src in zip(out_arrays, mb_out):
                dst[mb_begin : mb_end] = src

        # Done.
        if print_progress:
            print('\r%d / %d' % (num_items, num_items))
        if not return_as_list:
            out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)
        return out_arrays

    # Returns a list of (name, output_expr, trainable_vars) tuples corresponding to
    # individual layers of the network. Mainly intended to be used for reporting. 
Example 74
Project: skylab   Author: coenders   File: basellh.py    GNU General Public License v3.0 4 votes vote down vote up
def _active_region(self, src_ra, src_dec, ts, beta, inj, n_iter, trials,
                       logger, **kwargs):
        """Estimate the active region of injected-signal strengths around the
        target test-statistic value, then run trials around it.

        Injects an increasing number of signal events until a fraction beta
        of the mean TS values exceeds `ts`, derives mu from that point, and
        appends n_iter Poisson-mu trials to `trials`.
        """
        # Warm-start from the mean injected count of any previous trials.
        if len(trials) > 0:
            n_inj = int(np.mean(trials["n_inj"]))
        else:
            n_inj = 0

        logger.info("Quick estimate of active region...")
        logger.info("Start with {0:d} events.".format(n_inj + 1))

        stop = False
        while not stop:
            # NOTE(review): generator .next() is Python 2 only -- use
            # next(...) if this ever moves to Python 3.
            n_inj, inject = inj.sample(
                src_ra, n_inj + 1, poisson=False).next()

            fmin, pbest = self.fit_source(
                src_ra, src_dec, scramble=True, inject=inject)

            # Record this trial in the same structured-dtype as `trials`.
            trial = np.empty((1, ), dtype=trials.dtype)
            trial["n_inj"] = n_inj
            trial["TS"] = fmin

            for key in self.params:
                trial[key] = pbest[key]

            trials = np.append(trials, trial)

            # Mean TS per injected-event count (bincount sums / counts).
            mts = np.bincount(trials["n_inj"], weights=trials["TS"])
            mw = np.bincount(trials["n_inj"])
            mts[mw > 0] /= mw[mw > 0]

            residuals = mts - ts

            # Stop once a fraction beta of the means exceeds ts (or all do).
            stop = (
                np.count_nonzero(residuals > 0.)/len(residuals) > beta or
                np.all(residuals > 0.)
                )

        mu = len(mts) * beta
        logger.info("Active region: mu = {0:.1f}".format(mu))

        # Do trials around active region.
        trials = np.append(
            trials, self.do_trials(
                src_ra, src_dec, n_iter,
                mu=inj.sample(src_ra, mu), **kwargs))

        return trials
Example 75
Project: skylab   Author: coenders   File: grbllh.py    GNU General Public License v3.0 4 votes vote down vote up
def _select_events(self, src_ra, src_dec, scramble=False, inject=None):
        r"""Select events for log-likelihood evaluation.

        If `scramble` is `True`, `nbackground` (plus Poisson
        fluctuations) events are selected from the off-source time
        range. Otherwise, the on-source events ``data["on"]`` are
        selected.

        Parameters
        ----------
        src_ra : float
            Source right ascension (ignored for selection; see note),
            passed through to the likelihood model's signal evaluation.
        src_dec : float
            Source declination (ignored for selection; see note),
            passed through to the likelihood model's signal evaluation.
        scramble : bool, optional
            Draw a Poisson-fluctuated background sample from the
            off-source data with randomized right ascensions instead of
            using the on-source events.
        inject : numpy record array, optional
            Injected signal events (must provide a ``sinDec`` field) to
            append to the selection.

        Note
        ----
        In the current implementation, the selection depends only on the
        on-source time range. Hence, `src_ra` and `src_dec` are ignored.

        """
        # We will choose new events, so it is time to clean the likelihood
        # model's cache.
        self.llh_model.reset()

        if scramble:
            # Poisson-fluctuated number of background events to draw.
            N = self.random.poisson(self.nbackground)

            if N > 0:
                # Sample off-source events and scramble them in right
                # ascension over the full [0, 2pi) range.
                self._events = self.random.choice(self.data["off"], N)
                self._events["ra"] = self.random.uniform(0., 2.*np.pi, N)
            else:
                # No events drawn: keep an empty array with the correct
                # dtype so later np.append calls stay consistent.
                self._events = np.empty(0, dtype=self.data["off"].dtype)
        else:
            self._events = self.data["on"]

        if inject is not None:
            # Drop injected events whose declination lies outside the
            # range supported by the likelihood model.
            remove = np.logical_or(
                inject["sinDec"] < self.llh_model.sinDec_range[0],
                inject["sinDec"] > self.llh_model.sinDec_range[-1])

            if np.any(remove):
                inject = inject[np.logical_not(remove)]

            # Attach the background PDF value "B" to each injected event.
            # NOTE(review): this uses the full module name `numpy` while
            # the rest of the method uses the `np` alias -- confirm the
            # file also has a plain `import numpy`.
            inject = numpy.lib.recfunctions.append_fields(
                inject, names="B", data=self.llh_model.background(inject),
                usemask=False)

            # Keep only the fields the selected events already have, so
            # both arrays share a dtype when appended.
            self._events = np.append(
                self._events, inject[list(self._events.dtype.names)])

        # Signal expectation for the selected events at the source position.
        self._signal = self.llh_model.signal(src_ra, src_dec, self._events)

        # Method has to set number of events and number of selected
        # events. Here, both numbers are equal.
        self._nevents = self._events.size
        self._nselected = self._nevents
Example 76
Project: sdpqpy   Author: cgogolin   File: ed.py    GNU General Public License v3.0 4 votes vote down vote up
def getXMat(self, variables, monomials):
        """Build the symmetric x-matrix of monomial overlaps with respect
        to the (uplifted) ground state.

        Each entry (row, col) is the inner product of two monomials
        applied to the ground state, computed in parallel; the matrix is
        filled symmetrically and returned as a float numpy array.

        Raises
        ------
        ValueError
            If the model's spin is neither 1/2 nor 0.
        """
        V = self.getSize()
        if self.spin == 0.5:
            spin_multiplicity = 2
        elif self.spin == 0:
            spin_multiplicity = 1
        else:
            # ValueError is a subclass of Exception, so existing callers
            # catching Exception still work.
            raise ValueError("Only spin 1/2 and spin 0 implemented!")

        print("generating uplifted ground state")
        upliftedgroundstate = np.zeros(int(pow(2, spin_multiplicity*V)))

        # Map each ground-state amplitude into the full Hilbert space.
        # The dictionary lookup is loop-invariant, so fetch it once.
        hdict = self.getHdictFull()
        for row, vec in enumerate(self.getHilbertSpace()):
            col = hdict[tuple(vec)]
            upliftedgroundstate[col] = self.groundstate[row]

        print("generating xmat entries")
        time0 = time.time()
        monomialvec = self.getMonomialVector(variables, monomials)
        with multiprocessing.Pool() as pool:
            # this makes keyboard interrupt work, see:
            # http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
            m = pool.map_async(
                ft.partial(npdotinverted, upliftedgroundstate), monomialvec).get(0xFFFF)
            pool.close()
            pool.join()

        output = np.empty([len(m), len(m)])
        with multiprocessing.Pool() as pool2:
            m2 = pool2.imap(npstardot, it.product(m, repeat=2))
            for i, out in enumerate(m2, 1):
                # Map the flat product index back to (row, col). divmod
                # keeps this integer arithmetic: the previous "/" division
                # produced a float index and raised TypeError on Python 3.
                col, row = divmod(i - 1, len(m))
                if row >= col:
                    output[row, col] = output[col, row] = out
                    sys.stdout.write("\r\x1b[Kprocessed " + str(i) + " xmat entries of " + str(
                        len(m) * len(m)) + " in " + str(time.time() - time0) + " seconds ")
                    sys.stdout.flush()
            pool2.close()
            pool2.join()

        print("done")
        return np.array(output, dtype=float)
Example 77
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: ndarray.py    Apache License 2.0 4 votes vote down vote up
def concatenate(arrays, axis=0, always_copy=True):
    """DEPRECATED, use ``concat`` instead

    Parameters
    ----------
    arrays : list of `NDArray`
        Arrays to be concatenate. They must have identical shape except
        the first dimension. They also must have the same data type.
    axis : int
        The axis along which to concatenate.
    always_copy : bool
        Default `True`. When not `True`, if the arrays only contain one
        `NDArray`, that element will be returned directly, avoid copying.

    Returns
    -------
    NDArray
        An `NDArray` that lives on the same context as `arrays[0].context`.
    """
    assert isinstance(arrays, list)
    assert len(arrays) > 0
    assert isinstance(arrays[0], NDArray)

    # Fast path: a single array may be handed back without copying.
    if len(arrays) == 1 and not always_copy:
        return arrays[0]

    first = arrays[0]
    lead_shape = first.shape[0:axis]
    trail_shape = first.shape[axis+1:]
    concat_extent = first.shape[axis]
    dtype = first.dtype
    # All arrays must agree everywhere except along `axis`; accumulate
    # the total extent of that axis while checking.
    for other in arrays[1:]:
        concat_extent += other.shape[axis]
        assert lead_shape == other.shape[0:axis]
        assert trail_shape == other.shape[axis+1:]
        assert dtype == other.dtype
    out_shape = lead_shape + (concat_extent,) + trail_shape
    out = empty(out_shape, ctx=first.context, dtype=dtype)

    # Copy each source array into its slice of the output.
    offset = 0
    begin = [0] * len(out_shape)
    end = list(out_shape)
    for other in arrays:
        extent = other.shape[axis]
        if axis == 0:
            # Plain slice assignment suffices along the leading axis.
            out[offset:offset + extent] = other
        else:
            begin[axis] = offset
            end[axis] = offset + extent
            # pylint: disable=no-member,protected-access
            _internal._crop_assign(out, other, out=out,
                                   begin=tuple(begin),
                                   end=tuple(end))
            # pylint: enable=no-member,protected-access
        offset += extent

    return out


# pylint: disable=redefined-outer-name 
Example 78
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 4 votes vote down vote up
def check_concat_with_shape(shapes, dimension, skip_second):
    """Forward/backward check of the Concat operator.

    With ``skip_second=True`` the second argument is bound without a
    gradient holder (regression test for #1130).
    """
    num_args = len(shapes)

    # Total extent of the concatenated dimension across all inputs.
    target_dim = sum(shape[dimension] for shape in shapes)

    inputs = [mx.symbol.Variable('arg%d' % i) for i in range(num_args)]
    out = mx.symbol.Concat(*inputs, name='conc', dim=dimension)

    # Fill each input with its own extent along `dimension`, so the
    # blocks are distinguishable in the concatenated output.
    arr = []
    for shape in shapes:
        block = mx.nd.empty(shape)
        block[:] = shape[dimension]
        arr.append(block)
    arr_np = [np.copy(block.asnumpy()) for block in arr]

    arr_grad = [mx.nd.empty(shape) for shape in shapes]
    arg_names = out.list_arguments()
    dict_grad = {}
    for arg_name, grad_holder in zip(arg_names, arr_grad):
        if not skip_second or arg_name != 'arg1':
            dict_grad[arg_name] = grad_holder

    args = out.list_arguments()
    arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
    out_grad = mx.nd.empty(out_shapes[0])

    # Forward pass: output must equal numpy's concatenation.
    exec1 = out.bind(default_context(),
                     args=arr,
                     args_grad=dict_grad)
    exec1.forward(is_train=True)
    out1 = exec1.outputs[0]
    expected = np.concatenate([block.asnumpy() for block in arr], axis=dimension)
    assert_almost_equal(out1.asnumpy(), expected)

    # Backward pass: gradient (output + 1) must flow through unchanged.
    out1.copyto(out_grad)
    out_grad[:] += 1
    exec1.backward([out_grad])

    for i, arg_name in enumerate(arg_names):
        if not skip_second or arg_name != 'arg1':
            assert_almost_equal(dict_grad[arg_name].asnumpy(), arr_np[i] + 1)
Example 79
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 4 votes vote down vote up
def mathematical_core_binary(name,
                             forward_mxnet_call,
                             forward_numpy_call,
                             backward_numpy_call1,
                             backward_numpy_call2,
                             data1_init=2.,
                             data2_init=3.,
                             grad_init=2.):
    """Check a binary mxnet operator against its numpy reference.

    Runs forward and backward passes of ``forward_mxnet_call(data1, data2)``
    on constant inputs and compares the output against
    ``forward_numpy_call`` and the gradients against
    ``backward_numpy_call1``/``backward_numpy_call2``.

    Note: `name` is accepted for call-site uniformity but is not used.
    """
    # Give the two input symbols distinct names. Both were previously
    # named 'data', which makes argument resolution ambiguous -- binding
    # only worked because arguments were passed positionally.
    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')
    shape = (3, 4)
    data_tmp1 = np.random.rand(3, 4)
    data_tmp2 = np.random.rand(3, 4)
    data_tmp1[:] = data1_init
    data_tmp2[:] = data2_init

    arr_data1 = mx.nd.array(data_tmp1)
    arr_data2 = mx.nd.array(data_tmp2)

    # Gradient holders for the two inputs.
    arr_grad1 = mx.nd.empty(shape)
    arr_grad2 = mx.nd.empty(shape)

    # Forward: mxnet output must match the numpy reference.
    test = forward_mxnet_call(data1, data2)
    exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0].asnumpy()
    npout = forward_numpy_call(data_tmp1, data_tmp2)
    assert_almost_equal(out, npout)

    # Backward: propagate a constant output gradient.
    out_grad = mx.nd.empty(shape)
    out_grad[:] = grad_init
    exe_test.backward(out_grad)

    npout_grad = np.ones(shape)
    npout_grad[:] = grad_init

    # Expected gradients via the chain rule with the numpy references.
    npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
    npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
    arr_grad1 = arr_grad1.asnumpy()
    arr_grad2 = arr_grad2.asnumpy()

    assert_almost_equal(arr_grad1, npout_grad1)
    assert_almost_equal(arr_grad2, npout_grad2)
Example 80
Project: speed_estimation   Author: NeilNie   File: helper.py    MIT License 3 votes vote down vote up
def optical_flow(previous, current):
    """Compute dense Farneback optical flow between two consecutive
    RGB frames and return the flow field."""
    gray_prev = cv2.cvtColor(previous, cv2.COLOR_RGB2GRAY)
    gray_curr = cv2.cvtColor(current, cv2.COLOR_RGB2GRAY)
    return cv2.calcOpticalFlowFarneback(
        gray_prev, gray_curr, None, 0.5, 3, 15, 3, 5, 1.5, 0)

#
# def comma_validation_generator(data, batch_size):
#
#     """
#     Generate training images given image paths and associated steering angles
#
#     :param data         : (numpy.array) the loaded data (converted to list from pandas format)
#     :param batch_size   :  (int) batch size for training
#
#     :rtype: Iterator[images, angles] images for training
#     the corresponding steering angles
#
#     """
#
#     images = np.empty([batch_size, configs.LENGTH, configs.IMG_HEIGHT, configs.IMG_WIDTH, configs.CHANNELS], dtype=np.int32)
#     labels = np.empty([batch_size])
#
#     while True:
#
#         c = 0
#
#         for index in np.random.permutation(data.shape[0]):
#
#             imgs = np.empty([configs.LENGTH, configs.IMG_HEIGHT, configs.IMG_WIDTH, configs.CHANNELS], dtype=np.int32)
#
#             if index < configs.LENGTH:
#                 start = 0
#                 end = configs.LENGTH
#             elif index + configs.LENGTH >= len(data):
#                 start = len(data) - configs.LENGTH - 1
#                 end = len(data) - 1
#             else:
#                 start = index
#                 end = index + configs.LENGTH
#
#             for i in range(start, end):
#                 center_path = "/home/neil/dataset/speedchallenge/data/train/" + str(data[i][1])
#                 image = load_image(center_path)
#                 imgs[i - start] = image
#
#             images[c] = imgs
#             labels[c] = data[end][2]
#
#             c += 1
#
#             if c == batch_size:
#                 break
#
#         yield images, labels