Python numpy.size() Examples

The following are 30 code examples of numpy.size(), drawn from open-source projects. The source file, project, and license for each example are noted in its header.
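As a quick refresher before the examples: np.size(a) returns the total number of elements of an array-like, and np.size(a, axis) the length along one axis. A minimal sketch:

import numpy as np

a = np.zeros((2, 3))
print(np.size(a))          # 6 -- total number of elements
print(np.size(a, 0))       # 2 -- length along axis 0
print(np.size(a, 1))       # 3 -- length along axis 1
print(np.size([1, 2, 3]))  # 3 -- works on plain Python sequences too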
Example #1
Source File: utils.py    From Tensorflow-YOLOv3 with MIT License
def draw_boxes_frame(frame, frame_size, boxes_dicts, class_names, input_size):
  """Draws detected boxes in a video frame"""
  boxes_dict = boxes_dicts[0]
  resize_factor = (frame_size[0] / input_size[1], frame_size[1] / input_size[0])
  for cls in range(len(class_names)):
    boxes = boxes_dict[cls]
    color = (0, 0, 255)
    if np.size(boxes) != 0:
      for box in boxes:
        xy = box[:4]
        xy = [int(xy[i] * resize_factor[i % 2]) for i in range(4)]
        cv2.rectangle(frame, (xy[0], xy[1]), (xy[2], xy[3]), color[::-1], 2)
        (text_width, text_height), baseline = cv2.getTextSize(class_names[cls],
                                                              cv2.FONT_HERSHEY_SIMPLEX,
                                                              0.75, 1)
        cv2.rectangle(frame,
                      (xy[0], xy[1]),
                      (xy[0] + text_width, xy[1] - text_height - baseline),
                      color[::-1],
                      thickness=cv2.FILLED)
        cv2.putText(frame, class_names[cls], (xy[0], xy[1] - baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 1) 
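A note on the np.size(boxes) != 0 guard above: for arrays, np.size counts all elements, so it is a safer emptiness test than len(), which only looks at the first axis. A minimal sketch:

import numpy as np

empty = np.empty((1, 0))  # one row, zero columns
print(len(empty))         # 1 -- len() only sees the first axis
print(np.size(empty))     # 0 -- no elements, so the guard skips drawing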
Example #2
Source File: quadPlot.py    From quadcopter-simulation with BSD 3-Clause "New" or "Revised" License
def set_frame(frame):
    # convert the 3x6 world_frame matrix into three 3x2 line_data arrays (rows: x, y, z; columns: point index)
    lines_data = [frame[:,[0,2]], frame[:,[1,3]], frame[:,[4,5]]]
    ax = plt.gca()
    lines = ax.get_lines()
    for line, line_data in zip(lines[:3], lines_data):
        x, y, z = line_data
        line.set_data(x, y)
        line.set_3d_properties(z)

    global history, count
    # plot history trajectory
    history[count] = frame[:,4]
    if count < np.size(history, 0) - 1:
        count += 1
    zline = history[:count,-1]
    xline = history[:count,0]
    yline = history[:count,1]
    lines[-1].set_data(xline, yline)
    lines[-1].set_3d_properties(zline)
    # ax.plot3D(xline, yline, zline, 'blue') 
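np.size(history, 0) above is the length of history along axis 0, i.e. history.shape[0]; the count guard keeps the trajectory index from running past the preallocated buffer. A minimal sketch:

import numpy as np

history = np.zeros((500, 3))
print(np.size(history, 0))  # 500 -- same as history.shape[0]
print(np.size(history, 1))  # 3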
Example #3
Source File: test_numeric.py    From recruit with Apache License 2.0
def test_count_nonzero_axis_consistent(self):
        # Check that the axis behaviour for valid axes in
        # non-special cases is consistent (and therefore
        # correct) by checking it against an integer array
        # that is then cast to the generic object dtype
        from itertools import combinations, permutations

        axis = (0, 1, 2, 3)
        size = (5, 5, 5, 5)
        msg = "Mismatch for axis: %s"

        rng = np.random.RandomState(1234)
        m = rng.randint(-100, 100, size=size)
        n = m.astype(object)

        for length in range(len(axis)):
            for combo in combinations(axis, length):
                for perm in permutations(combo):
                    assert_equal(
                        np.count_nonzero(m, axis=perm),
                        np.count_nonzero(n, axis=perm),
                        err_msg=msg % (perm,)) 
Example #4
Source File: car_rental_synchronous.py    From reinforcement-learning-an-introduction with MIT License
def policy_improvement(self, actions, values, policy):
        new_policy = np.copy(policy)

        expected_action_returns = np.zeros((MAX_CARS + 1, MAX_CARS + 1, np.size(actions)))
        cooks = dict()
        with mp.Pool(processes=8) as p:
            for action in actions:
                k = np.arange(MAX_CARS + 1)
                all_states = ((i, j) for i, j in itertools.product(k, k))
                cooks[action] = partial(self.expected_return_pi, values, action)
                results = p.map(cooks[action], all_states)
                for v, i, j, a in results:
                    expected_action_returns[i, j, self.inverse_actions[a]] = v
        for i in range(expected_action_returns.shape[0]):
            for j in range(expected_action_returns.shape[1]):
                new_policy[i, j] = actions[np.argmax(expected_action_returns[i, j])]

        policy_change = (new_policy != policy).sum()
        print(f'Policy changed in {policy_change} states')
        return policy_change, new_policy

    # O(n^4) computation for all possible requests and returns 
Example #5
Source File: olmar.py    From fin with MIT License
def analyze(context=None, results=None):
        
    f, (ax1, ax2, ax3) = plt.subplots(3, sharex = True)        
    ax1.plot(results.portfolio_value, linewidth = 2.0, label = 'portfolio')
    ax1.set_title('On-Line Moving Average Reversion')
    ax1.set_ylabel('Portfolio value (USD)')
    ax1.legend(loc=0)
    ax1.grid(True)
            
    ax2.plot(results['AAPL'], color = 'b', linestyle = '-', linewidth = 2.0, label = 'AAPL')
    ax2.plot(results['MSFT'], color = 'r', linestyle = '-', linewidth = 2.0, label = 'MSFT')
    ax2.set_ylabel('stock price (USD)')
    ax2.legend(loc=0)
    ax2.grid(True)
    
    ax3.semilogy(results['step_size'], color = 'b', linestyle = '-', linewidth = 2.0, label = 'step-size')
    ax3.semilogy(results['variability'], color = 'r', linestyle = '-', linewidth = 2.0, label = 'variability')
    ax3.legend(loc=0)
    ax3.grid(True)
    
    plt.show() 
Example #6
Source File: kernels.py    From aboleth with Apache License 2.0
def _init_lenscale(given_lenscale, learn_lenscale, input_dim):
    """Provide the lenscale variable and its initial value."""
    given_lenscale = (np.sqrt(1.0 / input_dim) if given_lenscale is None
                      else np.array(given_lenscale).squeeze()).astype(
                          np.float32)

    if learn_lenscale:
        lenscale = pos_variable(given_lenscale, name="kernel_lenscale")
        if np.size(given_lenscale) == 1:
            summary_scalar(lenscale)
        else:
            summary_histogram(lenscale)
    else:
        lenscale = given_lenscale

    lenscale_vec = tf.ones(input_dim, dtype=tf.float32) * lenscale
    init_lenscale = given_lenscale * np.ones(input_dim, dtype=np.float32)
    return lenscale_vec, init_lenscale 
Example #7
Source File: olmar.py    From fin with MIT License
def initialize(context, eps = 10, window_length = 50):
    
    #init    
    context.stocks = STOCKS
    context.sids = SIDS
    #context.sids = [context.symbol(symb) for symb in context.stocks]
    context.m = np.size(STOCKS)
    context.price = {}
    context.b_t = np.ones(context.m)/float(context.m)
    context.prev_weights = np.ones(context.m)/float(context.m)
    context.eps = eps
    context.init = True
    context.days = 0
    context.window_length = window_length
    
    add_history(window_length, '1d', 'price')
    
    # set commission and slippage
    #context.set_commission(commission.PerShare(cost=0))
    #context.set_slippage(slippage.VolumeShareSlippage(volume_limit=0.25, price_impact=0.1)) 
Example #8
Source File: test_task.py    From ibllib with MIT License
def test_roc_between_two_events(self):
        if self.test_data is None:
            return
        spike_times = self.test_data['spike_times']
        spike_clusters = self.test_data['spike_clusters']
        event_times = self.test_data['event_times']
        event_groups = self.test_data['event_groups']
        auc_roc, cluster_ids = bb.task.roc_between_two_events(spike_times,
                                                              spike_clusters,
                                                              event_times,
                                                              event_groups,
                                                              pre_time=0.5,
                                                              post_time=0.5)
        num_clusters = np.size(np.unique(spike_clusters))
        self.assertTrue(np.sum(auc_roc < 0.3) == 24)
        self.assertTrue(np.sum(auc_roc > 0.7) == 10)
        self.assertTrue(np.size(cluster_ids) == num_clusters) 
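np.size(np.unique(spike_clusters)) above counts the number of distinct cluster ids, which the test then compares against the returned cluster_ids. A minimal sketch:

import numpy as np

spike_clusters = np.array([3, 1, 3, 2, 1, 1])
print(np.size(np.unique(spike_clusters)))  # 3 -- distinct clusters {1, 2, 3}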
Example #9
Source File: kpts_helper.py    From pyscf with Apache License 2.0
def flush(self):
        """
        Composes the vector.
        Returns:
            The composed vector.
        """
        if self.__data__ is None:
            self.__data__ = result = np.empty(self.__total_size__, dtype=self.__dtype__)
            offset = 0
        else:
            offset = self.__data__.size
            self.__data__ = result = np.empty(self.__total_size__ + self.__data__.size, dtype=self.__dtype__)

        for i in self.__transactions__:
            s = i.size
            result[offset:offset + s] = i.reshape(-1)
            offset += s
        self.__transactions__ = []

        return result 
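The flush method above reads self.__data__.size, the ndarray attribute; for actual arrays, a.size and np.size(a) are interchangeable:

import numpy as np

a = np.arange(12).reshape(3, 4)
print(a.size, np.size(a))  # 12 12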
Example #10
Source File: test_function.py    From recruit with Apache License 2.0
def test_count_uses_size_on_exception():
    class RaisingObjectException(Exception):
        pass

    class RaisingObject(object):

        def __init__(self, msg='I will raise inside Cython'):
            super(RaisingObject, self).__init__()
            self.msg = msg

        def __eq__(self, other):
            # gets called in Cython to check that raising calls the method
            raise RaisingObjectException(self.msg)

    df = DataFrame({'a': [RaisingObject() for _ in range(4)],
                    'grp': list('ab' * 2)})
    result = df.groupby('grp').count()
    expected = DataFrame({'a': [2, 2]}, index=pd.Index(
        list('ab'), name='grp'))
    tm.assert_frame_equal(result, expected)


# size
# -------------------------------- 
Example #11
Source File: core.py    From feets with MIT License
def __repr__(self):
        """x.__repr__() <==> repr(x)."""
        if not hasattr(self, "__repr"):
            params = self.params or {}
            parsed_params = []
            for k, v in params.items():
                sk = str(k)
                if np.ndim(v) != 0 and np.size(v) > MAX_VALUES_TO_REPR:
                    tv = type(v)
                    sv = f"<{tv.__module__}.{tv.__name__}>"
                else:
                    sv = str(v)
                parsed_params.append(f"{sk}={sv}")
            str_params = ", ".join(parsed_params)
            self.__repr = f"{self.name}({str_params})"

        return self.__repr 
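The np.ndim(v) != 0 guard above matters because params values may be plain scalars, and np.size happily accepts those too, returning 1. A minimal sketch:

import numpy as np

print(np.ndim(3.5), np.size(3.5))              # 0 1 -- scalar
print(np.ndim([1, 2, 3]), np.size([1, 2, 3]))  # 1 3 -- list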
Example #12
Source File: data_loader.py    From DeblurGAN-tf with MIT License
def read_image_pair(pair_path, resize_or_crop=None, image_size=(256,256)):
    image_blur = cv2.imread(pair_path[0], cv2.IMREAD_COLOR)
    image_blur = image_blur / 255.0 * 2.0 - 1.0
    image_real = cv2.imread(pair_path[1], cv2.IMREAD_COLOR)
    image_real = image_real / 255.0 * 2.0 - 1.0

    if resize_or_crop is not None: 
        assert image_size is not None

    if resize_or_crop == 'resize':
        image_blur = cv2.resize(image_blur, image_size, interpolation=cv2.INTER_AREA)
        image_real = cv2.resize(image_real, image_size, interpolation=cv2.INTER_AREA)
    elif resize_or_crop == 'crop':
        # cv2 has no crop(); take the top-left corner by slicing instead
        image_blur = image_blur[:image_size[0], :image_size[1]]
        image_real = image_real[:image_size[0], :image_size[1]]
    else:
        raise ValueError("resize_or_crop must be 'resize' or 'crop'")

    if np.size(np.shape(image_blur)) == 3:
        image_blur = np.expand_dims(image_blur, axis=0)
    if np.size(np.shape(image_real)) == 3:
        image_real = np.expand_dims(image_real, axis=0)
    image_blur = np.array(image_blur, dtype=np.float32)
    image_real = np.array(image_real, dtype=np.float32)
    return image_blur, image_real 
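np.size(np.shape(image)) == 3 above is an indirect way of asking for the number of dimensions; np.ndim(image) == 3 (or image.ndim == 3) expresses the same check directly:

import numpy as np

image = np.zeros((256, 256, 3))
print(np.size(np.shape(image)))  # 3 -- length of the shape tuple
print(np.ndim(image))            # 3 -- equivalent and more direct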
Example #13
Source File: data_loader.py    From DeblurGAN-tf with MIT License
def read_image(path, resize_or_crop=None, image_size=(256,256)):
    image = cv2.imread(path, cv2.IMREAD_COLOR)
    image = image/255.0 * 2.0 - 1.0

    assert resize_or_crop is not None
    assert image_size is not None

    if resize_or_crop == 'resize':
        image = cv2.resize(image, image_size, interpolation=cv2.INTER_AREA)
    elif resize_or_crop == 'crop':
        # cv2 has no crop(); take the top-left corner by slicing instead
        image = image[:image_size[0], :image_size[1]]

    if np.size(np.shape(image)) == 3: 
        image = np.expand_dims(image, axis=0)

    image = np.array(image, dtype=np.float32)
    return image 
Example #14
Source File: test_task.py    From ibllib with MIT License
def test_responsive_units(self):
        if self.test_data is None:
            return
        spike_times = self.test_data['spike_times']
        spike_clusters = self.test_data['spike_clusters']
        event_times = self.test_data['event_times']
        alpha = 0.5
        sig_units, stats, p_values, cluster_ids = bb.task.responsive_units(spike_times,
                                                                           spike_clusters,
                                                                           event_times,
                                                                           pre_time=[0.5, 0],
                                                                           post_time=[0, 0.5],
                                                                           alpha=alpha)
        num_clusters = np.size(np.unique(spike_clusters))
        self.assertTrue(np.size(sig_units) == 125)
        self.assertTrue(np.sum(p_values < alpha) == np.size(sig_units))
        self.assertTrue(np.size(cluster_ids) == num_clusters) 
Example #15
Source File: gw.py    From pyscf with Apache License 2.0
def si_c(self, ww, use_numba_impl=False):
    """ 
    This computes the correlation part of the screened interaction W_c
    by solving <self.nprod> linear equations (1-K chi0) W = K chi0 K 
    or v_{ind}\sim W_{c} = (1-v\chi_{0})^{-1}v\chi_{0}v
    scr_inter[w,p,q], where w in ww, p and q in 0..self.nprod 
    """
    from numpy.linalg import solve

    if not hasattr(self, 'pab2v_den'):
      self.pab2v_den = einsum('pab->apb', self.pb.get_ac_vertex_array())

    si0 = np.zeros((ww.size, self.nprod, self.nprod), dtype=self.dtypeComplex)
    if use_numba and use_numba_impl:

        # the numba implementation suffers from a contiguous-array issue,
        # for example in test test_0087_o2_gw.py;
        # use it only for experimental testing
        si_correlation_numba(si0, ww, self.x, self.kernel_sq, self.ksn2f, self.ksn2e,
                             self.pab2v_den, self.nprod, self.norbs, self.bsize,
                             self.nspin, self.nfermi, self.vstart)
    else:
        si_correlation(rf0_den(self, ww), si0, ww, self.kernel_sq, self.nprod)
    return si0 
Example #16
Source File: training_trials.py    From ibllib with MIT License
def get_feedback_times_ge5(session_path, data=False):
    # get error and no-go trigger times -- look for BNC2High of trial -- verify
    # only 2 onset times (go tone and noise), select 2nd/-1 OR select the one
    # that is greater than the no-go or error trial onset time
    if not data:
        data = raw.load_data(session_path)
    missed_bnc2 = 0
    rw_times, err_sound_times, merge = [np.zeros([len(data), ]) for _ in range(3)]

    for ind, tr in enumerate(data):
        st = tr['behavior_data']['Events timestamps'].get('BNC2High', None)
        if not st:
            st = np.array([np.nan, np.nan])
            missed_bnc2 += 1
        # xonar soundcard duplicates events, remove consecutive events too close together
        st = np.delete(st, np.where(np.diff(st) < 0.020)[0] + 1)
        rw_times[ind] = tr['behavior_data']['States timestamps']['reward'][0][0]
        # get the error sound only if the reward is nan
        err_sound_times[ind] = st[-1] if st.size >= 2 and np.isnan(rw_times[ind]) else np.nan
    if missed_bnc2 == len(data):
        logger_.warning('No BNC2 for feedback times, filling error trials NaNs')
    merge *= np.nan
    merge[~np.isnan(rw_times)] = rw_times[~np.isnan(rw_times)]
    merge[~np.isnan(err_sound_times)] = err_sound_times[~np.isnan(err_sound_times)]

    return merge 
Example #17
Source File: utils_deblur.py    From KAIR with MIT License
def zero_pad(image, shape, position='corner'):
    """
    Extends image to a certain size with zeros
    Parameters
    ----------
    image: real 2d `numpy.ndarray`
        Input image
    shape: tuple of int
        Desired output shape of the image
    position : str, optional
        The position of the input image in the output one:
            * 'corner'
                top-left corner (default)
            * 'center'
                centered
    Returns
    -------
    padded_img: real `numpy.ndarray`
        The zero-padded image
    """
    shape = np.asarray(shape, dtype=int)
    imshape = np.asarray(image.shape, dtype=int)
    if np.alltrue(imshape == shape):
        return image
    if np.any(shape <= 0):
        raise ValueError("ZERO_PAD: null or negative shape given")
    dshape = shape - imshape
    if np.any(dshape < 0):
        raise ValueError("ZERO_PAD: target size smaller than source one")
    pad_img = np.zeros(shape, dtype=image.dtype)
    idx, idy = np.indices(imshape)
    if position == 'center':
        if np.any(dshape % 2 != 0):
            raise ValueError("ZERO_PAD: source and target shapes "
                             "have different parity.")
        offx, offy = dshape // 2
    else:
        offx, offy = (0, 0)
    pad_img[idx + offx, idy + offy] = image
    return pad_img 
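A quick usage sketch for zero_pad as defined above:

import numpy as np

img = np.ones((2, 2))
padded = zero_pad(img, (4, 4), position='center')
print(padded.shape)  # (4, 4)
print(padded)
# [[0. 0. 0. 0.]
#  [0. 1. 1. 0.]
#  [0. 1. 1. 0.]
#  [0. 0. 0. 0.]]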
Example #18
Source File: test_pivot.py    From recruit with Apache License 2.0
def test_crosstab_pass_values(self):
        a = np.random.randint(0, 7, size=100)
        b = np.random.randint(0, 3, size=100)
        c = np.random.randint(0, 5, size=100)
        values = np.random.randn(100)

        table = crosstab([a, b], c, values, aggfunc=np.sum,
                         rownames=['foo', 'bar'], colnames=['baz'])

        df = DataFrame({'foo': a, 'bar': b, 'baz': c, 'values': values})

        expected = df.pivot_table('values', index=['foo', 'bar'],
                                  columns='baz', aggfunc=np.sum)
        tm.assert_frame_equal(table, expected) 
Example #19
Source File: test_pivot.py    From recruit with Apache License 2.0
def test_crosstab_margins_set_margin_name(self):
        # GH 15972
        a = np.random.randint(0, 7, size=100)
        b = np.random.randint(0, 3, size=100)
        c = np.random.randint(0, 5, size=100)

        df = DataFrame({'a': a, 'b': b, 'c': c})

        result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
                          margins=True, margins_name='TOTAL')

        assert result.index.names == ('a',)
        assert result.columns.names == ['b', 'c']

        all_cols = result['TOTAL', '']
        exp_cols = df.groupby(['a']).size().astype('i8')
        # to keep index.name
        exp_margin = Series([len(df)], index=Index(['TOTAL'], name='a'))
        exp_cols = exp_cols.append(exp_margin)
        exp_cols.name = ('TOTAL', '')

        tm.assert_series_equal(all_cols, exp_cols)

        all_rows = result.loc['TOTAL']
        exp_rows = df.groupby(['b', 'c']).size().astype('i8')
        exp_rows = exp_rows.append(Series([len(df)], index=[('TOTAL', '')]))
        exp_rows.name = 'TOTAL'

        exp_rows = exp_rows.reindex(all_rows.index)
        exp_rows = exp_rows.fillna(0).astype(np.int64)
        tm.assert_series_equal(all_rows, exp_rows)

        msg = "margins_name argument must be a string"
        for margins_name in [666, None, ['a', 'b']]:
            with pytest.raises(ValueError, match=msg):
                crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
                         margins=True, margins_name=margins_name) 
Example #20
Source File: test_pivot.py    From recruit with Apache License 2.0
def test_crosstab_margins(self):
        a = np.random.randint(0, 7, size=100)
        b = np.random.randint(0, 3, size=100)
        c = np.random.randint(0, 5, size=100)

        df = DataFrame({'a': a, 'b': b, 'c': c})

        result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
                          margins=True)

        assert result.index.names == ('a',)
        assert result.columns.names == ['b', 'c']

        all_cols = result['All', '']
        exp_cols = df.groupby(['a']).size().astype('i8')
        # to keep index.name
        exp_margin = Series([len(df)], index=Index(['All'], name='a'))
        exp_cols = exp_cols.append(exp_margin)
        exp_cols.name = ('All', '')

        tm.assert_series_equal(all_cols, exp_cols)

        all_rows = result.loc['All']
        exp_rows = df.groupby(['b', 'c']).size().astype('i8')
        exp_rows = exp_rows.append(Series([len(df)], index=[('All', '')]))
        exp_rows.name = 'All'

        exp_rows = exp_rows.reindex(all_rows.index)
        exp_rows = exp_rows.fillna(0).astype(np.int64)
        tm.assert_series_equal(all_rows, exp_rows) 
Example #21
Source File: test_pivot.py    From recruit with Apache License 2.0
def test_crosstab_multiple(self):
        df = self.df

        result = crosstab(df['A'], [df['B'], df['C']])
        expected = df.groupby(['A', 'B', 'C']).size()
        expected = expected.unstack(
            'B').unstack('C').fillna(0).astype(np.int64)
        tm.assert_frame_equal(result, expected)

        result = crosstab([df['B'], df['C']], df['A'])
        expected = df.groupby(['B', 'C', 'A']).size()
        expected = expected.unstack('A').fillna(0).astype(np.int64)
        tm.assert_frame_equal(result, expected) 
Example #22
Source File: utils_deblur.py    From KAIR with MIT License
def fspecial_gauss(size, sigma):
    x, y = mgrid[-size // 2 + 1 : size // 2 + 1, -size // 2 + 1 : size // 2 + 1]
    g = exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2)))
    return g / g.sum() 
Example #23
Source File: utils_deblur.py    From KAIR with MIT License
def kernelFromTrajectory(x):
    h = 5 - log(rand()) / 0.15
    h = round(min([h, 27])).astype(int)
    h = h + 1 - h % 2
    w = h
    k = zeros((h, w))

    xmin = min(x[0])
    xmax = max(x[0])
    ymin = min(x[1])
    ymax = max(x[1])
    xthr = arange(xmin, xmax, (xmax - xmin) / w)
    ythr = arange(ymin, ymax, (ymax - ymin) / h)

    for i in range(1, xthr.size):
        for j in range(1, ythr.size):
            idx = (
                (x[0, :] >= xthr[i - 1])
                & (x[0, :] < xthr[i])
                & (x[1, :] >= ythr[j - 1])
                & (x[1, :] < ythr[j])
            )
            k[i - 1, j - 1] = sum(idx)
    if sum(k) == 0:
        return
    k = k / sum(k)
    k = convolve2d(k, fspecial_gauss(3, 1), "same")
    k = k / sum(k)
    return k 
Example #24
Source File: kernels.py    From aboleth with Apache License 2.0
def weights(self, input_dim, n_features, dtype=np.float32):
        """Generate the random fourier weights for this kernel.

        Parameters
        ----------
        input_dim : int
            the input dimension to this layer.
        n_features : int
            the number of unique random features, the actual output dimension
            of this layer will be ``2 * n_features``.
        dtype : np.dtype
            the dtype of the features to draw, this should match the
            observations.

        Returns
        -------
        P : ndarray
            the random weights of the fourier features of shape
            ``(input_dim, n_features)``.
        KL : Tensor, float
            the KL penalty associated with the parameters in this kernel (0.0).

        """
        # p is the matern number (v = p + .5) and the two is a transformation
        # of variables between Rasmussen 2006 p84 and the CF of a Multivariate
        # Student t (see wikipedia). Also see "A Note on the Characteristic
        # Function of Multivariate t Distribution":
        #   http://ocean.kisti.re.kr/downfile/volume/kss/GCGHC8/2014/v21n1/
        #   GCGHC8_2014_v21n1_81.pdf
        # To sample from a m.v. t we use the formula
        # from wikipedia, x = y * np.sqrt(df / u) where y ~ norm(0, I),
        # u ~ chi2(df), then x ~ mvt(0, I, df)
        self.lenscale, _ = _init_lenscale(self.given_lenscale,
                                          self.learn_lenscale, input_dim)
        df = 2 * (self.p + 0.5)
        y = self._random_state.randn(input_dim, n_features)
        u = self._random_state.chisquare(df, size=(n_features,))
        P = (y * np.sqrt(df / u)).astype(dtype) / \
            tf.expand_dims(self.lenscale, axis=-1)
        return P, 0. 
Example #25
Source File: test_pivot.py    From recruit with Apache License 2.0
def test_crosstab_single(self):
        df = self.df
        result = crosstab(df['A'], df['C'])
        expected = df.groupby(['A', 'C']).size().unstack()
        tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64)) 
Example #26
Source File: test_regression.py    From recruit with Apache License 2.0
def test_reshape_size_overflow(self):
        # gh-7455
        a = np.ones(20)[::2]
        if np.dtype(np.intp).itemsize == 8:
            # 64 bit. The following are the prime factors of 2**63 + 5,
            # plus a leading 2, so when multiplied together as int64,
            # the result overflows to a total size of 10.
            new_shape = (2, 13, 419, 691, 823, 2977518503)
        else:
            # 32 bit. The following are the prime factors of 2**31 + 5,
            # plus a leading 2, so when multiplied together as int32,
            # the result overflows to a total size of 10.
            new_shape = (2, 7, 7, 43826197)
        assert_raises(ValueError, a.reshape, new_shape) 
Example #27
Source File: test_regression.py    From recruit with Apache License 2.0
def test_huge_arange(self):
        # Regression test for #1062.
        # Set a size which cannot fit into a 64 bits signed integer
        sz = 2 ** 64
        with assert_raises_regex(ValueError,
                                 'Maximum allowed size exceeded'):
            np.arange(sz)  # must raise; any statement after this call would be unreachable
Example #28
Source File: test_numeric.py    From recruit with Apache License 2.0
def check_function(self, func, fill_value=None):
        par = ((0, 1, 2),
               range(self.ndims),
               self.orders,
               self.dtypes)
        fill_kwarg = {}
        if fill_value is not None:
            fill_kwarg = {'fill_value': fill_value}

        for size, ndims, order, dtype in itertools.product(*par):
            shape = ndims * [size]

            # do not fill void type
            if fill_kwarg and dtype.str.startswith('|V'):
                continue

            arr = func(shape, order=order, dtype=dtype,
                       **fill_kwarg)

            assert_equal(arr.dtype, dtype)
            assert_(getattr(arr.flags, self.orders[order]))

            if fill_value is not None:
                if dtype.str.startswith('|S'):
                    val = str(fill_value)
                else:
                    val = fill_value
                assert_equal(arr, dtype.type(val)) 
Example #29
Source File: test_numeric.py    From recruit with Apache License 2.0
def test_boolean(self):
        a = rand(3, 5, 8)
        V = rand(5, 8)
        g1 = randint(0, 5, size=15)
        g2 = randint(0, 8, size=15)
        V[g1, g2] = -V[g1, g2]
        assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all()) 
Example #30
Source File: test_pivot.py    From recruit with Apache License 2.0
def test_crosstab_with_numpy_size(self):
        # GH 4003
        df = pd.DataFrame({'A': ['one', 'one', 'two', 'three'] * 6,
                           'B': ['A', 'B', 'C'] * 8,
                           'C': ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 4,
                           'D': np.random.randn(24),
                           'E': np.random.randn(24)})
        result = pd.crosstab(index=[df['A'], df['B']],
                             columns=[df['C']],
                             margins=True,
                             aggfunc=np.size,
                             values=df['D'])
        expected_index = pd.MultiIndex(levels=[['All', 'one', 'three', 'two'],
                                               ['', 'A', 'B', 'C']],
                                       codes=[[1, 1, 1, 2, 2, 2, 3, 3, 3, 0],
                                              [1, 2, 3, 1, 2, 3, 1, 2, 3, 0]],
                                       names=['A', 'B'])
        expected_column = pd.Index(['bar', 'foo', 'All'],
                                   dtype='object',
                                   name='C')
        expected_data = np.array([[2., 2., 4.],
                                  [2., 2., 4.],
                                  [2., 2., 4.],
                                  [2., np.nan, 2.],
                                  [np.nan, 2., 2.],
                                  [2., np.nan, 2.],
                                  [np.nan, 2., 2.],
                                  [2., np.nan, 2.],
                                  [np.nan, 2., 2.],
                                  [12., 12., 24.]])
        expected = pd.DataFrame(expected_data,
                                index=expected_index,
                                columns=expected_column)
        tm.assert_frame_equal(result, expected)
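Passing aggfunc=np.size above turns crosstab into a counter: each cell holds the number of 'D' values in that group, which is why the 'All' margins row totals 24. A minimal standalone sketch:

import numpy as np
import pandas as pd

df = pd.DataFrame({'g': ['a', 'a', 'b'], 'v': [1.0, 2.0, 3.0]})
print(pd.pivot_table(df, values='v', index='g', aggfunc=np.size))
# counts 2 values for group 'a' and 1 for group 'b'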