Python numpy.size() Examples

The following are 30 code examples showing how to use numpy.size(). These examples are extracted from open source projects; the originating project and source file are noted above each example.
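
Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of what numpy.size() returns for a few typical inputs:

import numpy as np

a = np.zeros((3, 4))
print(np.size(a))     # 12 -- total number of elements, equivalent to a.size
print(np.size(a, 0))  # 3  -- length along axis 0
print(np.size(a, 1))  # 4  -- length along axis 1
print(np.size(5))     # 1  -- plain Python scalars count as a single element
print(np.size([]))    # 0  -- empty inputs give 0, which is why several examples below use np.size(...) != 0 as an emptiness check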

Example 1
Project: Tensorflow-YOLOv3   Author: kcosta42   File: utils.py    License: MIT License
def draw_boxes_frame(frame, frame_size, boxes_dicts, class_names, input_size):
  """Draws detected boxes in a video frame"""
  boxes_dict = boxes_dicts[0]
  resize_factor = (frame_size[0] / input_size[1], frame_size[1] / input_size[0])
  for cls in range(len(class_names)):
    boxes = boxes_dict[cls]
    color = (0, 0, 255)
    if np.size(boxes) != 0:
      for box in boxes:
        xy = box[:4]
        xy = [int(xy[i] * resize_factor[i % 2]) for i in range(4)]
        cv2.rectangle(frame, (xy[0], xy[1]), (xy[2], xy[3]), color[::-1], 2)
        (text_width, text_height), baseline = cv2.getTextSize(class_names[cls],
                                                              cv2.FONT_HERSHEY_SIMPLEX,
                                                              0.75, 1)
        cv2.rectangle(frame,
                      (xy[0], xy[1]),
                      (xy[0] + text_width, xy[1] - text_height - baseline),
                      color[::-1],
                      thickness=cv2.FILLED)
        cv2.putText(frame, class_names[cls], (xy[0], xy[1] - baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 1) 
Example 2
Project: fin   Author: vsmolyakov   File: olmar.py    License: MIT License
def initialize(context, eps = 10, window_length = 50):
    
    #init    
    context.stocks = STOCKS
    context.sids = SIDS
    #context.sids = [context.symbol(symb) for symb in context.stocks]
    context.m = np.size(STOCKS)
    context.price = {}
    context.b_t = np.ones(context.m)/float(context.m)
    context.prev_weights = np.ones(context.m)/float(context.m)
    context.eps = eps
    context.init = True
    context.days = 0
    context.window_length = window_length
    
    add_history(window_length, '1d', 'price')
    
    #set commission and slippage
    #context.set_commission(commission.PerShare(cost=0))
    #context.set_slippage(slippage.VolumeShareSlippage(volume_limit=0.25, price_impact=0.1)) 
Example 3
Project: fin   Author: vsmolyakov   File: olmar.py    License: MIT License
def analyze(context=None, results=None):
        
    f, (ax1, ax2, ax3) = plt.subplots(3, sharex = True)        
    ax1.plot(results.portfolio_value, linewidth = 2.0, label = 'portfolio')
    ax1.set_title('On-Line Moving Average Reversion')
    ax1.set_ylabel('Portfolio value (USD)')
    ax1.legend(loc=0)
    ax1.grid(True)
            
    ax2.plot(results['AAPL'], color = 'b', linestyle = '-', linewidth = 2.0, label = 'AAPL')
    ax2.plot(results['MSFT'], color = 'r', linestyle = '-', linewidth = 2.0, label = 'MSFT')
    ax2.set_ylabel('stock price (USD)')
    ax2.legend(loc=0)
    ax2.grid(True)
    
    ax3.semilogy(results['step_size'], color = 'b', linestyle = '-', linewidth = 2.0, label = 'step-size')
    ax3.semilogy(results['variability'], color = 'r', linestyle = '-', linewidth = 2.0, label = 'variability')
    ax3.legend(loc=0)
    ax3.grid(True)
    
    plt.show() 
Example 4
Project: quadcopter-simulation   Author: hbd730   File: quadPlot.py    License: BSD 3-Clause "New" or "Revised" License
def set_frame(frame):
    # convert the 3x6 world_frame matrix into three 3x2 line_data arrays (rows: x, y, z; columns: the two endpoints of each line)
    lines_data = [frame[:,[0,2]], frame[:,[1,3]], frame[:,[4,5]]]
    ax = plt.gca()
    lines = ax.get_lines()
    for line, line_data in zip(lines[:3], lines_data):
        x, y, z = line_data
        line.set_data(x, y)
        line.set_3d_properties(z)

    global history, count
    # plot history trajectory
    history[count] = frame[:,4]
    if count < np.size(history, 0) - 1:
        count += 1
    zline = history[:count,-1]
    xline = history[:count,0]
    yline = history[:count,1]
    lines[-1].set_data(xline, yline)
    lines[-1].set_3d_properties(zline)
    # ax.plot3D(xline, yline, zline, 'blue') 
Example 5
Project: pyscf   Author: pyscf   File: kpts_helper.py    License: Apache License 2.0
def flush(self):
        """
        Composes the vector.
        Returns:
            The composed vector.
        """
        if self.__data__ is None:
            self.__data__ = result = np.empty(self.__total_size__, dtype=self.__dtype__)
            offset = 0
        else:
            offset = self.__data__.size
            self.__data__ = result = np.empty(self.__total_size__ + self.__data__.size, dtype=self.__dtype__)

        for i in self.__transactions__:
            s = i.size
            result[offset:offset + s] = i.reshape(-1)
            offset += s
        self.__transactions__ = []

        return result 
Example 6
Project: pyscf   Author: pyscf   File: gw.py    License: Apache License 2.0
def si_c(self, ww, use_numba_impl=False):
    from numpy.linalg import solve
    """ 
    This computes the correlation part of the screened interaction W_c
    by solving <self.nprod> linear equations (1-K chi0) W = K chi0 K 
    or v_{ind}\sim W_{c} = (1-v\chi_{0})^{-1}v\chi_{0}v
    scr_inter[w,p,q], where w in ww, p and q in 0..self.nprod 
    """

    if not hasattr(self, 'pab2v_den'):
      self.pab2v_den = einsum('pab->apb', self.pb.get_ac_vertex_array())

    si0 = np.zeros((ww.size, self.nprod, self.nprod), dtype=self.dtypeComplex)
    if use_numba and use_numba_impl:

        # the numba implementation suffers from a continuous-array issue,
        # for example in test_0087_o2_gw.py;
        # use it only for experimental tests
        si_correlation_numba(si0, ww, self.x, self.kernel_sq, self.ksn2f, self.ksn2e,
                             self.pab2v_den, self.nprod, self.norbs, self.bsize,
                             self.nspin, self.nfermi, self.vstart)
    else:
        si_correlation(rf0_den(self, ww), si0, ww, self.kernel_sq, self.nprod)
    return si0 
Example 7
Project: feets   Author: quatrope   File: core.py    License: MIT License
def __repr__(self):
        """x.__repr__() <==> repr(x)."""
        if not hasattr(self, "__repr"):
            params = self.params or {}
            parsed_params = []
            for k, v in params.items():
                sk = str(k)
                if np.ndim(v) != 0 and np.size(v) > MAX_VALUES_TO_REPR:
                    tv = type(v)
                    sv = f"<{tv.__module__}.{tv.__name__}>"
                else:
                    sv = str(v)
                parsed_params.append(f"{sk}={sv}")
            str_params = ", ".join(parsed_params)
            self.__repr = f"{self.name}({str_params})"

        return self.__repr 
Example 8
Project: DeblurGAN-tf   Author: LeeDoYup   File: data_loader.py    License: MIT License
def read_image_pair(pair_path, resize_or_crop=None, image_size=(256,256)):
    image_blur = cv2.imread(pair_path[0], cv2.IMREAD_COLOR)
    image_blur = image_blur / 255.0 * 2.0 - 1.0
    image_real = cv2.imread(pair_path[1], cv2.IMREAD_COLOR)
    image_real = image_real / 255.0 * 2.0 - 1.0

    if resize_or_crop != None: 
        assert image_size != None

    if resize_or_crop == 'resize':
        image_blur = cv2.resize(image_blur, image_size, interpolation=cv2.INTER_AREA)
        image_real = cv2.resize(image_real, image_size, interpolation=cv2.INTER_AREA)
    elif resize_or_crop == 'crop':
        image_blur = cv2.crop(image_blur, image_size)
        image_real = cv2.crop(image_real, image_size)
    else:
        raise

    if np.size(np.shape(image_blur)) == 3:
        image_blur = np.expand_dims(image_blur, axis=0)
    if np.size(np.shape(image_real)) == 3:
        image_real = np.expand_dims(image_real, axis=0)
    image_blur = np.array(image_blur, dtype=np.float32)
    image_real = np.array(image_real, dtype=np.float32)
    return image_blur, image_real 
Example 9
Project: DeblurGAN-tf   Author: LeeDoYup   File: data_loader.py    License: MIT License
def read_image(path, resize_or_crop=None, image_size=(256,256)):
    image = cv2.imread(path, cv2.IMREAD_COLOR)
    image = image/255.0 * 2.0 - 1.0

    assert resize_or_crop != None
    assert image_size != None

    if resize_or_crop == 'resize':
        image = cv2.resize(image, image_size, interpolation=cv2.INTER_AREA)
    elif resize_or_crop == 'crop':
        image = cv2.crop(image, image_size)

    if np.size(np.shape(image)) == 3: 
        image = np.expand_dims(image, axis=0)

    image = np.array(image, dtype=np.float32)
    return image 
Example 10
Project: recruit   Author: Frank-qlu   File: test_numeric.py    License: Apache License 2.0
def test_count_nonzero_axis_consistent(self):
        # Check that the axis behaviour for valid axes in
        # non-special cases is consistent (and therefore
        # correct) by checking it against an integer array
        # that is then cast to the generic object dtype
        from itertools import combinations, permutations

        axis = (0, 1, 2, 3)
        size = (5, 5, 5, 5)
        msg = "Mismatch for axis: %s"

        rng = np.random.RandomState(1234)
        m = rng.randint(-100, 100, size=size)
        n = m.astype(object)

        for length in range(len(axis)):
            for combo in combinations(axis, length):
                for perm in permutations(combo):
                    assert_equal(
                        np.count_nonzero(m, axis=perm),
                        np.count_nonzero(n, axis=perm),
                        err_msg=msg % (perm,)) 
Example 11
Project: recruit   Author: Frank-qlu   File: test_function.py    License: Apache License 2.0
def test_count_uses_size_on_exception():
    class RaisingObjectException(Exception):
        pass

    class RaisingObject(object):

        def __init__(self, msg='I will raise inside Cython'):
            super(RaisingObject, self).__init__()
            self.msg = msg

        def __eq__(self, other):
            # gets called in Cython to check that raising calls the method
            raise RaisingObjectException(self.msg)

    df = DataFrame({'a': [RaisingObject() for _ in range(4)],
                    'grp': list('ab' * 2)})
    result = df.groupby('grp').count()
    expected = DataFrame({'a': [2, 2]}, index=pd.Index(
        list('ab'), name='grp'))
    tm.assert_frame_equal(result, expected)


# size
# -------------------------------- 
Example 12
Project: ibllib   Author: int-brain-lab   File: test_task.py    License: MIT License
def test_responsive_units(self):
        if self.test_data is None:
            return
        spike_times = self.test_data['spike_times']
        spike_clusters = self.test_data['spike_clusters']
        event_times = self.test_data['event_times']
        alpha = 0.5
        sig_units, stats, p_values, cluster_ids = bb.task.responsive_units(spike_times,
                                                                           spike_clusters,
                                                                           event_times,
                                                                           pre_time=[0.5, 0],
                                                                           post_time=[0, 0.5],
                                                                           alpha=alpha)
        num_clusters = np.size(np.unique(spike_clusters))
        self.assertTrue(np.size(sig_units) == 125)
        self.assertTrue(np.sum(p_values < alpha) == np.size(sig_units))
        self.assertTrue(np.size(cluster_ids) == num_clusters) 
Example 13
Project: ibllib   Author: int-brain-lab   File: test_task.py    License: MIT License
def test_roc_between_two_events(self):
        if self.test_data is None:
            return
        spike_times = self.test_data['spike_times']
        spike_clusters = self.test_data['spike_clusters']
        event_times = self.test_data['event_times']
        event_groups = self.test_data['event_groups']
        auc_roc, cluster_ids = bb.task.roc_between_two_events(spike_times,
                                                              spike_clusters,
                                                              event_times,
                                                              event_groups,
                                                              pre_time=0.5,
                                                              post_time=0.5)
        num_clusters = np.size(np.unique(spike_clusters))
        self.assertTrue(np.sum(auc_roc < 0.3) == 24)
        self.assertTrue(np.sum(auc_roc > 0.7) == 10)
        self.assertTrue(np.size(cluster_ids) == num_clusters) 
Example 14
Project: reinforcement-learning-an-introduction   Author: ShangtongZhang   File: car_rental_synchronous.py    License: MIT License
def policy_improvement(self, actions, values, policy):
        new_policy = np.copy(policy)

        expected_action_returns = np.zeros((MAX_CARS + 1, MAX_CARS + 1, np.size(actions)))
        cooks = dict()
        with mp.Pool(processes=8) as p:
            for action in actions:
                k = np.arange(MAX_CARS + 1)
                all_states = ((i, j) for i, j in itertools.product(k, k))
                cooks[action] = partial(self.expected_return_pi, values, action)
                results = p.map(cooks[action], all_states)
                for v, i, j, a in results:
                    expected_action_returns[i, j, self.inverse_actions[a]] = v
        for i in range(expected_action_returns.shape[0]):
            for j in range(expected_action_returns.shape[1]):
                new_policy[i, j] = actions[np.argmax(expected_action_returns[i, j])]

        policy_change = (new_policy != policy).sum()
        print(f'Policy changed in {policy_change} states')
        return policy_change, new_policy

    # O(n^4) computation for all possible requests and returns 
Example 15
Project: aboleth   Author: gradientinstitute   File: kernels.py    License: Apache License 2.0
def _init_lenscale(given_lenscale, learn_lenscale, input_dim):
    """Provide the lenscale variable and its initial value."""
    given_lenscale = (np.sqrt(1.0 / input_dim) if given_lenscale is None
                      else np.array(given_lenscale).squeeze()).astype(
                          np.float32)

    if learn_lenscale:
        lenscale = pos_variable(given_lenscale, name="kernel_lenscale")
        if np.size(given_lenscale) == 1:
            summary_scalar(lenscale)
        else:
            summary_histogram(lenscale)
    else:
        lenscale = given_lenscale

    lenscale_vec = tf.ones(input_dim, dtype=tf.float32) * lenscale
    init_lenscale = given_lenscale * np.ones(input_dim, dtype=np.float32)
    return lenscale_vec, init_lenscale 
Example 16
Project: models   Author: kipoi   File: bio_utils.py    License: MIT License
def remove_post_padding(signal):
    X_not_padded = len(signal) * [[]]

    for i in range(len(signal)):
        pad_initIdx = np.where(signal[i] == 0)
        if np.size(pad_initIdx) == 0:
            X_not_padded[i] = signal[i]
        else:
            X_not_padded[i] = signal[i][0:pad_initIdx[0][0]]

    return (X_not_padded) 
Example 17
Project: discomll   Author: romanorac   File: naivebayes.py    License: Apache License 2.0
def map_fit(interface, state, label, inp):
    """
    Counts occurrences of feature values for every row in the given data chunk. For continuous features it returns
    the number of values and calculates the mean and variance of every feature.
    For discrete features it counts occurrences of labels and of values for every feature, output as
    (label, feature index, feature value) pairs.
    """
    import numpy as np
    combiner = {}  # combiner used for joining of intermediate pairs
    out = interface.output(0)  # all outputted pairs have the same output label

    for row in inp:  # for every row in data chunk
        row = row.strip().split(state["delimiter"])  # split row
        if len(row) > 1:  # check if row is empty
            for i, j in enumerate(state["X_indices"]):  # for defined features
                if row[j] not in state["missing_vals"]:  # check missing values
                    # creates a pair - label, feature index
                    pair = row[state["y_index"]] + state["delimiter"] + str(j)

                    if state["X_meta"][i] == "c":  # continuous features
                        if pair in combiner:
                            # convert to float and store value
                            combiner[pair].append(np.float32(row[j]))
                        else:
                            combiner[pair] = [np.float32(row[j])]

                    else:  # discrete features
                        # add feature value to pair
                        pair += state["delimiter"] + row[j]
                        # increase counts of current pair
                        combiner[pair] = combiner.get(pair, 0) + 1

                    # increase label counts
                    combiner[row[state["y_index"]]] = combiner.get(row[state["y_index"]], 0) + 1

    for k, v in combiner.iteritems():  # all pairs in combiner are output
        if len(k.split(state["delimiter"])) == 2:  # continuous features
            # number of elements, partial mean and variance
            out.add(k, (np.size(v), np.mean(v, dtype=np.float32), np.var(v, dtype=np.float32)))
        else:  # discrete features and labels
            out.add(k, v) 
Example 18
Project: pynvr   Author: JFF-Bohdan   File: motion_detection.py    License: BSD 3-Clause "New" or "Revised" License
def motionDetected(self, new_frame):
        frame = self.preprocessInputFrame(new_frame)

        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        gray = cv.GaussianBlur(gray, (21, 21), 0)

        if self.prevFrame is None:
            self.prevFrame = gray
            return False

        frameDiff = cv.absdiff(gray, self.prevFrame)

        # kernel = np.ones((5, 5), np.uint8)

        opening = cv.morphologyEx(frameDiff, cv.MORPH_OPEN, None)  # noqa
        closing = cv.morphologyEx(frameDiff, cv.MORPH_CLOSE, None)  # noqa

        ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)

        height = np.size(th1, 0)
        width = np.size(th1, 1)

        nb = cv.countNonZero(th1)

        avg = (nb * 100) / (height * width)  # percentage of changed (non-zero) pixels in the thresholded frame difference

        self.prevFrame = gray

        # cv.DrawContours(currentframe, self.currentcontours, (0, 0, 255), (0, 255, 0), 1, 2, cv.CV_FILLED)
        # cv.imshow("frame", current_frame)

        ret = avg > self.threshold   # trigger the alarm if the changed-pixel percentage exceeds the threshold

        if ret:
            self.updateMotionDetectionDts()

        return ret 
Example 19
Project: differential-privacy-library   Author: IBM   File: validation.py    License: MIT License
def clip_to_bounds(array, bounds):
    """Clips the examples of a 2-dimensional array to given bounds.

    Parameters
    ----------
    array : np.ndarray
        Array to be clipped.  After clipping, every value lies within the given per-feature bounds.

    bounds : tuple
        Tuple of bounds of the form (min, max) which the array is to be clipped to. `min` and `max` must be scalar,
        unless array is 2-dimensional.

    Returns
    -------
    array : np.ndarray
        The clipped array.

    """
    if not isinstance(array, np.ndarray):
        raise TypeError("Input array must be a numpy array, got {}.".format(type(array)))

    if np.shape(bounds[0]) != np.shape(bounds[1]):
        raise ValueError("Bounds must be of the same shape, got {} and {}.".format(np.shape(bounds[0]),
                                                                                   np.shape(bounds[1])))

    lower, upper = check_bounds(bounds, np.size(bounds[0]), min_separation=0)
    clipped_array = array.copy()

    if np.allclose(lower, np.min(lower)) and np.allclose(upper, np.max(upper)):
        clipped_array = np.clip(clipped_array, np.min(lower), np.max(upper))
    else:
        if array.ndim != 2:
            raise ValueError("For non-scalar bounds, input array must be 2-dimensional. Got %d dimensions." %
                             array.ndim)

        for feature in range(array.shape[1]):
            clipped_array[:, feature] = np.clip(array[:, feature], lower[feature], upper[feature])

    return clipped_array 
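
A short hedged usage sketch of clip_to_bounds as defined above. The import path and the sample data are assumptions (the file is validation.py in IBM's differential-privacy-library, so diffprivlib.validation is the likely module); the expected output follows the per-feature branch shown in the code.

import numpy as np
from diffprivlib.validation import clip_to_bounds  # assumed import path; see project header above

# two samples, two features; per-feature bounds exercise the column-wise clipping branch
X = np.array([[0.5, 10.0],
              [-3.0, 2.0]])
bounds = (np.array([0.0, 0.0]), np.array([1.0, 5.0]))
print(clip_to_bounds(X, bounds))
# expected (per the code above): [[0.5 5.], [0. 2.]]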
Example 20
Project: fin   Author: vsmolyakov   File: olmar.py    License: MIT License
def simplex_projection(v, b=1):
    
    v = np.array(v)
    p = np.size(v)
    
    v = (v > 0)*v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)
    
    rho = np.where(u > (sv-b) / np.arange(1,p+1))[0][-1]
    theta = np.max([0, (sv[rho]-b)/(rho+1)])
    w = v - theta
    w[w<0] = 0
    
    return w 
Example 21
Project: pymoo   Author: msu-coinlab   File: go_funcs_G.py    License: Apache License 2.0
def fun(self, x, *args):
        self.nfev += 1

        i = arange(1., np.size(x) + 1.)
        return sum(x ** 2 / 4000) - prod(cos(x / sqrt(i))) + 1 
Example 22
Project: pymoo   Author: msu-coinlab   File: rmetric.py    License: Apache License 2.0
def _filter(self):

        def check_dominance(a, b, n_obj):
            flag1 = False
            flag2 = False
            for i in range(n_obj):
                if a[i] < b[i]:
                    flag1 = True
                else:
                    if a[i] > b[i]:
                        flag2 = True
            if flag1 and not flag2:
                return 1
            elif not flag1 and flag2:
                return -1
            else:
                return 0

        num_objs = np.size(self.curr_pop, axis=1)
        index_array = np.zeros(np.size(self.curr_pop, axis=0))
        for i in range(np.size(self.curr_pop, 0)):
            for j in range(np.size(self.whole_pop, 0)):
                flag = check_dominance(self.curr_pop[i, :], self.whole_pop[j, :], num_objs)
                if flag == -1:
                    index_array[i] = 1
                    break
        final_index = np.logical_not(index_array)
        filtered_pop = self.curr_pop[final_index, :]

        return filtered_pop 
Example 23
Project: pymoo   Author: msu-coinlab   File: rmetric.py    License: Apache License 2.0
def _preprocess(self, data, ref_point, w_point):

        datasize = np.size(data, 0)

        # Identify representative point
        ref_matrix = np.tile(ref_point, (datasize, 1))
        w_matrix = np.tile(w_point, (datasize, 1))
        # ratio of distance to the ref point over the distance between the w_point and the ref_point
        diff_matrix = (data - ref_matrix) / (w_matrix - ref_matrix)
        agg_value = np.amax(diff_matrix, axis=1)
        idx = np.argmin(agg_value)
        zp = [data[idx, :]]

        return zp, 
Example 24
Project: scarlet   Author: pmelchior   File: resampling.py    License: MIT License
def _pix2radec(coord, wcs):
    """Converts coordinates from pixels to Ra-Dec given a wcs
    """
    y,x = coord
    if np.size(wcs.array_shape) == 2:
        ra, dec = wcs.all_pix2world(x, y, 0, ra_dec_order=True)
    elif np.size(wcs.array_shape) == 3:
        ra, dec = wcs.all_pix2world(x, y, 0, 0, ra_dec_order=True)
    else:
        raise ValueError("WCSs must have either 2 or 3 dimensions. Received "+str(np.size(wcs.array_shape))+".")
    return (ra, dec) 
Example 25
Project: scarlet   Author: pmelchior   File: resampling.py    License: MIT License
def _radec2pix(coord, wcs):
    """Converts coordinates from Ra-Dec to pixels given a wcs
    """
    ra, dec = coord
    # Positions of coords  in the frame of the obs
    if np.size(wcs.array_shape) == 2:
        X, Y = wcs.all_world2pix(ra, dec, 0, ra_dec_order=True)
    elif np.size(wcs.array_shape) == 3:
        X, Y, _ = wcs.all_world2pix(ra, dec, 0, 0, ra_dec_order=True)
    else:
        raise ValueError("WCSs must have either 2 or 3 dimensions. Received "+str(np.size(wcs.array_shape))+".")
    return (Y, X) 
Example 26
Project: Tensorflow-YOLOv3   Author: kcosta42   File: utils.py    License: MIT License
def load_image(img_path, input_size):
  """Loads image in a 4D array"""
  img = Image.open(img_path)
  img = img.resize(size=input_size)
  img = np.array(img, dtype=np.float32)
  img = np.expand_dims(img[:, :, :3], axis=0)
  return img 
Example 27
Project: Tensorflow-YOLOv3   Author: kcosta42   File: utils.py    License: MIT License
def draw_boxes(img_name, boxes_dict, class_names, input_size):
  """Draws detected boxes"""
  img = Image.open(img_name)
  draw = ImageDraw.Draw(img)
  font = ImageFont.truetype(font="./data/Roboto-Black.ttf", size=(img.size[0] + img.size[1]) // 100)
  resize_factor = (img.size[0] / input_size[0], img.size[1] / input_size[1])

  for cls in range(len(class_names)):
    boxes = boxes_dict[cls]

    if np.size(boxes) != 0:
      for box in boxes:
        xy, confidence = box[:4], box[4]
        xy = [xy[i] * resize_factor[i % 2] for i in range(4)]
        x0, y0 = xy[0], xy[1]
        thickness = (img.size[0] + img.size[1]) // 300

        for t in np.linspace(0, 1, thickness):
          xy[0], xy[1] = xy[0] + t, xy[1] + t
          xy[2], xy[3] = xy[2] - t, xy[3] - t
          draw.rectangle(xy, outline="blue")

        text = f"{class_names[cls]} {(confidence * 100):.1f}%"
        text_size = draw.textsize(text, font=font)
        draw.rectangle([x0, y0 - text_size[1], x0 + text_size[0], y0], fill="blue")
        draw.text((x0, y0 - text_size[1]), text, fill="black", font=font)

        print(text)

  rgb_img = img.convert('RGB')
  rgb_img.save('./detections/image_output.jpg')
  print("Image Saved at \"" + './detections/image_output.jpg' + "\"")
  rgb_img.show() 
Example 28
Project: pyscf   Author: pyscf   File: kpts_helper.py    License: Apache License 2.0
def member(kpt, kpts):
    kpts = np.reshape(kpts, (len(kpts),kpt.size))
    dk = np.einsum('ki->k', abs(kpts-kpt.ravel()))
    return np.where(dk < KPT_DIFF_TOL)[0] 
Example 29
Project: pyscf   Author: pyscf   File: kpts_helper.py    License: Apache License 2.0
def get_kconserv3(cell, kpts, kijkab):
    r'''Get the momentum conservation array for a set of k-points.

    This function is similar to get_kconserv, but instead finds the 'kc'
    that satisfies momentum conservation for 5 k-points,

        (ki + kj + kk - ka - kb - kc) dot a = 2n\pi

    where these kpoints are stored in kijkab[ki, kj, kk, ka, kb].
    '''
    a = cell.lattice_vectors() / (2*np.pi)

    kpts_i, kpts_j, kpts_k, kpts_a, kpts_b = \
            [kpts[x].reshape(-1,3) for x in kijkab]
    shape = [np.size(x) for x in kijkab]
    kconserv = np.zeros(shape, dtype=int)

    kv_kab = kpts_k[:,None,None,:] - kpts_a[:,None,:] - kpts_b
    for i, kpti in enumerate(kpts_i):
        for j, kptj in enumerate(kpts_j):
            kv_ijkab = kv_kab + kpti + kptj
            for c, kptc in enumerate(kpts):
                s = np.einsum('kabx,wx->kabw', kv_ijkab - kptc, a)
                s_int = np.rint(s)
                mask = np.einsum('kabw->kab', abs(s - s_int)) < 1e-9
                kconserv[i,j,mask] = c

    new_shape = [shape[i] for i, x in enumerate(kijkab)
                 if not isinstance(x, (int, np.integer))]
    kconserv = kconserv.reshape(new_shape)
    return kconserv 
Example 30
Project: pyscf   Author: pyscf   File: kpts_helper.py    License: Apache License 2.0
def get(self, destination, slc=None):
        """
        Retrieves the next array.
        Args:
            destination: the shape of the destination array or the destination array itself;
            slc: an optional slice;

        Returns:
            The array.
        """
        if isinstance(destination, Number):
            destination = np.zeros((destination,), dtype=self.__data__.dtype)
        elif isinstance(destination, tuple):
            destination = np.zeros(destination, dtype=self.__data__.dtype)
        elif isinstance(destination, np.ndarray):
            pass
        else:
            raise ValueError("Unknown destination: %s" % str(destination))

        if slc is None:
            take_size = np.prod(destination.shape)
            take_shape = destination.shape
        else:
            slc = np.ix_(*slc)
            take_size = destination[slc].size
            take_shape = destination[slc].shape

        avail = self.__data__.size - self.__offset__
        if take_size > avail:
            raise ValueError("Insufficient # of elements: required %d %s, found %d" % (take_size, take_shape, avail))

        if slc is None:
            destination[:] = self.__data__[self.__offset__:self.__offset__ + take_size].reshape(take_shape)
        else:
            destination[slc] = self.__data__[self.__offset__:self.__offset__ + take_size].reshape(take_shape)

        self.__offset__ += take_size
        return destination