Python math.ceil() Examples

The following code examples show how to use math.ceil(), which returns the smallest integer greater than or equal to its argument (a float in Python 2, an int in Python 3). All examples are taken from open source Python projects.
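
Before the project examples, here is a minimal sketch of the basic behavior (Python 3 semantics; the values follow directly from the definition above):

import math

print(math.ceil(2.1))   # 3  -- smallest integer >= 2.1
print(math.ceil(-2.1))  # -2 -- ceil rounds toward positive infinity
print(math.ceil(7))     # 7  -- integers pass through unchanged

# Round-up integer division, an idiom that recurs throughout the examples below:
items, per_page = 23, 10
pages = math.ceil(items / per_page)  # 3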

Example 1
Project: mmdetection   Author: open-mmlab   File: sampler.py    Apache License 2.0
def __iter__(self):
        indices = []
        for i, size in enumerate(self.group_sizes):
            if size == 0:
                continue
            indice = np.where(self.flag == i)[0]
            assert len(indice) == size
            np.random.shuffle(indice)
            num_extra = int(np.ceil(size / self.samples_per_gpu)
                            ) * self.samples_per_gpu - len(indice)
            indice = np.concatenate(
                [indice, np.random.choice(indice, num_extra)])
            indices.append(indice)
        indices = np.concatenate(indices)
        indices = [
            indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
            for i in np.random.permutation(
                range(len(indices) // self.samples_per_gpu))
        ]
        indices = np.concatenate(indices)
        indices = indices.astype(np.int64).tolist()
        assert len(indices) == self.num_samples
        return iter(indices) 
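
(A quick worked pass through the padding above, with illustrative numbers: for a group of size = 10 and samples_per_gpu = 4, np.ceil(10 / 4) = 3, so num_extra = 3 * 4 - 10 = 2 extra indices are drawn with np.random.choice, making every group's length a multiple of samples_per_gpu.)
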
Example 2
Project: automl-translation-tools   Author: GoogleCloudPlatform   File: autosplit.py    Apache License 2.0
def _autosplit_example_count(total_example_count):
  """Gets autosplit example counts group by ml_use.

  Args:
    total_example_count: int
  Returns:
    Dict[ml_use.MLUse, int]
  """
  train_example_count = int(math.ceil(total_example_count * 0.8))
  validation_example_count = int(
    math.ceil(total_example_count * 0.9 - train_example_count))
  test_example_count = (
      total_example_count - validation_example_count - train_example_count)
  return {
    MLUse.TRAIN: train_example_count,
    MLUse.VALIDATION: validation_example_count,
    MLUse.TEST: test_example_count,
  } 
Example 3
Project: gamereporter   Author: gamesbook   File: game.py    MIT License
def __init__(self, item):

        self.id = item.get('id', 0)
        self.averageweight = '%.2f' % item.get('averageweight', 0.0)
        self.percentageweight = '%s' % math.ceil(item.get('averageweight', 0.0) * 20.0)
        self.name = item.get('name', 'NAME?')
        self.description = item.get('desc', 'NAME?')
        self.description_html = item.get('desc', 'NAME?').\
            replace('[', '<').replace(']', '>').replace('\n', '<br/>')
        self.image = item.get('image', '')
        self.categories = item.get('categories', '???')
        self.mechanics = item.get('mechanics', '???')
        self.players = item.get('players', '1-100')
        self.minplayers = item.get('minplayers', '1')
        self.maxplayers = item.get('maxplayers', '1')
        self.age = item.get('age', '1+')
        self.yearpublished = item.get('yearpublished', '1000')
        self.playingtime = item.get('playingtime', '100')


# example games - ALL text and images sourced from http://www.boardgamegeek.com 
Example 4
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: keys.py    MIT License
def from_coords(cls, x, y):
        """
        Creates an ECPoint object from the X and Y integer coordinates of the
        point

        :param x:
            The X coordinate, as an integer

        :param y:
            The Y coordinate, as an integer

        :return:
            An ECPoint object
        """

        x_bytes = int(math.ceil(math.log(x, 2) / 8.0))
        y_bytes = int(math.ceil(math.log(y, 2) / 8.0))

        num_bytes = max(x_bytes, y_bytes)

        byte_string = b'\x04'
        byte_string += int_to_bytes(x, width=num_bytes)
        byte_string += int_to_bytes(y, width=num_bytes)

        return cls(byte_string) 
Example 5
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: keys.py    MIT License
def bit_size(self):
        """
        :return:
            The bit size of the private key, as an integer
        """

        if self._bit_size is None:
            if self.algorithm == 'rsa':
                prime = self['private_key'].parsed['modulus'].native
            elif self.algorithm == 'dsa':
                prime = self['private_key_algorithm']['parameters']['p'].native
            elif self.algorithm == 'ec':
                prime = self['private_key'].parsed['private_key'].native
            self._bit_size = int(math.ceil(math.log(prime, 2)))
            modulus = self._bit_size % 8
            if modulus != 0:
                self._bit_size += 8 - modulus
        return self._bit_size 
Example 6
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: keys.py    MIT License
def bit_size(self):
        """
        :return:
            The bit size of the public key, as an integer
        """

        if self._bit_size is None:
            if self.algorithm == 'ec':
                self._bit_size = ((len(self['public_key'].native) - 1) / 2) * 8
            else:
                if self.algorithm == 'rsa':
                    prime = self['public_key'].parsed['modulus'].native
                elif self.algorithm == 'dsa':
                    prime = self['algorithm']['parameters']['p'].native
                self._bit_size = int(math.ceil(math.log(prime, 2)))
                modulus = self._bit_size % 8
                if modulus != 0:
                    self._bit_size += 8 - modulus

        return self._bit_size 
Example 7
Project: flasky   Author: RoseOu   File: selectors.py    MIT License
def select(self, timeout=None):
            if timeout is None:
                timeout = None
            elif timeout <= 0:
                timeout = 0
            else:
                # poll() has a resolution of 1 millisecond, round away from
                # zero to wait *at least* timeout seconds.
                timeout = int(math.ceil(timeout * 1e3))
            ready = []
            try:
                fd_event_list = wrap_error(self._poll.poll, timeout)
            except InterruptedError:
                return ready
            for fd, event in fd_event_list:
                events = 0
                if event & ~select.POLLIN:
                    events |= EVENT_WRITE
                if event & ~select.POLLOUT:
                    events |= EVENT_READ

                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready 
Example 8
Project: flasky   Author: RoseOu   File: selectors.py    MIT License
def select(self, timeout=None):
            if timeout is None:
                timeout = None
            elif timeout <= 0:
                timeout = 0
            else:
                # devpoll() has a resolution of 1 millisecond, round away from
                # zero to wait *at least* timeout seconds.
                timeout = math.ceil(timeout * 1e3)
            ready = []
            try:
                fd_event_list = self._devpoll.poll(timeout)
            except InterruptedError:
                return ready
            for fd, event in fd_event_list:
                events = 0
                if event & ~select.POLLIN:
                    events |= EVENT_WRITE
                if event & ~select.POLLOUT:
                    events |= EVENT_READ

                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready 
Example 9
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: utils.py    Apache License 2.0
def save_image(data, epoch, image_size, batch_size, output_dir, padding=2):
    """ save image """
    data = data.asnumpy().transpose((0, 2, 3, 1))
    datanp = np.clip(
        (data - np.min(data))*(255.0/(np.max(data) - np.min(data))), 0, 255).astype(np.uint8)
    x_dim = min(8, batch_size)
    y_dim = int(math.ceil(float(batch_size) / x_dim))
    height, width = int(image_size + padding), int(image_size + padding)
    grid = np.zeros((height * y_dim + 1 + padding // 2, width *
                     x_dim + 1 + padding // 2, 3), dtype=np.uint8)
    k = 0
    for y in range(y_dim):
        for x in range(x_dim):
            if k >= batch_size:
                break
            start_y = y * height + 1 + padding // 2
            end_y = start_y + height - padding
            start_x = x * width + 1 + padding // 2
            end_x = start_x + width - padding
            np.copyto(grid[start_y:end_y, start_x:end_x, :], datanp[k])
            k += 1
    imageio.imwrite(
        '{}/fake_samples_epoch_{}.png'.format(output_dir, epoch), grid) 
Example 10
Project: fs_image   Author: facebookincubator   File: test_extents_to_chunks.py    MIT License
def _gen_ranges_from_figure(figure: str):
    for s in textwrap.dedent(figure.strip('\n')).split('\n'):
        s = s.rstrip()
        # Number lines should aid reading off positions. Check they're right.
        if re.match('[0-9]*$', s):
            assert ('0123456789' * math.ceil(len(s) / 10))[:len(s)] == s, \
                f'Bad number line {s} in {figure}'
            continue
        offset = 0
        for m in re.finditer(r'(.)\1*', s):
            v = m.group(0)
            if v[0] != ' ':
                yield v[0], offset, len(v)
            offset += len(v) 
Example 11
Project: autolims   Author: scottbecker   File: __init__.py    MIT License
def round_up(x,nearest_number=1):
    nearest_number = nearest_number * 1.0
    return int(math.ceil(x / nearest_number)) * nearest_number 
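
A hypothetical usage of round_up() above (note the float return, since nearest_number is coerced to float):

round_up(7, 5)  # ceil(7 / 5.0) = 2, times 5.0 -> 10.0
round_up(12)    # nearest_number defaults to 1 -> 12.0
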
Example 12
Project: autolims   Author: scottbecker   File: utils.py    MIT License
def ceil_volume(volume,ndigits=0):
    """
    Converts to microliters and performs ceil
    """
    
    magnitude = volume.to('microliter').magnitude
    power_multiple = math.pow(10,ndigits)
    return ul(math.ceil(magnitude * int(power_multiple)) / power_multiple) 
Example 13
Project: clikit   Author: sdispater   File: section_output.py    MIT License
def add_content(self, content):  # type: (str) -> None
        for line_content in content.split("\n"):
            self._lines += (
                math.ceil(
                    len(self.remove_format(line_content).replace("\t", "        "))
                    / self._terminal.width
                )
                or 1
            )
            self._content.append(line_content)
            self._content.append("\n") 
Example 14
Project: clikit   Author: sdispater   File: time.py    MIT License
def format_time(secs):  # type: (int) -> str
    for fmt in _TIME_FORMATS:
        if secs > fmt[0]:
            continue

        if len(fmt) == 2:
            return fmt[1]

        return "{} {}".format(math.ceil(secs / fmt[2]), fmt[1]) 
Example 15
Project: heroku-log-lights   Author: codingjoe   File: io.py    MIT License
def print_log(log):
    if log.service < 30:
        color = COLORS.GREEN
    elif log.service < 100:
        color = COLORS.BLUE
    elif log.service < 1000:
        color = COLORS.YELLOW
    else:
        color = COLORS.RED
    seconds = math.ceil(math.log(log.service, 1.4101)) % 30
    print(color + "{:>30}".format('')[:seconds] + COLORS.DEFAULT + "{:>30}".format('')[seconds:] + str(log)) 
Example 16
Project: heroku-log-lights   Author: codingjoe   File: io.py    MIT License
async def print_matrix(matrix, slots):
    cs = 255 / matrix.height
    while True:
        for col, slot in enumerate(slots, start=1):
            col = matrix.width / 2 + (int(col / 2) if col % 2 else col / -2)
            if slot[0] is not None:
                try:
                    height = math.ceil(math.log(slot[1], HEROKU_ROUTER_TIMEOUT) * matrix.height)
                except ValueError:
                    pass
                else:
                    for y in range(height):
                        if 300 > slot[0].status >= 200:
                            color = int(0 + cs * y), int(255 - cs * y), 0
                        elif 400 > slot[0].status >= 300:
                            color = int(0 + cs * y), 0, int(255 - cs * y)
                        elif 500 > slot[0].status >= 400:
                            color = 255, 255, 0
                        elif slot[0].status >= 500:
                            color = 255, 0, 0
                        else:
                            color = 0, 0, 0
                        try:
                            matrix.SetPixel(col, matrix.height - y, *color)
                        except Exception:
                            pass
                if slot[0].service >= slot[1]:
                    slot[1] += 10
                else:
                    for row in range(matrix.height):
                        matrix.SetPixel(col, row, 0, 0, 0)
                    slot[0] = None
                    slot[1] = 0

        await asyncio.sleep(0.01) 
Example 17
Project: cat-bbs   Author: aleju   File: plotting.py    MIT License
def __init__(self, titles, increasing, save_to_fp):
        assert len(titles) == len(increasing)
        n_plots = len(titles)
        self.titles = titles
        self.increasing = dict([(title, incr) for title, incr in zip(titles, increasing)])
        self.colors = ["red", "blue", "cyan", "magenta", "orange", "black"]

        self.nb_points_max = 500
        self.save_to_fp = save_to_fp
        self.start_batch_idx = 0
        self.autolimit_y = False
        self.autolimit_y_multiplier = 5

        #self.fig, self.axes = plt.subplots(nrows=2, ncols=2, figsize=(20, 20))
        nrows = max(1, int(math.sqrt(n_plots)))
        ncols = int(math.ceil(n_plots / nrows))
        width = ncols * 10
        height = nrows * 10

        self.fig, self.axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(width, height))

        if nrows == 1 and ncols == 1:
            self.axes = [self.axes]
        else:
            self.axes = self.axes.flat

        title_to_ax = dict()
        for idx, (title, ax) in enumerate(zip(self.titles, self.axes)):
            title_to_ax[title] = ax
        self.title_to_ax = title_to_ax

        self.fig.tight_layout()
        self.fig.subplots_adjust(left=0.05) 
Example 18
Project: pyblish-win   Author: pyblish   File: test_math.py    GNU Lesser General Public License v3.0
def testCeil(self):
        self.assertRaises(TypeError, math.ceil)
        # These types will be int in py3k.
        self.assertEqual(float, type(math.ceil(1)))
        self.assertEqual(float, type(math.ceil(1L)))
        self.assertEqual(float, type(math.ceil(1.0)))
        self.ftest('ceil(0.5)', math.ceil(0.5), 1)
        self.ftest('ceil(1.0)', math.ceil(1.0), 1)
        self.ftest('ceil(1.5)', math.ceil(1.5), 2)
        self.ftest('ceil(-0.5)', math.ceil(-0.5), 0)
        self.ftest('ceil(-1.0)', math.ceil(-1.0), -1)
        self.ftest('ceil(-1.5)', math.ceil(-1.5), -1)
        self.assertEqual(math.ceil(INF), INF)
        self.assertEqual(math.ceil(NINF), NINF)
        self.assertTrue(math.isnan(math.ceil(NAN)))

        class TestCeil(object):
            def __float__(self):
                return 41.3
        class TestNoCeil(object):
            pass
        self.ftest('ceil(TestCeil())', math.ceil(TestCeil()), 42)
        self.assertRaises(TypeError, math.ceil, TestNoCeil())

        t = TestNoCeil()
        t.__ceil__ = lambda *args: args
        self.assertRaises(TypeError, math.ceil, t)
        self.assertRaises(TypeError, math.ceil, t, 0) 
Example 19
Project: pyblish-win   Author: pyblish   File: test_math.py    GNU Lesser General Public License v3.0
def testFloor(self):
        self.assertRaises(TypeError, math.floor)
        # These types will be int in py3k.
        self.assertEqual(float, type(math.floor(1)))
        self.assertEqual(float, type(math.floor(1L)))
        self.assertEqual(float, type(math.floor(1.0)))
        self.ftest('floor(0.5)', math.floor(0.5), 0)
        self.ftest('floor(1.0)', math.floor(1.0), 1)
        self.ftest('floor(1.5)', math.floor(1.5), 1)
        self.ftest('floor(-0.5)', math.floor(-0.5), -1)
        self.ftest('floor(-1.0)', math.floor(-1.0), -1)
        self.ftest('floor(-1.5)', math.floor(-1.5), -2)
        # pow() relies on floor() to check for integers
        # This fails on some platforms - so check it here
        self.ftest('floor(1.23e167)', math.floor(1.23e167), 1.23e167)
        self.ftest('floor(-1.23e167)', math.floor(-1.23e167), -1.23e167)
        self.assertEqual(math.floor(INF), INF)
        self.assertEqual(math.floor(NINF), NINF)
        self.assertTrue(math.isnan(math.floor(NAN)))

        class TestFloor(object):
            def __float__(self):
                return 42.3
        class TestNoFloor(object):
            pass
        self.ftest('floor(TestFloor())', math.floor(TestFloor()), 42)
        self.assertRaises(TypeError, math.floor, TestNoFloor())

        t = TestNoFloor()
        t.__floor__ = lambda *args: args
        self.assertRaises(TypeError, math.floor, t)
        self.assertRaises(TypeError, math.floor, t, 0) 
Example 20
Project: pyblish-win   Author: pyblish   File: test_compile.py    GNU Lesser General Public License v3.0
def check_stack_size(self, code):
        # To assert that the alleged stack size is not O(N), we
        # check that it is smaller than log(N).
        if isinstance(code, str):
            code = compile(code, "<foo>", "single")
        max_size = math.ceil(math.log(len(code.co_code)))
        self.assertLessEqual(code.co_stacksize, max_size) 
Example 21
Project: xadmin_bugfix   Author: vip68   File: layout.py    BSD 3-Clause "New" or "Revised" License
def convert_field(self, f, counts):
        col_class = "col-sm-%d" % int(math.ceil(12 / counts))
        if not (isinstance(f, Field) or issubclass(f.__class__, Field)):
            f = layout.Field(f)
        if f.wrapper_class:
            f.wrapper_class += " %s" % col_class
        else:
            f.wrapper_class = col_class
        return f 
Example 22
Project: lora-sx1276   Author: raspberrypi-tw   File: DataPayload.py    GNU Affero General Public License v3.0
def decrypt_payload(self, key, direction, mic):
        k = int(math.ceil(len(self.payload) / 16.0))

        a = []
        for i in range(k):
            a += [0x01]
            a += [0x00, 0x00, 0x00, 0x00]
            a += [direction]
            a += self.mac_payload.get_fhdr().get_devaddr()
            a += self.mac_payload.get_fhdr().get_fcnt()
            a += [0x00] # fcnt 32bit
            a += [0x00] # fcnt 32bit
            a += [0x00]
            a += [i+1]

        cipher = AES.new(bytes(key))
        s = cipher.encrypt(bytes(a))

        padded_payload = []
        for i in range(k):
            idx = (i + 1) * 16
            padded_payload += (self.payload[idx - 16:idx] + ([0x00] * 16))[:16]

        payload = []
        for i in range(len(self.payload)):
            payload += [s[i] ^ padded_payload[i]]
        return list(map(int, payload)) 
Example 23
Project: lora-sx1276   Author: raspberrypi-tw   File: DataPayload.py    GNU Affero General Public License v3.0
def encrypt_payload(self, key, direction, data):
        k = int(math.ceil(len(data) / 16.0))

        a = []
        for i in range(k):
            a += [0x01]
            a += [0x00, 0x00, 0x00, 0x00]
            a += [direction]
            a += self.mac_payload.get_fhdr().get_devaddr()
            a += self.mac_payload.get_fhdr().get_fcnt()
            a += [0x00] # fcnt 32bit
            a += [0x00] # fcnt 32bit
            a += [0x00]
            a += [i+1]

        cipher = AES.new(bytes(key))
        s = cipher.encrypt(bytes(a))

        padded_payload = []
        for i in range(k):
            idx = (i + 1) * 16
            padded_payload += (data[idx - 16:idx] + ([0x00] * 16))[:16]

        payload = []
        for i in range(len(data)):
            payload += [s[i] ^ padded_payload[i]]
        return list(map(int, payload)) 
Example 24
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: network.py    MIT License
def _anchor_component(self, height, width):
    # just to get the shape right
    #height = int(math.ceil(self._im_info.data[0, 0] / self._feat_stride[0]))
    #width = int(math.ceil(self._im_info.data[0, 1] / self._feat_stride[0]))
    anchors, anchor_length = generate_anchors_pre(
        height, width, self._feat_stride, self._anchor_scales, self._anchor_ratios)
    self._anchors = Variable(torch.from_numpy(anchors).cuda())
    self._anchor_length = anchor_length 
Example 25
Project: comet-commonsense   Author: atcbosselut   File: atomic.py    Apache License 2.0
def select_partial_dataset(data_opts, data):
    num_selections = math.ceil(data_opts.kr * len(data))
    return random.sample(data, num_selections) 
Example 26
Project: django-xadmin   Author: MarkHoo   File: layout.py    MIT License
def convert_field(self, f, counts):
        col_class = "col-sm-%d" % int(math.ceil(12 / counts))
        if not (isinstance(f, Field) or issubclass(f.__class__, Field)):
            f = layout.Field(f)
        if f.wrapper_class:
            f.wrapper_class += " %s" % col_class
        else:
            f.wrapper_class = col_class
        return f 
Example 27
Project: django-xadmin   Author: MarkHoo   File: layout.py    MIT License
def convert_field(self, f, counts):
        col_class = "col-sm-%d" % int(math.ceil(12 / counts))
        if not (isinstance(f, Field) or issubclass(f.__class__, Field)):
            f = layout.Field(f)
        if f.wrapper_class:
            f.wrapper_class += " %s" % col_class
        else:
            f.wrapper_class = col_class
        return f 
Example 28
Project: django-xadmin   Author: MarkHoo   File: layout.py    MIT License
def convert_field(self, f, counts):
        col_class = "col-sm-%d" % int(math.ceil(12 / counts))
        if not (isinstance(f, Field) or issubclass(f.__class__, Field)):
            f = layout.Field(f)
        if f.wrapper_class:
            f.wrapper_class += " %s" % col_class
        else:
            f.wrapper_class = col_class
        return f 
Example 29
Project: iSDX   Author: sdn-ixp   File: supersets.py    Apache License 2.0
def recompute_all_supersets(self, pctrl):

        self.logger.debug("~Recomputing all Supersets...")

        self.rulecounts = self.recompute_rulecounts(pctrl)
        # get all sets of participants advertising the same prefix
        peer_sets = get_prefix2part_sets(pctrl)
        peer_sets = clear_inactive_parts(peer_sets, self.rulecounts.keys())
        peer_sets = removeSubsets(peer_sets)

        self.supersets = minimize_ss_rules_greedy(peer_sets, self.rulecounts, self.max_initial_bits)

        # impose an ordering on each superset by converting sets to lists
        for i in range(len(self.supersets)):
            self.supersets[i] = list(self.supersets[i])

        # if there is more than one superset, set the id size appropriately
        self.id_size = 1
        if len(self.supersets) > 1:
            self.id_size = int(math.ceil(math.log(len(self.supersets), 2)))
            
        # fix the mask size based on the id size
        self.mask_size = self.max_bits - self.id_size

        # in the unlikely case that there are more participants for a prefix than can fit in
        # the mask, truncate the list of participants (this may still be very broken)
        for superset in self.supersets:
            if len(superset) > self.mask_size:
                self.logger.warn('Superset too big!  Dropping participants.')
                del(superset[self.mask_size:])

        self.logger.debug("done.~")
        self.logger.debug("Supersets: >> "+str(self.supersets)) 
Example 30
Project: iSDX   Author: sdn-ixp   File: ss_lib.py    Apache License 2.0
def bitsRequired(supersets):
    """ How many bits are needed to represent any set in this construction?
    """
    if supersets is None:
        return 0

    logM = 1
    if len(supersets) > 1:
        logM = math.ceil(math.log(len(supersets), 2))
    maxS = max(len(superset) for superset in supersets)

    return int(logM + maxS) 
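
To make the bit arithmetic concrete (illustrative input): for supersets = [[1, 2, 3], [4, 5]], logM = ceil(log2(2)) = 1 bit identifies a superset, maxS = 3 bits mask its members, and bitsRequired returns 4.
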
Example 31
Project: Neural-LP   Author: fanyangxyz   File: data.py    MIT License
def _count_batch(self, samples, batch_size):
        relations = zip(*samples)[0]
        relations_counts = Counter(relations)
        num_batches = [ceil(1. * x / batch_size) for x in relations_counts.values()]
        return int(sum(num_batches)) 
Example 32
Project: kuaa   Author: rafaelwerneck   File: subset.py    GNU General Public License v3.0
def stratified_selection(dataset, subset_size):
	labels = [line.split(None,1)[0] for line in open(dataset)]
	label_linenums = defaultdict(list)
	for i, label in enumerate(labels):
		label_linenums[label] += [i]

	l = len(labels)
	remaining = subset_size
	ret = []

	# classes with fewer data are sampled first; otherwise
	# some rare classes may not be selected
	for label in sorted(label_linenums, key=lambda x: len(label_linenums[x])):
		linenums = label_linenums[label]
		label_size = len(linenums) 
		# at least one instance per class
		s = int(min(remaining, max(1, math.ceil(label_size*(float(subset_size)/l)))))
		if s == 0:
			sys.stderr.write('''\
Error: failed to have at least one instance per class
    1. You may have regression data.
    2. Your classification data is unbalanced or too small.
Please use -s 1.
''')
			sys.exit(-1)
		remaining -= s
		ret += [linenums[i] for i in random.sample(xrange(label_size), s)]
	return sorted(ret) 
Example 33
Project: plugin.video.lynda   Author: davejm   File: lynda_api.py    GNU General Public License v2.0
def course_thumb(self, course_id):
        width = 480
        endpoint = '/course/{0}/thumb'.format(course_id)
        params = {
            "w": width,
            "h": int(math.ceil(width / 1.7777778)),
            "colorHex": "000000"
        }
        resp = self._get(endpoint, params)
        return resp.url 
Example 34
Project: schwebedraht   Author: see-base   File: main.py    GNU General Public License v3.0
def punkte_setzen(aktuelle_zeit, letzte_zeit):
    # Scoring-system documentation: https://github.com/see-base/schwebedraht/blob/master/PUNKTE.md
    global punkte, p_multiplikator

    pin1, zeit1 = aktuelle_zeit
    pin2, zeit2 = letzte_zeit

    if pin1 in segmente["bonus"] and pin2 != pin1: # avoids counting the same bonus segment twice in a row
        if zeit1 - zeit2 <= 5:   # multiplier doubles up to 5 s
            p_multiplikator *= 2
        elif zeit1 - zeit2 <= 10: # multiplier increases by 1 up to 10 s
            p_multiplikator += 1
        elif zeit1 - zeit2 >= 15: # multiplier is reset from 15 s on
            p_multiplikator = 1

        punkte += randint(10, 50) * p_multiplikator # award points

    elif pin1 in segmente["fail"]:
        if p_multiplikator > 1: # a touch halves the multiplier
            p_multiplikator = math.ceil(p_multiplikator / 2)

    beruehrt = 0
    for e in zeitenListe:
        if e[0] in segmente["fail"]:
            beruehrt += 1
    sock.send(bytes("medien/punkte/punkte:{} | {}".format(punkte, beruehrt), "UTF-8"))
    if debug: print("Points: {} | Multiplier: {} | Touches: {}".format(punkte, p_multiplikator, beruehrt)) 
Example 35
Project: mmdetection   Author: open-mmlab   File: sampler.py    Apache License 2.0
def __init__(self, dataset, samples_per_gpu=1):
        assert hasattr(dataset, 'flag')
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.flag = dataset.flag.astype(np.int64)
        self.group_sizes = np.bincount(self.flag)
        self.num_samples = 0
        for i, size in enumerate(self.group_sizes):
            self.num_samples += int(np.ceil(
                size / self.samples_per_gpu)) * self.samples_per_gpu 
Example 36
Project: mmdetection   Author: open-mmlab   File: sampler.py    Apache License 2.0
def __init__(self,
                 dataset,
                 samples_per_gpu=1,
                 num_replicas=None,
                 rank=None):
        _rank, _num_replicas = get_dist_info()
        if num_replicas is None:
            num_replicas = _num_replicas
        if rank is None:
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0

        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)

        self.num_samples = 0
        for i, j in enumerate(self.group_sizes):
            self.num_samples += int(
                math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
                          self.num_replicas)) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas 
Example 37
Project: mmdetection   Author: open-mmlab   File: sampler.py    Apache License 2.0
def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)

        indices = []
        for i, size in enumerate(self.group_sizes):
            if size > 0:
                indice = np.where(self.flag == i)[0]
                assert len(indice) == size
                indice = indice[list(torch.randperm(int(size),
                                                    generator=g))].tolist()
                extra = int(
                    math.ceil(
                        size * 1.0 / self.samples_per_gpu / self.num_replicas)
                ) * self.samples_per_gpu * self.num_replicas - len(indice)
                # pad indice
                tmp = indice.copy()
                for _ in range(extra // size):
                    indice.extend(tmp)
                indice.extend(tmp[:extra % size])
                indices.extend(indice)

        assert len(indices) == self.total_size

        indices = [
            indices[j] for i in list(
                torch.randperm(
                    len(indices) // self.samples_per_gpu, generator=g))
            for j in range(i * self.samples_per_gpu, (i + 1) *
                           self.samples_per_gpu)
        ]

        # subsample
        offset = self.num_samples * self.rank
        indices = indices[offset:offset + self.num_samples]
        assert len(indices) == self.num_samples

        return iter(indices) 
Example 38
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: utils.py    MIT License
def combine_images(generated_images):
    num = generated_images.shape[0]
    width = int(math.sqrt(num))
    height = int(math.ceil(float(num)/width))
    shape = generated_images.shape[1:3]
    image = np.zeros((height*shape[0], width*shape[1]),
                     dtype=generated_images.dtype)
    for index, img in enumerate(generated_images):
        i = int(index/width)
        j = index % width
        image[i*shape[0]:(i+1)*shape[0], j*shape[1]:(j+1)*shape[1]] = \
            img[:, :, 0]
    return image 
Example 39
Project: neural-fingerprinting   Author: StephanZheng   File: gen_noisy.py    BSD 3-Clause "New" or "Revised" License
def evaluate_checkpoint(sess,model):
    dataset = 'cifar'

    #with tf.Session() as sess:
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_samples=[]
    adv_y_samples=[]
    for ibatch in range(num_batches):
      bstart = ibatch * eval_batch_size
      bend = min(bstart + eval_batch_size, num_eval_examples)

      x_batch = mnist.test.images[bstart:bend,:]
      y_batch = mnist.test.labels[bstart:bend]

      x_batch_adv = attack.perturb(x_batch, y_batch, sess)
      if(ibatch == 0):
          adv_x_samples = x_batch_adv
          adv_y_samples = y_batch
      else:
          adv_x_samples = np.concatenate((adv_x_samples, x_batch_adv), axis = 0)
          adv_y_samples = np.concatenate((adv_y_samples, y_batch), axis = 0)
    if(args.attack == 'xent'):
      atck = 'pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    elif(args.attack == 'cw_pgd'):
      atck = 'cw_pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    else:
      f = open(os.path.join(args.log_dir, "custom.p"), "w")
    pickle.dump({"adv_input":adv_x_samples,"adv_labels":adv_y_samples},f)
    f.close() 
Example 40
Project: neural-fingerprinting   Author: StephanZheng   File: gen_whitebox_adv.py    BSD 3-Clause "New" or "Revised" License
def evaluate_checkpoint(sess,model):
    dataset = 'cifar'

    #with tf.Session() as sess:
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_samples=[]
    adv_y_samples=[]
    for ibatch in range(num_batches):
      bstart = ibatch * eval_batch_size
      bend = min(bstart + eval_batch_size, num_eval_examples)

      x_batch = mnist.test.images[bstart:bend,:]
      y_batch = mnist.test.labels[bstart:bend]

      x_batch_adv = attack.perturb(x_batch, y_batch, sess)
      if(ibatch == 0):
          adv_x_samples = x_batch_adv
          adv_y_samples = y_batch
      else:
          adv_x_samples = np.concatenate((adv_x_samples, x_batch_adv), axis = 0)
          adv_y_samples = np.concatenate((adv_y_samples, y_batch), axis = 0)
    if(args.attack == 'xent'):
      atck = 'pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    elif(args.attack == 'cw_pgd'):
      atck = 'cw_pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    else:
      f = open(os.path.join(args.log_dir, "custom.p"), "w")
    pickle.dump({"adv_input":adv_x_samples,"adv_labels":adv_y_samples},f)
    f.close() 
Example 41
Project: neural-fingerprinting   Author: StephanZheng   File: gen_whitebox_adv.py    BSD 3-Clause "New" or "Revised" License
def evaluate_checkpoint(sess,model):
    dataset = 'mnist'

    #with tf.Session() as sess:
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_samples=[]
    adv_y_samples=[]
    for ibatch in range(num_batches):
      bstart = ibatch * eval_batch_size
      bend = min(bstart + eval_batch_size, num_eval_examples)

      x_batch = mnist.test.images[bstart:bend,:]
      y_batch = mnist.test.labels[bstart:bend]

      dict_nat = {model.x_input: x_batch,
                  model.y_input: y_batch}

      x_batch_adv = attack.perturb(x_batch, y_batch, sess)
      if(ibatch == 0):
          adv_x_samples = x_batch_adv
          adv_y_samples = y_batch
      else:
          adv_x_samples = np.concatenate((adv_x_samples, x_batch_adv), axis = 0)
          adv_y_samples = np.concatenate((adv_y_samples, y_batch), axis = 0)
    if(args.attack == 'xent'):
      atck = 'pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    elif(args.attack == 'cw_pgd'):
      atck = 'cw_pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    else:
      f = open(os.path.join(args.log_dir, "custom.p"), "w")
    pickle.dump({"adv_input":adv_x_samples,"adv_labels":adv_y_samples},f)
    f.close() 
Example 42
Project: pinax-documents   Author: pinax   File: utils.py    MIT License
def convert_bytes(bytes):
    bytes = float(bytes)
    if bytes >= 1099511627776:
        size, srepr = bytes / 1099511627776, "TB"
    elif bytes >= 1073741824:
        size, srepr = bytes / 1073741824, "GB"
    elif bytes >= 1048576:
        size, srepr = bytes / 1048576, "MB"
    elif bytes >= 1024:
        size, srepr = bytes / 1024, "KB"
    else:
        size, srepr = bytes, " bytes"
    return "%d%s" % (math.ceil(size), srepr) 
Example 43
Project: pinax-documents   Author: pinax   File: models.py    MIT License
def percentage(self):
        return int(math.ceil((float(self.bytes_used) / self.bytes_total) * 100)) 
Example 44
Project: MusicDownloader   Author: wwwpf   File: search_manager.py    GNU General Public License v3.0
def get_page_num(self):
        return (math.ceil(self.total_song / self.spider.get_query_num())
                if self.spider else 0)
Example 45
Project: CLRS   Author: JasonVann   File: Fibonacci_Heap.py    MIT License
def D(n):
    import math
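    # thi is the golden ratio phi = (1 + sqrt(5)) / 2 ~ 1.618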
    thi = 0.5*(math.sqrt(5)+1)
    return math.ceil(math.log(n, thi)) 
Example 46
Project: CLRS   Author: JasonVann   File: vEB_Tree.py    MIT License
def __init__(self, u):
        self.min = None
        self.max = None
        self.u = u
        self.summary = None
        self.ul = math.floor(math.sqrt(self.u))
        self.uh = math.ceil(math.sqrt(self.u))
        #self.cluster = [None] * self.uh
        if not self.is_leaf():
            self.summary = vEB_Tree(self.uh)
            self.cluster = []
            for x in xrange(self.uh):
                self.cluster.append(vEB_Tree(self.ul)) 
Example 47
Project: CLRS   Author: JasonVann   File: Heap.py    MIT License
def parent(self, i):
        '''
        Returns the parent of the given node
        Eg. data[6] -> data[2], data[5] -> data[2]
        '''
        if i == 0:
            # Root doesn't have a parent
            return None

        idx_parent = math.ceil(i/2) - 1
        return idx_parent 
Example 48
Project: CLRS   Author: JasonVann   File: build_max_heap.py    MIT License
def build_max_heap(A:Heap):
    max_internal_idx = math.ceil(A.size/2)
    for i in range(max_internal_idx, -1, -1):
        max_heapify_recur(A, i) 
Example 49
Project: programsynthesishunting   Author: flexgp   File: math_functions.py    GNU General Public License v3.0
def percentile(sorted_list, p):
    """
    Returns the element corresponding to the p-th percentile
    in a sorted list

    :param sorted_list: The sorted list
    :param p: The percetile
    :return: The element corresponding to the percentile
    """

    return sorted_list[ceil(len(sorted_list) * p / 100) - 1] 
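
For instance (hypothetical input, assuming the module's from math import ceil):

percentile([1, 3, 5, 7], 50)   # ceil(4 * 50 / 100) - 1 = index 1 -> 3
percentile([1, 3, 5, 7], 100)  # ceil(4) - 1 = index 3 -> 7
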
Example 50
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: util.py    MIT License
def int_to_bytes(value, signed=False, width=None):
        """
        Converts an integer to a byte string

        :param value:
            The integer to convert

        :param signed:
            If the byte string should be encoded using two's complement

        :param width:
            None == auto, otherwise an integer of the byte width for the return
            value

        :return:
            A byte string
        """

        if width is None:
            if signed:
                if value < 0:
                    bits_required = abs(value + 1).bit_length()
                else:
                    bits_required = value.bit_length()
                if bits_required % 8 == 0:
                    bits_required += 1
            else:
                bits_required = value.bit_length()
            width = math.ceil(bits_required / 8) or 1
        return value.to_bytes(width, byteorder='big', signed=signed) 
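
A sketch of how the width computation plays out under Python 3 (illustrative values):

int_to_bytes(255)               # 8 bits -> width = ceil(8 / 8) = 1 -> b'\xff'
int_to_bytes(255, signed=True)  # 8 % 8 == 0 adds a sign bit -> width = 2 -> b'\x00\xff'
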
Example 51
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: keys.py    MIT License
def byte_size(self):
        """
        :return:
            The byte size of the public key, as an integer
        """

        return int(math.ceil(self.bit_size / 8)) 
Example 52
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: selectors.py    MIT License
def _wrap_poll(self, timeout=None):
            """ Wrapper function for select.poll.poll() so that
            _syscall_wrapper can work with only seconds. """
            if timeout is not None:
                if timeout <= 0:
                    timeout = 0
                else:
                    # select.poll.poll() has a resolution of 1 millisecond,
                    # round away from zero to wait *at least* timeout seconds.
                    timeout = math.ceil(timeout * 1e3)

            result = self._poll.poll(timeout)
            return result 
Example 53
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: selectors.py    MIT License
def select(self, timeout=None):
            if timeout is not None:
                if timeout <= 0:
                    timeout = 0.0
                else:
                    # select.epoll.poll() has a resolution of 1 millisecond
                    # but luckily takes seconds so we don't need a wrapper
                    # like PollSelector. Just for better rounding.
                    timeout = math.ceil(timeout * 1e3) * 1e-3
                timeout = float(timeout)
            else:
                timeout = -1.0  # epoll.poll() must have a float.

            # We always want at least 1 to ensure that select can be called
            # with no file descriptors registered. Otherwise will fail.
            max_events = max(len(self._fd_to_key), 1)

            ready = []
            fd_events = _syscall_wrapper(self._epoll.poll, True,
                                         timeout=timeout,
                                         maxevents=max_events)
            for fd, event_mask in fd_events:
                events = 0
                if event_mask & ~select.EPOLLIN:
                    events |= EVENT_WRITE
                if event_mask & ~select.EPOLLOUT:
                    events |= EVENT_READ

                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready 
Example 54
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: __init__.py    MIT License
def eta(self):
        return int(ceil(self.avg * self.remaining)) 
Example 55
Project: Random-Erasing   Author: zhunzhong07   File: __init__.py    Apache License 2.0
def eta(self):
        return int(ceil(self.avg * self.remaining)) 
Example 56
Project: esp-sdk-python   Author: EvidentSecurity   File: base.py    MIT License
def _build_links(self, data, page):
        current_page = page['number']
        last_page = math.ceil(float(len(data)) / page['size'])
        links = {"self": "http://localhost:3000/api/v2/not_the_real_url/but_useful_for_testing.json?page%5Bnumber%5D={0}&page%5Bsize%5D={1}".format(current_page, page['size'])}
        if current_page != 1:
            links["prev"] = "http://localhost:3000/api/v2/not_the_real_url/but_useful_for_testing.json?page%5Bnumber%5D={0}&page%5Bsize%5D={1}".format(current_page - 1, page['size'])
        if current_page != last_page:
            links["next"] = "http://localhost:3000/api/v2/not_the_real_url/but_useful_for_testing.json?page%5Bnumber%5D={0}&page%5Bsize%5D={1}".format(current_page + 1, page['size'])
            links["last"] = "http://localhost:3000/api/v2/not_the_real_url/but_useful_for_testing.json?page%5Bnumber%5D={0}&page%5Bsize%5D={1}".format(last_page, page['size'])
        return links 
Example 57
Project: L   Author: vaultah   File: images.py    MIT License
def setcover(self):
        path = consts.MEDIA_IMAGES / '{0}-{1}'.format(consts.COVER_IMAGE , self.name)
        if not path.exists():
            ratio = consts.COVER_RATIO[1] / consts.COVER_RATIO[0]
            nh = math.ceil(self.file.size[0] * ratio)
            cr = ImageOps.fit(self.file, (self.file.size[0], nh), BaseImage.ANTIALIAS)
            cr.save(str(path), quality=100) 
Example 58
Project: flasky   Author: RoseOu   File: __init__.py    MIT License
def pages(self):
        """The total number of pages"""
        if self.per_page == 0:
            pages = 0
        else:
            pages = int(ceil(self.total / float(self.per_page)))
        return pages 
Example 59
Project: flasky   Author: RoseOu   File: selectors.py    MIT License
def select(self, timeout=None):
            if timeout is None:
                timeout = -1
            elif timeout <= 0:
                timeout = 0
            else:
                # epoll_wait() has a resolution of 1 millisecond, round away
                # from zero to wait *at least* timeout seconds.
                timeout = math.ceil(timeout * 1e3) * 1e-3
            max_ev = len(self._fd_to_key)
            ready = []
            try:
                fd_event_list = wrap_error(self._epoll.poll, timeout, max_ev)
            except InterruptedError:
                return ready
            for fd, event in fd_event_list:
                events = 0
                if event & ~select.EPOLLIN:
                    events |= EVENT_WRITE
                if event & ~select.EPOLLOUT:
                    events |= EVENT_READ

                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready 
Example 60
Project: core   Author: lifemapper   File: gbifquery.py    GNU General Public License v3.0
def isReady(self):
      """
      @note: Never call this when in possession of a lock
      """
      if self._nubUUID is None:
         self._waitForGBIF()
      elif self._gbifQueryTime is not None:
         timeLeft = dt.DateTimeDelta(GBIF.WAIT_TIME - 
                                     (dt.gmt().mjd - self._gbifQueryTime))
         if timeLeft > 0:
            secondsLeft = ceil(timeLeft.seconds)
            self.log.info('Give GBIF a %d second break ...' % secondsLeft)
            time.sleep(secondsLeft)
            
# ............................................... 
Example 61
Project: core   Author: lifemapper   File: gbifquery.py    GNU General Public License v3.0
def _signInToGBIF(self, usr, pword):
      # create a password manager
      if usr is not None or pword is not None:
         passwordMgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
         passwordMgr.add_password('GBIF', GBIF.REST_URL, usr, pword)
         handler = urllib2.HTTPBasicAuthHandler(passwordMgr)
         
         # Create and Install opener - now all calls to urllib2.urlopen use our opener.
         opener = urllib2.build_opener(handler)
         urllib2.install_opener(opener)
         self.signedIn = True
      
# # ...............................................
#    def _giveGbifABreak(self):
#       """
#       @note: Never call this when in possession of the lock
#       """
#       if self._gbifQueryTime is not None:
#          timeLeft = dt.DateTimeDelta(GBIF.WAIT_TIME - 
#                                      (dt.gmt().mjd - self._gbifQueryTime))
#          if timeLeft > 0:
#             secondsLeft = ceil(timeLeft.seconds)
#             self.log.info('Give GBIF a %d second break ...' % secondsLeft)
#             time.sleep(secondsLeft)
            
# ............................................... 
Example 62
Project: fuku-ml   Author: fukuball   File: Utility.py    MIT License
def excute(self):

        for model in self.models:

            avg_error = 0

            validate_num = int(math.ceil(len(model.train_Y) / 10))

            model.train_Y = np.reshape(model.train_Y, (-1, 1))
            dataset = np.concatenate((model.train_X, model.train_Y), axis=1)
            np.random.shuffle(dataset)

            error = 0

            for i in range(10):

                model.train_X = np.concatenate((dataset[(i + 1) * validate_num:, :-1], dataset[:i * validate_num, :-1]), axis=0)
                model.train_Y = np.concatenate((dataset[(i + 1) * validate_num:, -1], dataset[:i * validate_num, -1]), axis=0)
                model.init_W()
                model.train()
                validate_X = dataset[i * validate_num:(i + 1) * validate_num, :-1]
                validate_Y = dataset[i * validate_num:(i + 1) * validate_num, -1]

                if hasattr(model, 'class_list'):
                    error = error + model.calculate_avg_error_all_class(validate_X, validate_Y, model.W)
                else:
                    error = error + model.calculate_avg_error(validate_X, validate_Y, model.W)

            model.train_X = dataset[:, :-1]
            model.train_Y = dataset[:, -1]

            dataset = None
            avg_error = error / 10
            self.avg_errors.append(avg_error)

        return self.avg_errors 
Example 63
Project: DeepLab_v3_plus   Author: songdejia   File: deeplab.py    MIT License
def forward(self, input):#input 1, 3, 512, 512
        x, low_level_features = self.resnet_features(input)#final_x:[1, 2048, 32, 32]  low_level_features:[1,256, 128, 128]
        x1 = self.aspp1(x)   #[1, 256, 32, 32]
        x2 = self.aspp2(x)   #[1, 256, 32, 32]
        x3 = self.aspp3(x)   #[1, 256, 32, 32]
        x4 = self.aspp4(x)   #[1, 256, 32, 32]
        x5 = self.global_avg_pool(x) #[1, 256, 1, 1]
        x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)

        x = torch.cat((x1, x2, x3, x4, x5), dim=1)

        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = F.interpolate(x, size=(int(math.ceil(input.size()[-2]/4)), int(math.ceil(input.size()[-1]/4))), mode='bilinear', align_corners=True)

        low_level_features = self.conv2(low_level_features)
        low_level_features = self.bn2(low_level_features)
        low_level_features = self.relu(low_level_features)

        x = torch.cat((x, low_level_features), dim=1)
        x = self.last_conv(x)

        x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)

        return x 
Example 64
Project: pastebin-monitor   Author: fabiospampinato   File: pastebin_crawler.py    GNU General Public License v2.0
def start ( self, refresh_time = 30, delay = 1, ban_wait = 5, flush_after_x_refreshes=100, connection_timeout=60 ):
        count = 0
        while True:
            status,pastes = self.get_pastes ()

            start_time = time.time()
            if status == self.OK:
                for paste in pastes:
                    paste_id = PyQuery ( paste ).attr('href')
                    self.new_checked_ids.append ( paste_id )
                    if paste_id not in self.prev_checked_ids:
                        self.check_paste ( paste_id )
                        time.sleep ( delay )
                    count += 1

                if count == flush_after_x_refreshes:
                    self.prev_checked_ids = self.new_checked_ids
                    count = 0
                else:
                    self.prev_checked_ids += self.new_checked_ids
                self.new_checked_ids = []

                elapsed_time = time.time() - start_time
                sleep_time = ceil(max(0,(refresh_time - elapsed_time)))
                if sleep_time > 0:
                    Logger().log('Waiting {:d} seconds to refresh...'.format(sleep_time), True)
                    time.sleep ( sleep_time )
            elif status == self.ACCESS_DENIED:
                Logger ().log ( 'Damn! It looks like you have been banned (probably temporarily)', True, 'YELLOW' )
                for n in range ( 0, ban_wait ):
                    Logger ().log ( 'Please wait ' + str ( ban_wait - n ) + ' minute' + ( 's' if ( ban_wait - n ) > 1 else '' ) )
                    time.sleep ( 60 )
            elif status == self.CONNECTION_FAIL:
                Logger().log ( 'Connection down. Waiting {:d} seconds and trying again'.format(connection_timeout), True, 'RED')
                time.sleep(connection_timeout)
            elif status == self.OTHER_ERROR:
                Logger().log('Unknown error. Maybe an encoding problem? Trying again.', True, 'RED')
                time.sleep(1) 
Example 65
Project: treelstm.pytorch   Author: dasguptar   File: utils.py    MIT License
def map_label_to_target(label, num_classes):
    target = torch.zeros(1, num_classes, dtype=torch.float, device='cpu')
    ceil = int(math.ceil(label))
    floor = int(math.floor(label))
    if ceil == floor:
        target[0, floor-1] = 1
    else:
        target[0, floor-1] = ceil - label
        target[0, ceil-1] = label - floor
    return target 
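
To see the soft-target split in action (hypothetical values): with label = 3.4 and num_classes = 5, floor = 3 and ceil = 4, so:

map_label_to_target(3.4, 5)
# tensor([[0.0000, 0.0000, 0.6000, 0.4000, 0.0000]])
# index 2 gets ceil - label = 0.6, index 3 gets label - floor = 0.4
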
Example 66
Project: synthetic-data-tutorial   Author: theodi   File: PrivBayes.py    MIT License
def calculate_k(num_attributes, num_tuples, target_usefulness=4, epsilon=0.1):
    """Calculate the maximum degree when constructing Bayesian networks. See PrivBayes Lemma 3."""
    default_k = 3
    initial_usefulness = usefulness_minus_target(default_k, num_attributes, num_tuples, 0, epsilon)
    if initial_usefulness > target_usefulness:
        return default_k
    else:
        arguments = (num_attributes, num_tuples, target_usefulness, epsilon)
        warnings.filterwarnings("error")
        try:
            ans = fsolve(usefulness_minus_target, int(num_attributes / 2), args=arguments)[0]
            ans = ceil(ans)
        except RuntimeWarning:
            print("Warning: k is not properly computed!")
            ans = default_k
        if ans < 1 or ans > num_attributes:
            ans = default_k
        return ans 
Example 67
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: fit.py    Apache License 2.0
def get_epoch_size(args, kv):
    return math.ceil(int(args.num_examples / kv.num_workers) / args.batch_size) 
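
Worked through with illustrative numbers: num_examples = 50000, kv.num_workers = 4 and batch_size = 128 give int(50000 / 4) = 12500 examples per worker, and math.ceil(12500 / 128) = 98 batches per epoch.
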
Example 68
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: main.py    Apache License 2.0
def to_target(x):
    target = np.zeros((1, num_classes))
    ceil = int(math.ceil(x))
    floor = int(math.floor(x))
    if ceil==floor:
        target[0][floor-1] = 1
    else:
        target[0][floor-1] = ceil - x
        target[0][ceil-1] = x - floor
    return mx.nd.array(target) 
Example 69
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: fit.py    Apache License 2.0
def get_epoch_size(args, kv):
    return math.ceil(int(args.num_examples / kv.num_workers) / args.batch_size) 
Example 70
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: rl_data.py    Apache License 2.0
def visual(X, show=True):
    X = X.transpose((0, 2, 3, 1))
    N = X.shape[0]
    n = int(math.ceil(math.sqrt(N)))
    h = X.shape[1]
    w = X.shape[2]
    buf = np.zeros((h*n, w*n, X.shape[3]), dtype=np.uint8)
    for i in range(N):
        x = i%n
        y = i//n
        buf[h*y:h*(y+1), w*x:w*(x+1), :] = X[i]
    if show:
        cv2.imshow('a', buf)
        cv2.waitKey(1)
    return buf 
Example 71
Project: cat-bbs   Author: aleju   File: common.py    MIT License
def to_aspect_ratio_add(image, target_ratio, pad_mode="constant", pad_cval=0, return_paddings=False):
    """Resize an image to a desired aspect ratio by adding pixels to it
    (usually black ones, i.e. zero-padding)."""
    height = image.shape[0]
    width = image.shape[1]
    ratio = width / height

    pad_top = 0
    pad_bottom = 0
    pad_left = 0
    pad_right = 0

    if ratio < target_ratio:
        # vertical image, height > width
        diff = (target_ratio * height) - width
        pad_left = int(math.ceil(diff / 2))
        pad_right = int(math.floor(diff / 2))
    elif ratio > target_ratio:
        # horizontal image, width > height
        diff = ((1/target_ratio) * width) - height
        pad_top = int(math.ceil(diff / 2))
        pad_bottom = int(math.floor(diff / 2))

    if any([val > 0 for val in [pad_top, pad_bottom, pad_left, pad_right]]):
        # constant_values creates error if pad_mode is not "constant"
        if pad_mode == "constant":
            image = np.pad(image, ((pad_top, pad_bottom),
                                   (pad_left, pad_right),
                                   (0, 0)),
                           mode=pad_mode, constant_values=pad_cval)
        else:
            image = np.pad(image, ((pad_top, pad_bottom),
                                   (pad_left, pad_right),
                                   (0, 0)),
                           mode=pad_mode)

    result_ratio = image.shape[1] / image.shape[0]
    assert target_ratio - 0.1 < result_ratio < target_ratio + 0.1, \
        "Wrong result ratio: " + str(result_ratio)

    if return_paddings:
        return image, (pad_top, pad_right, pad_bottom, pad_left)
    else:
        return image 
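For example, padding a tall 100x50 image to a square (a sketch with illustrative values):

import numpy as np

img = np.zeros((100, 50, 3), dtype=np.uint8)   # height=100, width=50
out, pads = to_aspect_ratio_add(img, 1.0, return_paddings=True)
# diff = 1.0 * 100 - 50 = 50; ceil(25.0) = 25 left, floor(25.0) = 25 right
assert out.shape == (100, 100, 3) and pads == (0, 25, 0, 25)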
Example 72
Project: pybench   Author: pentschev   File: benchmark_array.py    Apache License 2.0 4 votes vote down vote up
def test_Stencil(benchmark, module, shape):

    m = importlib.import_module(module)

    @numba.stencil
    def _smooth(x):
        return (
            x[-1, -1]
            + x[-1, 0]
            + x[-1, 1]
            + x[0, -1]
            + x[0, 0]
            + x[0, 1]
            + x[1, -1]
            + x[1, 0]
            + x[1, 1]
        ) // 9

    @numba.njit
    def smooth_cpu(x, out):
        out[:] = _smooth(x)  # write into the caller's buffer; plain `out = ...` would discard the result

    @numba.cuda.jit
    def _smooth_gpu(x, out):
        i, j = numba.cuda.grid(2)
        n, m = x.shape
        if 1 <= i < n - 1 and 1 <= j < m - 1:
            out[i, j] = (
                x[i - 1, j - 1]
                + x[i - 1, j]
                + x[i - 1, j + 1]
                + x[i, j - 1]
                + x[i, j]
                + x[i, j + 1]
                + x[i + 1, j - 1]
                + x[i + 1, j]
                + x[i + 1, j + 1]
            ) // 9

    def smooth_gpu(x, out):
        import math

        threadsperblock = (16, 16)
        blockspergrid_x = math.ceil(x.shape[0] / threadsperblock[0])
        blockspergrid_y = math.ceil(x.shape[1] / threadsperblock[1])
        blockspergrid = (blockspergrid_x, blockspergrid_y)

        _smooth_gpu[blockspergrid, threadsperblock](x, out)

    data_func = lambda shape: {
        "in": m.ones(shape, dtype="int8"),
        "out": m.zeros(shape, dtype="int8"),
    }
    f = smooth_cpu if module == "numpy" else smooth_gpu
    compute_func = lambda data: f(data["in"], data["out"])

    run_benchmark(benchmark, m, compute_func, data_func, shape) 
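The ceil in the grid-size computation is what guarantees the launch covers the whole array; worked through for one illustrative shape:

import math

shape, threadsperblock = (1000, 1000), (16, 16)
blockspergrid = tuple(math.ceil(s / t) for s, t in zip(shape, threadsperblock))
# 1000 / 16 = 62.5 -> 63 blocks per axis; 63 * 16 = 1008 threads cover all 1000 indices
assert blockspergrid == (63, 63)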
Example 73
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: flow.py    MIT License 4 votes vote down vote up
def predict(self):
    inp_path = self.FLAGS.imgdir
    all_inps = os.listdir(inp_path)
    all_inps = [i for i in all_inps if self.framework.is_inp(i)]
    if not all_inps:
        msg = 'Failed to find any images in {}.'
        exit('Error: {}'.format(msg.format(inp_path)))

    batch = min(self.FLAGS.batch, len(all_inps))

    # predict in batches
    n_batch = int(math.ceil(len(all_inps) / batch))
    for j in range(n_batch):
        from_idx = j * batch
        to_idx = min(from_idx + batch, len(all_inps))

        # collect images input in the batch
        this_batch = all_inps[from_idx:to_idx]
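        # note: pool is a thread pool created elsewhere in flow.py, not defined in this snippet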
        inp_feed = pool.map(lambda inp: (
            np.expand_dims(self.framework.preprocess(
                os.path.join(inp_path, inp)), 0)), this_batch)

        # Feed to the net
        feed_dict = {self.inp : np.concatenate(inp_feed, 0)}    
        self.say('Forwarding {} inputs ...'.format(len(inp_feed)))
        start = time.time()
        out = self.sess.run(self.out, feed_dict)
        stop = time.time(); last = stop - start
        self.say('Total time = {}s / {} inps = {} ips'.format(
            last, len(inp_feed), len(inp_feed) / last))

        # Post processing
        self.say('Post processing {} inputs ...'.format(len(inp_feed)))
        start = time.time()
        pool.map(lambda p: (lambda i, prediction:
            self.framework.postprocess(
               prediction, os.path.join(inp_path, this_batch[i])))(*p),
            enumerate(out))
        stop = time.time(); last = stop - start

        # Timing
        self.say('Total time = {}s / {} inps = {} ips'.format(
            last, len(inp_feed), len(inp_feed) / last)) 
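The batching arithmetic above, for an illustrative directory of 25 images with batch size 8:

import math

n_images, batch = 25, 8
n_batch = int(math.ceil(n_images / batch))   # 25 / 8 = 3.125 -> 4 batches
sizes = [min((j + 1) * batch, n_images) - j * batch for j in range(n_batch)]
assert sizes == [8, 8, 8, 1]   # the final batch holds the remainder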
Example 74
Project: uci-download-process   Author: cperales   File: fold_data.py    MIT License 4 votes vote down vote up
def k_folding(data_folder, log_file, file=None, classification=True):
    dir_file_pairs = dir_file(data_folder, file)

    # SPLITTING ONE DATASET FILE IN N_FOLDS
    n_fold = 10
    with open(log_file, 'w') as f:
        for dir_file_pair in dir_file_pairs:
            try:
                dir_name, file_name = dir_file_pair
                # print('Folding {}'.format(file_name))
                df_file = pd.read_csv(os.path.join(dir_name, file_name),
                                      sep=r'\s+',
                                      header=None)
                target_position = df_file.columns[-1]
                x = df_file[[i for i in range(target_position)]]
                y = df_file[[target_position]]
                # Test whether there are enough instances for an n-fold split
                count = [np.count_nonzero(y == label) for label in np.unique(y)]
                if np.min(count) < 2:
                    raise ValueError('Not enough elements of one label')
                rep = np.max(count)  # if even the maximum count is not enough for n folds, replicate below
                if n_fold > rep:
                    times = math.ceil(n_fold / rep)
                    x = pd.concat(times * [x])
                    y = pd.concat(times * [y])
                # Shuffle=False in order to preserve the original row order
                i = 0
                file = file_name.replace('.data', '')
                if classification is True:
                    kf = StratifiedKFold(n_splits=n_fold, shuffle=False)
                else:
                    kf = KFold(n_splits=n_fold, shuffle=True)
                for train_index, test_index in kf.split(X=x, y=y):
                    x_train_fold = x.iloc[train_index]
                    y_train_fold = y.iloc[train_index]
                    train_fold = pd.concat([x_train_fold, y_train_fold], axis=1)
                    train_fold_name = '.'.join(['_'.join(['train', file]), str(i)])
                    train_fold_name_path = os.path.join(dir_name, train_fold_name)
                    train_fold.to_csv(train_fold_name_path,
                                      sep=' ',
                                      header=None,
                                      index=False)

                    x_test_fold = x.iloc[test_index]
                    y_test_fold = y.iloc[test_index]
                    test_fold = pd.concat([x_test_fold, y_test_fold], axis=1)
                    test_fold_name = '.'.join(['_'.join(['test', file]), str(i)])
                    test_fold_name_path = os.path.join(dir_name, test_fold_name)
                    test_fold.to_csv(test_fold_name_path,
                                     sep=' ',
                                     header=None,
                                     index=False)

                    i += 1
            except ValueError as e:
                print('{}, so {} can\'t be stratified'.format(e, file_name))
                f.write(os.path.join('processed/', file_name))
                f.write('\n')
                shutil.rmtree(dir_name) 
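The oversampling step replicates the frame just enough to allow 10 folds; e.g. if the most frequent label occurs only 3 times (illustrative values):

import math

n_fold, rep = 10, 3
times = math.ceil(n_fold / rep)   # ceil(3.33...) = 4 copies
assert times * rep >= n_fold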
Example 75
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: util.py    MIT License 4 votes vote down vote up
def int_to_bytes(value, signed=False, width=None):
        """
        Converts an integer to a byte string

        :param value:
            The integer to convert

        :param signed:
            If the byte string should be encoded using two's complement

        :param width:
            None == auto, otherwise an integer of the byte width for the return
            value

        :return:
            A byte string
        """

        # Handle negatives in two's complement
        is_neg = False
        if signed and value < 0:
            is_neg = True
            bits = int(math.ceil(len('%x' % abs(value)) / 2.0) * 8)
            value = (value + (1 << bits)) % (1 << bits)

        hex_str = '%x' % value
        if len(hex_str) & 1:
            hex_str = '0' + hex_str

        output = hex_str.decode('hex')

        if signed and not is_neg and ord(output[0:1]) & 0x80:
            output = b'\x00' + output

        if width is not None:
            if is_neg:
                pad_char = b'\xFF'
            else:
                pad_char = b'\x00'
            output = (pad_char * (width - len(output))) + output
        elif is_neg and ord(output[0:1]) & 0x80 == 0:
            output = b'\xFF' + output

        return output 
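Note that hex_str.decode('hex') only works on Python 2, so the function above is the library's Python-2 branch. A rough Python 3 equivalent (a sketch, not the library's API; widths can come out one byte wider than strictly minimal for some signed values):

import math

def int_to_bytes_py3(value, signed=False, width=None):
    # int.to_bytes handles two's complement itself
    if width is None:
        bits = value.bit_length() + (1 if signed else 0)
        width = max(1, math.ceil(bits / 8))
    return value.to_bytes(width, byteorder='big', signed=signed)

assert int_to_bytes_py3(255) == b'\xff'
assert int_to_bytes_py3(-1, signed=True) == b'\xff'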
Example 76
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: _int.py    MIT License 4 votes vote down vote up
def inverse_mod(a, p):
            """
            Compute the modular inverse of a (mod p)

            :param a:
                An integer

            :param p:
                An integer

            :return:
                An integer
            """

            ctx = libcrypto.BN_CTX_new()

            a_bytes = int_to_bytes(abs(a))
            p_bytes = int_to_bytes(abs(p))

            a_buf = buffer_from_bytes(a_bytes)
            a_bn = libcrypto.BN_bin2bn(a_buf, len(a_bytes), null())
            if a < 0:
                libcrypto.BN_set_negative(a_bn, 1)

            p_buf = buffer_from_bytes(p_bytes)
            p_bn = libcrypto.BN_bin2bn(p_buf, len(p_bytes), null())
            if p < 0:
                libcrypto.BN_set_negative(p_bn, 1)

            r_bn = libcrypto.BN_mod_inverse(null(), a_bn, p_bn, ctx)
            r_len_bits = libcrypto.BN_num_bits(r_bn)
            r_len = int(math.ceil(r_len_bits / 8))
            r_buf = buffer_from_bytes(r_len)
            libcrypto.BN_bn2bin(r_bn, r_buf)
            r_bytes = bytes_from_buffer(r_buf, r_len)
            result = int_from_bytes(r_bytes)

            libcrypto.BN_free(a_bn)
            libcrypto.BN_free(p_bn)
            libcrypto.BN_free(r_bn)
            libcrypto.BN_CTX_free(ctx)

            return result 
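On Python 3.8+, the same modular inverse is available without OpenSSL via the three-argument built-in pow (a minimal sketch, not oscrypto's code path):

def inverse_mod_py(a, p):
    # pow(a, -1, p) raises ValueError when a and p are not coprime
    return pow(a, -1, p)

assert (inverse_mod_py(3, 7) * 3) % 7 == 1   # 3 * 5 = 15 = 2 * 7 + 1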
Example 77
Project: deep-learning-note   Author: wdxtub   File: 2_tf_linear.py    MIT License 4 votes vote down vote up
def stochasticGradientDescent(X, Y, model, learningRate=0.01,
                              miniBatchFraction=0.01, epoch=10000, tol=1.e-6):
    method = tf.train.GradientDescentOptimizer(learning_rate=learningRate)
    optimizer = method.minimize(model['loss_function'])

    # add summary logging
    tf.summary.scalar('loss_function1', model['loss_function'])
    tf.summary.histogram('params1', model['model_params'])
    tf.summary.scalar('first_param1', tf.reduce_mean(model['model_params'][0]))
    tf.summary.scalar('last_param1', tf.reduce_mean(model['model_params'][-1]))
    summary = tf.summary.merge_all()
    # after the run finishes, execute: tensorboard --logdir logs/
    summaryWriter = createSummaryWriter('logs/sto_gradient_descent')


    # start running TF
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)

    # iterative gradient descent
    step = 0
    batchSize = int(X.shape[0] * miniBatchFraction)
    batchNum = int(math.ceil(1 / miniBatchFraction))
    prevLoss = np.inf
    diff = np.inf
    # stop iterating once the change in the loss falls below the tolerance or the epoch limit is reached
    while (step < epoch) & (diff > tol):
        for i in range(batchNum):
            # pick the mini-batch of training data
            batchX = X[i * batchSize: (i+1) * batchSize]
            batchY = Y[i * batchSize: (i+1) * batchSize]
            # update the model parameters
            sess.run([optimizer],
                     feed_dict={
                         model['independent_variable']: batchX,
                         model['dependent_variable']: batchY
                     })
            # compute the loss over the full dataset
            summaryStr, loss = sess.run(
                [summary, model['loss_function']],
                feed_dict={
                    model['independent_variable']: X,
                    model['dependent_variable']: Y
                }
            )
            summaryWriter.add_summary(summaryStr, step)
            # compute the change in the loss
            diff = abs(prevLoss - loss)
            prevLoss = loss
            if diff <= tol:
                break
        step += 1 
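The mini-batch count falls out of the fraction directly; with the default miniBatchFraction of 0.01:

import math

batchNum = int(math.ceil(1 / 0.01))   # ceil(100.0) = 100 mini-batches per epoch
assert batchNum == 100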
Example 78
Project: L   Author: vaultah   File: images.py    MIT License 4 votes vote down vote up
def _store_n_link(cls, acct, file, allow_gif=False):
        # `file` argument must be provided
        content = file.read()
        # Keep the original object unmodified.
        # It won't be used anywhere in this function
        file.seek(0)

        if len(content) > consts.MAX_IMAGE_SIZE:
            raise ValueError('Image is too large')

        try:
            # Try to get image type
            img = BaseImage.open(io.BytesIO(content))
            if img.format not in cls._allowed.union({'GIF'} if allow_gif else set()):
                raise ValueError
        except (IOError, ValueError) as e:
            raise ValueError('Invalid image type') from None


        name = '{}.{}'.format(utils.unique_id()[0], img.format.lower())
        sizes = (consts.ORIGINAL_IMAGE, consts.SQUARE_THUMBNAIL, consts.SHRINKED_IMAGE)
        names = [consts.MEDIA_IMAGES / '{}-{}'.format(x, name) for x in sizes]

        consts.MEDIA_IMAGES.mkdir(parents=True, exist_ok=True)
        
        # Save the full image without changing a byte
        with names[0].open('wb') as unmodified:
            unmodified.write(content)

        # Construct `PIL.Image` instance and make a thumbnail and a shrinked copy

        # Thumbnails are always square
        ImageOps.fit(img, (100, 100), BaseImage.ANTIALIAS).save(str(names[1]), quality=100)
        # Shrinked image is a fixed-width image derived from the full-size image
        # Don't modify GIF images
        if consts.SHRINKED_WIDTH < img.size[0] and img.format != 'GIF':
            nh = math.ceil(consts.SHRINKED_WIDTH / img.size[0] * img.size[1])
            shrinked = ImageOps.fit(img, (consts.SHRINKED_WIDTH, nh), BaseImage.ANTIALIAS)
            shrinked.save(str(names[2]), quality=100)
        else:
            with names[2].open('wb') as shrinked:
                shrinked.write(content)

        # Link the image to `acct`, create a new `Image` instance and return it
        data = {'name': name, 'owner': acct.id, 'id': utils.unique_id()[0], 'score': 0}
        cls.collection.insert_one(data)
        data['owner'] = acct
        data['file'] = img
        return data 
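The shrinked-copy height preserves the aspect ratio; e.g. with a hypothetical SHRINKED_WIDTH of 600 and a 1200x800 source image:

import math

SHRINKED_WIDTH = 600                       # illustrative; the real value lives in consts
w, h = 1200, 800
nh = math.ceil(SHRINKED_WIDTH / w * h)     # 600 / 1200 * 800 = 400.0 -> 400
assert (SHRINKED_WIDTH, nh) == (600, 400)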
Example 79
Project: AnisotropicMultiStreamCNN   Author: AnnekeMeyer   File: utils.py    MIT License 4 votes vote down vote up
def sizeCorrectionImage(img, factor, imgSize):
    """Assumes the input image size is larger than minImgSize, except in the z-dimension.

    factor matters so that the image can be resampled by 1/factor (e.g. due to
    slice thickness) without errors.
    """
    size = img.GetSize()
    correction = False
    # check if bounding box size is multiple of 'factor' and correct if necessary
    # x-direction
    if size[0] % factor != 0:
        cX = factor - (size[0] % factor)
        correction = True
    else:
        cX = 0
    # y-direction
    if size[1] % factor != 0:
        cY = factor - (size[1] % factor)
        correction = True
    else:
        cY = 0

    if size[2] != imgSize:
        cZ = (imgSize - size[2])
        # if the z size is larger than imgSize, crop it (customized to the data
        # at hand; better if ROI extraction crops the image)
        if cZ < 0:
            print('image gets filtered')
            cropFilter = sitk.CropImageFilter()
            cropFilter.SetUpperBoundaryCropSize([0, 0, int(math.floor(-cZ/2))])
            cropFilter.SetLowerBoundaryCropSize([0, 0, int(math.ceil(-cZ/2))])
            img = cropFilter.Execute(img)
            cZ = 0
        else:
            correction = True
    else:
        cZ = 0

    # if correction is necessary, increase size of image with padding
    if correction:
        filter = sitk.ConstantPadImageFilter()
        filter.SetPadLowerBound([int(math.floor(cX/2)), int(math.floor(cY/2)), int(math.floor(cZ/2))])
        filter.SetPadUpperBound([int(math.ceil(cX/2)), int(math.ceil(cY/2)), int(math.ceil(cZ/2))])
        filter.SetConstant(0)
        outPadding = filter.Execute(img)
        return outPadding

    else:
        return img 
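The floor/ceil split spreads any odd correction across the two borders; e.g. for factor 16 and an x-size of 102 (illustrative values):

import math

size_x, factor = 102, 16
cX = factor - (size_x % factor)                        # 16 - 6 = 10 voxels to add
lower, upper = math.floor(cX / 2), math.ceil(cX / 2)   # 5 below, 5 above
assert (size_x + lower + upper) % factor == 0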
Example 80
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: mlp.py    Apache License 2.0 4 votes vote down vote up
def test_mlp(args):
    # get parameters
    prefix = './mlp'
    batch_size = 100
    pruning_switch_epoch = [int(i) for i in args.pruning_switch_epoch.split(',')]
    num_epoch = pruning_switch_epoch[-1]
    batches_per_epoch = ceil(60000.0/batch_size)
    weight_sparsity = args.weight_sparsity
    bias_sparsity = args.bias_sparsity
    weight_threshold = args.weight_threshold
    bias_threshold = args.bias_threshold
    if args.weight_sparsity:
        weight_sparsity = [float(i) for i in args.weight_sparsity.split(',')]
        bias_sparsity = [float(i) for i in args.bias_sparsity.split(',')]
    else:
        weight_threshold = [float(i) for i in args.weight_threshold.split(',')]
        bias_threshold = [float(i) for i in args.bias_threshold.split(',')]

    # get symbols and iterators
    sym = get_symbol()
    download_data()
    (train, val) = get_iters(batch_size)

    # fit model
    model = mx.mod.Module(
        sym,
        context=[mx.cpu(i) for i in range(2)],
        data_names=['data'],
        label_names=['sm_label'])
    optimizer_params = {
        'learning_rate'             : 0.1,
        'wd'                        : 0.004,
        'momentum'                  : 0.9,
        'pruning_switch_epoch'      : pruning_switch_epoch,
        'batches_per_epoch'         : batches_per_epoch,
        'weight_sparsity'           : weight_sparsity,
        'bias_sparsity'             : bias_sparsity,
        'weight_threshold'          : weight_threshold,
        'bias_threshold'            : bias_threshold}
    logging.info('Start training...')
    model.fit(train,
        eval_data=val,
        eval_metric='acc',
        epoch_end_callback=mx.callback.do_checkpoint(prefix),
        num_epoch=num_epoch,
        optimizer='sparsesgd',
        optimizer_params=optimizer_params)
    logging.info('Finish training...')

    # remove files
    for i in range(num_epoch):
        os.remove('%s-%04d.params' % (prefix, i + 1))
    os.remove('%s-symbol.json' % prefix)
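Here ceil only matters when the dataset size is not a multiple of the batch size; with these values the division is exact:

from math import ceil

batches_per_epoch = ceil(60000.0 / 100)   # 600.0 -> 600; a partial final batch would round up
assert batches_per_epoch == 600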