Python math.floor() Examples

The following are code examples for showing how to use math.floor(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: unicorn-hat-hd   Author: pimoroni   File: demo.py    MIT License 6 votes vote down vote up
def checker(x, y, step):
    """Render one pixel of a rotating, scrolling, pulsing checkerboard.

    :param x: pixel x coordinate on the display
    :param y: pixel y coordinate on the display
    :param step: animation frame counter driving rotation/scroll/zoom
    :return: (r, g, b) tuple with components scaled into 0-255
    """
    # centre the coordinate system on the middle of the display
    x -= (u_width / 2)
    y -= (u_height / 2)
    # rotate the coordinates by an angle that advances with `step`
    angle = (step / 10.0)
    s = math.sin(angle)
    c = math.cos(angle)
    xs = x * c - y * s
    ys = x * s + y * c
    # scroll the pattern around in a slow circle
    xs -= math.sin(step / 200.0) * 40.0
    ys -= math.cos(step / 200.0) * 40.0
    # pulse the zoom level over time
    # (the original also computed `scale = (step % 20) / 20` here, but that
    # value was immediately overwritten — dead stores removed)
    scale = (math.sin(step / 50.0) / 8.0) + 0.25
    xs *= scale
    ys *= scale
    # fractional position inside the current checker cell
    xo = abs(xs) - int(abs(xs))
    yo = abs(ys) - int(abs(ys))
    # alternate cells; draw a bright inset square on the "light" cells
    v = 0 if (math.floor(xs) + math.floor(ys)) % 2 else 1 if xo > .1 and yo > .1 else .5
    r, g, b = hue_to_rgb[step % 255]
    return (r * (v * 255), g * (v * 255), b * (v * 255))


# weeee waaaah 
Example 2
Project: clikit   Author: sdispater   File: progress_bar.py    MIT License 6 votes vote down vote up
def _formatter_bar(self):
        if self._max:
            complete_bars = math.floor(self._percent * self.bar_width)
        else:
            complete_bars = math.floor(self.get_progress() % self.bar_width)

        display = self.get_bar_character() * int(complete_bars)

        if complete_bars < self.bar_width:
            empty_bars = (
                self.bar_width
                - complete_bars
                - len(self._io.remove_format(self.progress_char))
            )
            display += self.progress_char + self.empty_bar_char * int(empty_bars)

        return display 
Example 3
Project: python-samples   Author: dek-odoo   File: dek_program072.py    Apache License 2.0 6 votes vote down vote up
def main(numlist, searchnum):
    """Iterative binary search.

    :param numlist: sequence sorted in ascending order
    :param searchnum: value to locate
    :return: index of `searchnum` in `numlist`, or -1 if absent
    """
    bottom = 0
    top = len(numlist) - 1
    index = -1

    while top >= bottom and index == -1:
        # integer midpoint; replaces int(math.floor((top + bottom) / 2.0)),
        # which round-tripped through float for no benefit
        mid = (top + bottom) // 2

        if numlist[mid] > searchnum:
            top = mid - 1
        elif numlist[mid] == searchnum:
            index = mid
        else:
            bottom = mid + 1

    return index
Example 4
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: resnext.py    MIT License 6 votes vote down vote up
def __init__(self, inplanes, planes, cardinality, base_width, stride=1, downsample=None):
        """Build a ResNeXt bottleneck: 1x1 reduce -> 3x3 grouped -> 1x1 expand."""
        super(ResNeXtBottleneck, self).__init__()

        # per-group width, scaled from the 64-wide reference configuration
        width = int(math.floor(planes * (base_width / 64.0)))
        group_width = width * cardinality

        # 1x1 convolution reducing to the grouped width
        self.conv_reduce = nn.Conv2d(inplanes, group_width, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn_reduce = nn.BatchNorm2d(group_width)

        # 3x3 grouped convolution; carries the spatial stride
        self.conv_conv = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride, padding=1,
                                   groups=cardinality, bias=False)
        self.bn = nn.BatchNorm2d(group_width)

        # 1x1 convolution expanding back to planes * 4 channels
        self.conv_expand = nn.Conv2d(group_width, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn_expand = nn.BatchNorm2d(planes * 4)

        self.downsample = downsample
Example 5
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: utils.py    MIT License 6 votes vote down vote up
def train_valid_split(dataset, test_size=0.25, shuffle=False, random_seed=0):
    """ Return a list of splitted indices from a DataSet.
    Indices can be used with DataLoader to build a train and validation set.

    Arguments:
        dataset: anything with a length (e.g. a torch Dataset)
        test_size: float between 0 and 1 (percentage split) or an int
            (fixed number of validation samples)
        shuffle: whether to shuffle indices before splitting
        random_seed: seed used when shuffle is True
    Returns:
        (train_indices, valid_indices)
    """
    length = len(dataset)
    # Cover every sample: the original started at 1 and silently dropped
    # index 0 (and produced only length - 1 indices).
    indices = list(range(length))

    if shuffle:
        random.seed(random_seed)
        random.shuffle(indices)

    if isinstance(test_size, float):
        split = floor(test_size * length)
    elif isinstance(test_size, int):
        split = test_size
    else:
        # The original formatted the builtin `str` type here instead of the
        # offending value.
        raise ValueError('%s should be an int or a float' % str(test_size))
    return indices[split:], indices[:split]
Example 6
Project: AnisotropicMultiStreamCNN   Author: AnnekeMeyer   File: preprocessing.py    MIT License 6 votes vote down vote up
def sizeCorrectionBoundingBox(boundingBox, newSize, factor):
    """Grow/centre a bounding box to `newSize` in x and y, aligned to `factor`.

    :param boundingBox: sequence of six values: start (x, y, z) followed by
        size (x, y, z)
    :param newSize: target extent for the x and y axes (z is left unchanged)
    :param factor: alignment required for the x/y start indices (essential if
        the ROI needs to be extracted from a non-isotropic image)
    :return: (start, size) lists with x/y recentred, aligned and resized
    """
    start = list(boundingBox[0:3])
    size = list(boundingBox[3:6])

    # x and y get identical treatment; this loop replaces the original's
    # duplicated per-axis code.
    for axis in (0, 1):
        # Shift the start back by half the growth so the box stays centred,
        # clamping at the image border.
        aligned = max(0, start[axis] - math.floor((newSize - size[axis]) / 2))
        # Snap down to the nearest multiple of `factor`.
        if aligned % factor != 0:
            aligned = int(aligned - aligned % factor)
        start[axis] = aligned
        size[axis] = newSize

    return start, size
Example 7
Project: FCOS_GluonCV   Author: DetectionTeamUCAS   File: cifarresnext.py    Apache License 2.0 6 votes vote down vote up
def __init__(self, channels, cardinality, bottleneck_width,
                 stride, downsample=False, norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        """CIFAR ResNeXt block: 1x1 -> grouped 3x3 -> 1x1, with optional downsample."""
        super(CIFARBlock, self).__init__(**kwargs)
        # per-group width scaled from the 64-wide reference
        bottleneck = int(math.floor(channels * (bottleneck_width / 64)))
        group_width = cardinality * bottleneck

        def make_norm():
            # fresh normalisation layer carrying the configured kwargs
            return norm_layer(**(norm_kwargs or {}))

        self.body = nn.HybridSequential(prefix='')
        self.body.add(nn.Conv2D(group_width, kernel_size=1, use_bias=False))
        self.body.add(make_norm())
        self.body.add(nn.Activation('relu'))
        self.body.add(nn.Conv2D(group_width, kernel_size=3, strides=stride, padding=1,
                                groups=cardinality, use_bias=False))
        self.body.add(make_norm())
        self.body.add(nn.Activation('relu'))
        self.body.add(nn.Conv2D(channels * 4, kernel_size=1, use_bias=False))
        self.body.add(make_norm())

        if downsample:
            self.downsample = nn.HybridSequential(prefix='')
            self.downsample.add(nn.Conv2D(channels * 4, kernel_size=1, strides=stride,
                                          use_bias=False))
            self.downsample.add(make_norm())
        else:
            self.downsample = None
Example 8
Project: cards.py   Author: jhauberg   File: util.py    MIT License 6 votes vote down vote up
def pretty_size(size_in_bytes: int) -> str:
    """ Return a pretty representation of a file size. """

    if size_in_bytes <= 0:
        return 'No content'

    sizes = ('B', 'KB', 'MB')

    # index into `sizes`: 0 for bytes, 1 for KB, 2 for MB
    size_index = int(math.floor(math.log(size_in_bytes, 1024)))

    if size_index > len(sizes) - 1:
        # Out of range for `sizes` means at least 1024**3 bytes, i.e. 1 GB;
        # the original message incorrectly said '>1 TB'.
        return '>1 GB'

    size = round(size_in_bytes / math.pow(1024, size_index), 2)
    size_format = sizes[size_index]

    # whole numbers for B/KB, two decimals for MB
    return '{0:.{precision}f} {1}'.format(
        size, size_format, precision=(2 if size_index > 1 else 0))
Example 9
Project: autolims   Author: scottbecker   File: utils.py    MIT License 5 votes vote down vote up
def floor_volume(volume):
    """
    Return the given volume floored to a whole number of microliters.
    """
    microliters = volume.to('microliter').magnitude
    return ul(math.floor(microliters))
Example 10
Project: clikit   Author: sdispater   File: progress_bar.py    MIT License 5 votes vote down vote up
def _overwrite(self, message):
        """
        Overwrites a previous message to the output.

        Pads each new line to the width of the previous render so stale
        characters are fully covered, then moves the cursor back (or clears
        the section) before rewriting.
        """
        lines = message.split("\n")

        # Append whitespace to match the line's length
        if self._last_messages_length is not None:
            for i, line in enumerate(lines):
                if self._last_messages_length > len(self._io.remove_format(line)):
                    lines[i] = line.ljust(self._last_messages_length, "\x20")

        if self._should_overwrite:
            if isinstance(self._io.error_output, SectionOutput):
                # NOTE(review): this divides the *number* of lines (not their
                # length) by the terminal width — presumably meant to account
                # for wrapped lines; confirm against upstream clikit.
                lines_to_clear = (
                    int(math.floor(len(lines) / self._io.terminal_dimensions.width))
                    + self._format_line_count
                    + 1
                )
                self._io.error_output.clear(lines_to_clear)
            else:
                # move back to the beginning of the progress bar before redrawing it
                # ("\x0D" is carriage return)
                self._io.error("\x0D")

                if self._format_line_count:
                    # ANSI escape: move the cursor up over the extra format lines
                    self._io.error("\033[{}A".format(self._format_line_count))
        elif self._step > 0:
            # move to new line
            self._io.error_line("")

        self._io.error("\n".join(lines))
        self._io.error_output.flush()

        # Remember the widest rendered line so the next call can pad over it.
        self._last_messages_length = 0

        for line in lines:
            length = len(self._io.remove_format(line))
            if length > self._last_messages_length:
                self._last_messages_length = length
Example 11
Project: clikit   Author: sdispater   File: progress_bar.py    MIT License 5 votes vote down vote up
def _formatter_percent(self):
        return int(math.floor(self._percent * 100)) 
Example 12
Project: pyblish-win   Author: pyblish   File: fractions.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def __floordiv__(a, b):
        """a // b

        Floor division (Python 2 era code): exact integer arithmetic when the
        quotient is Rational, otherwise math.floor of the float quotient.
        """
        # Will be math.floor(a / b) in 3.0.
        div = a / b
        if isinstance(div, Rational):
            # trunc(math.floor(div)) doesn't work if the rational is
            # more precise than a float because the intermediate
            # rounding may cross an integer boundary.
            return div.numerator // div.denominator
        else:
            return math.floor(div)
Example 13
Project: pyblish-win   Author: pyblish   File: fractions.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def __rfloordiv__(b, a):
        """a // b

        Reflected floor division (Python 2 era code): exact integer arithmetic
        when the quotient is Rational, otherwise math.floor of the quotient.
        """
        # Will be math.floor(a / b) in 3.0.
        div = a / b
        if isinstance(div, Rational):
            # trunc(math.floor(div)) doesn't work if the rational is
            # more precise than a float because the intermediate
            # rounding may cross an integer boundary.
            return div.numerator // div.denominator
        else:
            return math.floor(div)
Example 14
Project: pyblish-win   Author: pyblish   File: aifc.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def _write_float(f, x):
    """Write x to file f as an 80-bit extended-precision float.

    Layout: one unsigned short holding sign bit | 15-bit biased exponent,
    followed by two unsigned longs holding the high and low 32 bits of the
    64-bit mantissa (the AIFF "extended" sample-rate format).
    NOTE: Python 2 code — uses the `long` builtin.
    """
    import math
    if x < 0:
        # record the sign bit separately and continue with the magnitude
        sign = 0x8000
        x = x * -1
    else:
        sign = 0
    if x == 0:
        # exact zero: all fields zero
        expon = 0
        himant = 0
        lomant = 0
    else:
        # split into mantissa in [0.5, 1) and binary exponent
        fmant, expon = math.frexp(x)
        # fmant != fmant is the NaN test (NaN compares unequal to itself)
        if expon > 16384 or fmant >= 1 or fmant != fmant: # Infinity or NaN
            expon = sign|0x7FFF
            himant = 0
            lomant = 0
        else:                   # Finite
            expon = expon + 16382   # apply the extended-float exponent bias
            if expon < 0:           # denormalized
                fmant = math.ldexp(fmant, expon)
                expon = 0
            expon = expon | sign
            # high 32 bits of the mantissa...
            fmant = math.ldexp(fmant, 32)
            fsmant = math.floor(fmant)
            himant = long(fsmant)
            # ...then the low 32 bits from the remainder
            fmant = math.ldexp(fmant - fsmant, 32)
            fsmant = math.floor(fmant)
            lomant = long(fsmant)
    _write_ushort(f, expon)
    _write_ulong(f, himant)
    _write_ulong(f, lomant)
Example 15
Project: pyblish-win   Author: pyblish   File: test_math.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def testFloor(self):
        """Exercise math.floor: return type, basic values, specials,
        and the __float__ conversion protocol.
        NOTE: Python 2 code — uses `1L` long literals.
        """
        self.assertRaises(TypeError, math.floor)
        # These types will be int in py3k.
        self.assertEqual(float, type(math.floor(1)))
        self.assertEqual(float, type(math.floor(1L)))
        self.assertEqual(float, type(math.floor(1.0)))
        self.ftest('floor(0.5)', math.floor(0.5), 0)
        self.ftest('floor(1.0)', math.floor(1.0), 1)
        self.ftest('floor(1.5)', math.floor(1.5), 1)
        self.ftest('floor(-0.5)', math.floor(-0.5), -1)
        self.ftest('floor(-1.0)', math.floor(-1.0), -1)
        self.ftest('floor(-1.5)', math.floor(-1.5), -2)
        # pow() relies on floor() to check for integers
        # This fails on some platforms - so check it here
        self.ftest('floor(1.23e167)', math.floor(1.23e167), 1.23e167)
        self.ftest('floor(-1.23e167)', math.floor(-1.23e167), -1.23e167)
        # NOTE(review): the next two assert math.ceil inside testFloor —
        # looks copied from a ceil test; floor(INF)/floor(NINF) would give
        # the same values, but confirm intent.
        self.assertEqual(math.ceil(INF), INF)
        self.assertEqual(math.ceil(NINF), NINF)
        self.assertTrue(math.isnan(math.floor(NAN)))

        # object convertible to float via __float__: floor() should use it
        class TestFloor(object):
            def __float__(self):
                return 42.3
        # object with no float conversion: floor() should raise TypeError
        class TestNoFloor(object):
            pass
        self.ftest('floor(TestFloor())', math.floor(TestFloor()), 42)
        self.assertRaises(TypeError, math.floor, TestNoFloor())

        # an instance-level __floor__ attribute is not a real protocol slot
        # here, so floor() must still raise
        t = TestNoFloor()
        t.__floor__ = lambda *args: args
        self.assertRaises(TypeError, math.floor, t)
        self.assertRaises(TypeError, math.floor, t, 0)
Example 16
Project: pyblish-win   Author: pyblish   File: test_long.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_bit_length(self):
        """Verify long.bit_length() against bin(), the documented bounds,
        and the 1 + floor(log2(x)) definition.
        NOTE: Python 2 code — uses xrange and `L` long literals.
        """
        tiny = 1e-10
        for x in xrange(-65000, 65000):
            x = long(x)
            k = x.bit_length()
            # Check equivalence with Python version
            self.assertEqual(k, len(bin(x).lstrip('-0b')))
            # Behaviour as specified in the docs
            if x != 0:
                self.assertTrue(2**(k-1) <= abs(x) < 2**k)
            else:
                self.assertEqual(k, 0)
            # Alternative definition: x.bit_length() == 1 + floor(log_2(x))
            if x != 0:
                # When x is an exact power of 2, numeric errors can
                # cause floor(log(x)/log(2)) to be one too small; for
                # small x this can be fixed by adding a small quantity
                # to the quotient before taking the floor.
                self.assertEqual(k, 1 + math.floor(
                        math.log(abs(x))/math.log(2) + tiny))

        # explicit spot checks around zero and +-1/+-2
        self.assertEqual((0L).bit_length(), 0)
        self.assertEqual((1L).bit_length(), 1)
        self.assertEqual((-1L).bit_length(), 1)
        self.assertEqual((2L).bit_length(), 2)
        self.assertEqual((-2L).bit_length(), 2)
        # values straddling powers of two, including word-size boundaries
        for i in [2, 3, 15, 16, 17, 31, 32, 33, 63, 64, 234]:
            a = 2L**i
            self.assertEqual((a-1).bit_length(), i)
            self.assertEqual((1-a).bit_length(), i)
            self.assertEqual((a).bit_length(), i+1)
            self.assertEqual((-a).bit_length(), i+1)
            self.assertEqual((a+1).bit_length(), i+1)
            self.assertEqual((-a-1).bit_length(), i+1)
Example 17
Project: pyblish-win   Author: pyblish   File: test_int.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_bit_length(self):
        """Verify int.bit_length() against bin(), the documented bounds,
        and the 1 + floor(log2(x)) definition.
        NOTE: Python 2 code — uses xrange.
        """
        tiny = 1e-10
        for x in xrange(-65000, 65000):
            k = x.bit_length()
            # Check equivalence with Python version
            self.assertEqual(k, len(bin(x).lstrip('-0b')))
            # Behaviour as specified in the docs
            if x != 0:
                self.assertTrue(2**(k-1) <= abs(x) < 2**k)
            else:
                self.assertEqual(k, 0)
            # Alternative definition: x.bit_length() == 1 + floor(log_2(x))
            if x != 0:
                # When x is an exact power of 2, numeric errors can
                # cause floor(log(x)/log(2)) to be one too small; for
                # small x this can be fixed by adding a small quantity
                # to the quotient before taking the floor.
                self.assertEqual(k, 1 + math.floor(
                        math.log(abs(x))/math.log(2) + tiny))

        # explicit spot checks around zero and +-1/+-2
        self.assertEqual((0).bit_length(), 0)
        self.assertEqual((1).bit_length(), 1)
        self.assertEqual((-1).bit_length(), 1)
        self.assertEqual((2).bit_length(), 2)
        self.assertEqual((-2).bit_length(), 2)
        # values straddling powers of two, including word-size boundaries
        for i in [2, 3, 15, 16, 17, 31, 32, 33, 63, 64]:
            a = 2**i
            self.assertEqual((a-1).bit_length(), i)
            self.assertEqual((1-a).bit_length(), i)
            self.assertEqual((a).bit_length(), i+1)
            self.assertEqual((-a).bit_length(), i+1)
            self.assertEqual((a+1).bit_length(), i+1)
            self.assertEqual((-a-1).bit_length(), i+1)
Example 18
Project: LipNet-PyTorch   Author: sailordiary   File: augmentation.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def round(x):
    """Round halves toward +infinity (2.5 -> 3), unlike the banker's
    rounding used by the Python 3 builtin round()."""
    shifted = x + 0.5
    return math.floor(shifted)
Example 19
Project: backtrader-cn   Author: pandalibin   File: utils.py    GNU General Public License v3.0 5 votes vote down vote up
def split_data(cls, data, percent=0.3):
        """
        Split the data into training data and test data.
        :param data(DataFrame): data to be split.
        :param percent(float): percent of data used as training data.
        :return: training data(DataFrame) and testing data(DataFrame)
        """

        rows = len(data)
        train_rows = math.floor(rows * percent)

        # Slice the test part from train_rows onward rather than with a
        # negative offset: when percent rounds up to the full length,
        # iloc[-0:] would wrongly return the whole frame again.
        return data.iloc[:train_rows], data.iloc[train_rows:]
Example 20
Project: backtrader-cn   Author: pandalibin   File: ma.py    GNU General Public License v3.0 5 votes vote down vote up
def get_params_list(cls, training_data, stock_id):
        """
        Get the params list for finding the best strategy.
        :param training_data(DateFrame): data for training.
        :param stock_id(integer): stock on which strategy works.
        :return: list(dict)
        """
        data_len = len(training_data)
        # long MA period ranges up to 20% of the data length,
        # short MA period up to 10%
        long_limit = math.floor(data_len * 0.2)
        short_limit = math.floor(data_len * 0.1)

        # every (short, long) pair with long > short, stepping long by 5
        return [
            dict(ma_period_s=short, ma_period_l=long_, stock_id=stock_id)
            for short in range(1, int(short_limit))
            for long_ in range(short + 1, int(long_limit), 5)
        ]
Example 21
Project: drydock   Author: airshipit   File: test_maasdriver_calculate_bytes.py    Apache License 2.0 5 votes vote down vote up
def test_calculate_percent_blockdev(self):
        '''Convert a percent of total blockdev space to explicit byte count.'''
        drive_size = 20 * 1000 * 1000  # 20 mb drive
        expected = math.floor(drive_size * .2)  # 20% of the drive, floored

        drive = BlockDevice(None, size=drive_size, available_size=drive_size)

        calc_size = ApplyNodeStorage.calculate_bytes(
            size_str='20%', context=drive)

        assert calc_size == expected
Example 22
Project: drydock   Author: airshipit   File: test_maasdriver_calculate_bytes.py    Apache License 2.0 5 votes vote down vote up
def test_calculate_percent_vg(self):
        '''Convert a percent of total volume group space to explicit byte count.'''
        vg_size = 20 * 1000 * 1000  # 20 mb drive
        expected = math.floor(vg_size * .2)  # 20% of the volume group, floored

        vg = VolumeGroup(None, size=vg_size, available_size=vg_size)

        calc_size = ApplyNodeStorage.calculate_bytes(
            size_str='20%', context=vg)

        assert calc_size == expected
Example 23
Project: python-samples   Author: dek-odoo   File: dek_program073.py    Apache License 2.0 5 votes vote down vote up
def main(numlist, searchnum):
    """Binary search: return the index of `searchnum` in the sorted
    sequence `numlist`, or -1 when it is not present."""
    lo = 0
    hi = len(numlist) - 1
    while lo <= hi:
        mid = int(math.floor((lo + hi) / 2.0))
        value = numlist[mid]
        if value == searchnum:
            return mid
        if value > searchnum:
            hi = mid - 1
        else:
            lo = mid + 1
    return -1
Example 24
Project: slidoc   Author: mitotic   File: sdproxy.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def randomNumber(self, *args):
        """randomNumber(seedValue, min, max)

        Equally probable integer values between min and max (inclusive).
        If min is omitted, equally probable integer values between 1 and max.
        If both are omitted, a value uniformly distributed in [0.0, 1.0).

        Fixed: the original mixed tabs and spaces in this body, which raises
        TabError under Python 3; also dropped a stray semicolon.
        """
        if len(args) <= 1:
            return self.uniform(*args)
        if len(args) == 2:
            maxVal = args[1]
            minVal = 1
        else:
            maxVal = args[2]
            minVal = args[1]
        # scale the uniform [0, 1) sample onto the inclusive integer range,
        # clamping at maxVal for the unlikely top edge
        return min(maxVal, int(math.floor(minVal + (maxVal - minVal + 1) * self.uniform(args[0]))))
Example 25
Project: Learning-Concurrency-in-Python   Author: PacktPublishing   File: poolImprovement.py    MIT License 5 votes vote down vote up
def is_prime(n):
    """Return True if n is prime, by trial division up to sqrt(n).

    Fixes the original, which reported 1 (and any n < 2) as prime and 2 as
    composite (every even n, including 2, returned False).
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False

    # only odd divisors up to floor(sqrt(n)) need checking
    sqrt_n = int(math.floor(math.sqrt(n)))
    for i in range(3, sqrt_n + 1, 2):
        if n % i == 0:
            return False
    return True
Example 26
Project: Learning-Concurrency-in-Python   Author: PacktPublishing   File: mpExample.py    MIT License 5 votes vote down vote up
def is_prime(n):
    """Return True if n is prime, by trial division up to sqrt(n).

    Fixes the original, which reported 1 (and any n < 2) as prime and 2 as
    composite (every even n, including 2, returned False).
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False

    # only odd divisors up to floor(sqrt(n)) need checking
    sqrt_n = int(math.floor(math.sqrt(n)))
    for i in range(3, sqrt_n + 1, 2):
        if n % i == 0:
            return False
    return True
Example 27
Project: petuk.corp   Author: fnugrahendi   File: wget.py    GNU General Public License v2.0 5 votes vote down vote up
def bar_thermometer(current, total, width=80):
    """Return thermometer style progress bar string. `total` argument
    can not be zero. The minimum size of bar returned is 3. Example:

        [..........            ]

    Control and trailing symbols (\r and spaces) are not included.
    See `bar_adaptive` for more information.
    """
    # interior width between the two brackets
    avail_dots = width - 2
    fraction = float(current) / total
    shaded_dots = int(math.floor(fraction * avail_dots))
    filled = '.' * shaded_dots
    blank = ' ' * (avail_dots - shaded_dots)
    return '[{0}{1}]'.format(filled, blank)
Example 28
Project: mmdetection   Author: open-mmlab   File: masked_conv.py    Apache License 2.0 5 votes vote down vote up
def forward(ctx, features, mask, weight, bias, padding=0, stride=1):
        """Masked 2D convolution: computes the convolution only at positions
        where `mask` is positive (CUDA only, batch size 1, stride 1).

        :param features: (1, C_in, H, W) input tensor
        :param mask: (1, H, W) tensor; positive entries select output positions
        :param weight: (C_out, C_in, kH, kW) convolution weights
        :param bias: (C_out,) bias
        :raises NotImplementedError: if the input is not on a CUDA device
        """
        assert mask.dim() == 3 and mask.size(0) == 1
        assert features.dim() == 4 and features.size(0) == 1
        assert features.size()[2:] == mask.size()[1:]
        pad_h, pad_w = _pair(padding)
        stride_h, stride_w = _pair(stride)
        if stride_h != 1 or stride_w != 1:
            raise ValueError(
                'Stride could not only be 1 in masked_conv2d currently.')
        if not features.is_cuda:
            raise NotImplementedError

        out_channel, in_channel, kernel_h, kernel_w = weight.size()

        batch_size = features.size(0)
        # standard convolution output size: (in + 2*pad - (k - 1) - 1) / stride + 1
        out_h = int(
            math.floor((features.size(2) + 2 * pad_h -
                        (kernel_h - 1) - 1) / stride_h + 1))
        # Fixed: the width computation used (kernel_h - 1) instead of
        # (kernel_w - 1), giving a wrong out_w for non-square kernels.
        out_w = int(
            math.floor((features.size(3) + 2 * pad_w -
                        (kernel_w - 1) - 1) / stride_w + 1))
        mask_inds = torch.nonzero(mask[0] > 0)
        output = features.new_zeros(batch_size, out_channel, out_h, out_w)
        if mask_inds.numel() > 0:
            mask_h_idx = mask_inds[:, 0].contiguous()
            mask_w_idx = mask_inds[:, 1].contiguous()
            # gather the masked columns, convolve them, then scatter back
            data_col = features.new_zeros(in_channel * kernel_h * kernel_w,
                                          mask_inds.size(0))
            masked_conv2d_cuda.masked_im2col_forward(features, mask_h_idx,
                                                     mask_w_idx, kernel_h,
                                                     kernel_w, pad_h, pad_w,
                                                     data_col)

            masked_output = torch.addmm(1, bias[:, None], 1,
                                        weight.view(out_channel, -1), data_col)
            masked_conv2d_cuda.masked_col2im_forward(masked_output, mask_h_idx,
                                                     mask_w_idx, out_h, out_w,
                                                     out_channel, output)
        return output
Example 29
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: densenet.py    MIT License 5 votes vote down vote up
def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=1, n_dim=3):
        """DenseNet backbone: four dense blocks, the first three each followed
        by a compressing transition layer; sigmoid head for binary output."""
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate

        num_planes = 2 * growth_rate
        self.conv1 = nn.Conv2d(n_dim, num_planes, kernel_size=3, padding=1, bias=False)

        # dense1..dense3 with trans1..trans3, registered in the same order as
        # the original explicit assignments
        for stage in range(3):
            dense = self._make_dense_layers(block, num_planes, nblocks[stage])
            setattr(self, 'dense{0}'.format(stage + 1), dense)
            num_planes += nblocks[stage] * growth_rate
            out_planes = int(math.floor(num_planes * reduction))
            setattr(self, 'trans{0}'.format(stage + 1), Transition(num_planes, out_planes))
            num_planes = out_planes

        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3] * growth_rate

        self.bn = nn.BatchNorm2d(num_planes)

        # NOTE(review): 448 is hard-coded rather than derived from num_planes;
        # presumably matched to a fixed input size — confirm against callers.
        self.linear = nn.Linear(448, num_classes)
        self.sig = nn.Sigmoid()
Example 30
Project: neural-fingerprinting   Author: StephanZheng   File: densenet.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        """DenseNet: four dense blocks; transitions compress channel count by
        `reduction` after each of the first three blocks."""
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate

        num_planes = 2 * growth_rate
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)

        # dense1..dense3 with trans1..trans3, registered in the same order as
        # the original explicit assignments
        for stage in range(3):
            setattr(self, 'dense{0}'.format(stage + 1),
                    self._make_dense_layers(block, num_planes, nblocks[stage]))
            num_planes += nblocks[stage] * growth_rate
            out_planes = int(math.floor(num_planes * reduction))
            setattr(self, 'trans{0}'.format(stage + 1), Transition(num_planes, out_planes))
            num_planes = out_planes

        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3] * growth_rate

        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)
Example 31
Project: neural-fingerprinting   Author: StephanZheng   File: densenet.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        """DenseNet: four dense blocks; transitions compress channel count by
        `reduction` after each of the first three blocks."""
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate

        num_planes = 2 * growth_rate
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)

        # dense1..dense3 with trans1..trans3, registered in the same order as
        # the original explicit assignments
        for stage in range(3):
            setattr(self, 'dense{0}'.format(stage + 1),
                    self._make_dense_layers(block, num_planes, nblocks[stage]))
            num_planes += nblocks[stage] * growth_rate
            out_planes = int(math.floor(num_planes * reduction))
            setattr(self, 'trans{0}'.format(stage + 1), Transition(num_planes, out_planes))
            num_planes = out_planes

        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3] * growth_rate

        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)
Example 32
Project: neural-fingerprinting   Author: StephanZheng   File: densenet.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        """DenseNet: four dense blocks; transitions compress channel count by
        `reduction` after each of the first three blocks."""
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate

        num_planes = 2 * growth_rate
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)

        # dense1..dense3 with trans1..trans3, registered in the same order as
        # the original explicit assignments
        for stage in range(3):
            setattr(self, 'dense{0}'.format(stage + 1),
                    self._make_dense_layers(block, num_planes, nblocks[stage]))
            num_planes += nblocks[stage] * growth_rate
            out_planes = int(math.floor(num_planes * reduction))
            setattr(self, 'trans{0}'.format(stage + 1), Transition(num_planes, out_planes))
            num_planes = out_planes

        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3] * growth_rate

        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)
Example 33
Project: CLRS   Author: JasonVann   File: vEB_Tree.py    MIT License 5 votes vote down vote up
def __init__(self, u):
        """Build a van Emde Boas tree node over the universe 0..u-1.

        Splits the universe into ceil(sqrt(u)) clusters of floor(sqrt(u))
        elements each, plus a summary structure over the clusters.
        NOTE: Python 2 code — uses xrange.
        """
        # min/max start empty; presumably maintained by insert/delete
        # elsewhere in the class — confirm against the full file.
        self.min = None
        self.max = None
        self.u = u
        self.summary = None
        # lower and upper square roots of u: cluster size and cluster count
        self.ul = math.floor(math.sqrt(self.u))
        self.uh = math.ceil(math.sqrt(self.u))
        #self.cluster = [None] * self.uh
        if not self.is_leaf():
            # internal node: recursively build summary and child clusters
            self.summary = vEB_Tree(self.uh)
            self.cluster = []
            for x in xrange(self.uh):
                self.cluster.append(vEB_Tree(self.ul))
Example 34
Project: CLRS   Author: JasonVann   File: vEB_Tree.py    MIT License 5 votes vote down vote up
def high(self, x):
        """Index of the cluster containing x (x divided by the cluster size)."""
        cluster_index = math.floor(x / self.ul)
        return cluster_index
Example 35
Project: programsynthesishunting   Author: flexgp   File: grammar.py    GNU General Public License v3.0 5 votes vote down vote up
def get_min_ramp_depth(self):
        """
        Find the minimum depth at which ramping can start where we can have
        unique solutions (no duplicates).

        :param self: An instance of the representation.grammar.grammar class.
        :return: The minimum depth at which unique solutions can be generated
        """

        max_tree_depth = params['MAX_INIT_TREE_DEPTH']
        size = params['POPULATION_SIZE']

        # Candidate ramping depths.
        depths = range(self.min_path, max_tree_depth + 1)

        # Round an odd population size up so it divides evenly into pairs.
        if size % 2:
            size += 1

        if size / 2 < len(depths):
            # The population is too small to cover every ramping depth;
            # keep only as many depths as we can actually reach.
            depths = depths[:int(size / 2)]

        # Minimum number of unique solutions required per depth.
        unique_start = int(floor(size / len(depths)))

        # The minimum ramp depth is the first depth whose number of possible
        # permutations exceeds the required number of unique solutions
        # (None when no depth qualifies).
        self.min_ramp = next(
            (depth for depth in sorted(self.permutations.keys())
             if self.permutations[depth] > unique_start),
            None)
Example 36
Project: core   Author: lifemapper   File: geotools.py    GNU General Public License v3.0 5 votes vote down vote up
def gxy2xy(gxy, gt):
    """
    @summary: Convert geographic coordinates to pixel coordinates.

    Inverts the affine transform gt for the point gxy ([0] = x, [1] = y),
    yielding the pixel coordinates that map to those geographic coordinates.

    Arguments:
    @param gxy: sequence of two elements [x coordinate, y coordinate]
    @param gt: the affine transformation associated with a dataset
    @return: list of 2 elements [x pixel, y pixel] or None if the transform is
             invalid.
    """
    gx, gy = gxy[0], gxy[1]

    # Determinant of the 2x2 linear part of the affine transformation.
    det = gt[1] * gt[5] - gt[4] * gt[2]

    # A singular transform cannot be inverted.
    if det == 0.0:
        return None

    # Cramer's-rule style inversion.  floor() (rather than truncation via
    # int(x - 0.5)) keeps the pixels around the origin a full unit wide.
    px = floor((gx * gt[5] - gt[0] * gt[5] - gt[2] * gy + gt[2] * gt[3]) / det)
    py = floor((gy * gt[1] - gt[1] * gt[3] - gx * gt[4] + gt[4] * gt[0]) / det)
    return [int(px), int(py)]

# ............................................... 
Example 37
Project: treelstm.pytorch   Author: dasguptar   File: utils.py    MIT License 5 votes vote down vote up
def map_label_to_target(label, num_classes):
    """Encode a real-valued label as a soft 1-x-num_classes target row.

    Classes are 1-indexed.  An integral label puts all mass on its class;
    a fractional label splits the mass linearly between the two
    neighbouring integer classes.
    """
    target = torch.zeros(1, num_classes, dtype=torch.float, device='cpu')
    lo = int(math.floor(label))
    hi = int(math.ceil(label))
    if lo == hi:
        # Integral label: a plain one-hot row.
        target[0, lo - 1] = 1
    else:
        # Fractional label: weights are the distances to the far neighbour.
        target[0, lo - 1] = hi - label
        target[0, hi - 1] = label - lo
    return target
Example 38
Project: parasweep   Author: eviatarbach   File: namers.py    MIT License 5 votes vote down vote up
def start(self, length):
        """Reset the sequential counter and fix the zero-pad width for a sweep
        of `length` names."""
        self.count = self.start_at - 1

        # Recompute the pad width on every call; caching it would leak state
        # across multiple evaluations of `run_sweep`.
        if self.zfill_arg is not None:
            self.zfill = self.zfill_arg
        else:
            # Digits needed to represent the largest index.  length == 1 is
            # special-cased because log10(0) is undefined.
            if length == 1:
                self.zfill = 1
            else:
                self.zfill = math.floor(math.log10(length - 1) + 1)
        self.length = length
Example 39
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: main.py    Apache License 2.0 5 votes vote down vote up
def to_target(x):
    """Soft-encode scalar x over `num_classes` classes (1-indexed) and wrap
    the result in an mx NDArray.

    An integral x yields a one-hot row; a fractional x splits its weight
    between the two neighbouring integer classes.
    """
    target = np.zeros((1, num_classes))
    lo = int(math.floor(x))
    hi = int(math.ceil(x))
    if lo == hi:
        # Integral score: all weight on one class.
        target[0][lo - 1] = 1
    else:
        # Fractional score: linear split between adjacent classes.
        target[0][lo - 1] = hi - x
        target[0][hi - 1] = x - lo
    return mx.nd.array(target)
Example 40
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: conv_rnn_cell.py    Apache License 2.0 5 votes vote down vote up
def _get_conv_out_size(dimensions, kernels, paddings, dilations):
    return tuple(int(floor(x+2*p-d*(k-1)-1)+1) if x else 0 for x, k, p, d in
                 zip(dimensions, kernels, paddings, dilations)) 
Example 41
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_round_ceil_floor():
    """Check mx.sym round/ceil/floor against the NumPy reference on a
    constant 3x4 array of 5.543."""
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:]=5.543
    arr_data = mx.nd.array(data_tmp)
    # Allocated but never passed to bind() below, so it takes no part in
    # the forward pass.
    arr_grad = mx.nd.empty(shape)
    arr_grad[:]= 2

    # Sum of the three rounding ops applied to the same input symbol.
    test = mx.sym.round(data) + mx.sym.ceil(data) +  mx.sym.floor(data)
    exe_test = test.bind(default_context(), args=[arr_data])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0].asnumpy()
    # NumPy reference for the same composite expression.
    npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
    assert_almost_equal(out, npout) 
Example 42
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_adaptive_avg_pool_op():
    """Compare mx.nd.contrib.AdaptiveAvgPooling2D against a pure
    python/NumPy reference over output sizes 1..10 (square and
    rectangular)."""
    def py_adaptive_avg_pool(x, height, width):
        # 2D per frame adaptive avg pool
        def adaptive_avg_pool_frame(x, y):
            # Average the (isizeH x isizeW) frame x into y (osizeH x osizeW).
            # Each output cell averages the half-open input window
            # [istart, iend) along each axis.
            isizeH, isizeW = x.shape
            osizeH, osizeW = y.shape
            for oh in range(osizeH):
                istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
                iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
                kH = iendH - istartH
                for ow in range(osizeW):
                    istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
                    iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
                    kW = iendW - istartW
                    xsum = 0
                    for ih in range(kH):
                        for iw in range(kW):
                            xsum += x[istartH+ih][istartW+iw]
                    y[oh][ow] = xsum / kH / kW

        # Pool every (batch, channel) frame independently.
        B,C,_,_ = x.shape
        y = np.empty([B,C,height, width], dtype=x.dtype)
        for b in range(B):
            for c in range(C):
                adaptive_avg_pool_frame(x[b][c], y[b][c])
        return y
    def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
        # A bare int output_size means a square output.
        x = mx.nd.random.uniform(shape=shape)
        if output_width is None:
            y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
            npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
        else:
            y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
            npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
        assert_almost_equal(y.asnumpy(), npy)
    shape = (2, 2, 10, 10)
    for i in range(1, 11):
        check_adaptive_avg_pool_op(shape, i)
        for j in range(1, 11):
            check_adaptive_avg_pool_op(shape, i, j) 
Example 43
Project: comport   Author: codeforamerica   File: mutators.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def drop_data(self, incident):
        """Null out roughly `self.percent` of the incident's fields in place.

        Fields listed in self.do_not_mutate are never cleared (a draw that
        lands on one is simply wasted).  Returns the mutated incident.
        """
        mutations = math.floor(len(incident) * self.percent)
        for _ in range(mutations):
            victim = random.choice(list(incident.keys()))
            if victim not in self.do_not_mutate:
                incident[victim] = None
        return incident
Example 44
Project: comport   Author: codeforamerica   File: mutators.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def drop_data(self, incident):
        """Blank out roughly `self.percent` of the incident's fields in place.

        Cleared fields are set to "" or " " at random; fields listed in
        self.do_not_mutate are left alone.  Returns the mutated incident.
        """
        mutations = math.floor(len(incident) * self.percent)
        for _ in range(mutations):
            victim = random.choice(list(incident.keys()))
            if victim not in self.do_not_mutate:
                incident[victim] = random.choice(["", " "])
        return incident
Example 45
Project: comport   Author: codeforamerica   File: mutators.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def alter_data(self, incident):
        """Replace roughly `self.percent` of the incident's fields with random
        strings (length 0-140) in place.

        Fields listed in self.do_not_mutate are preserved.  Returns the
        mutated incident.
        """
        scrambles = math.floor(len(incident) * self.percent)
        for _ in range(scrambles):
            victim = random.choice(list(incident.keys()))
            if victim not in self.do_not_mutate:
                incident[victim] = random_string(random.randint(0, 140))
        return incident
Example 46
Project: comport   Author: codeforamerica   File: mutators.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def alter_data(self, incident):
        """Randomly re-case the characters of roughly `self.percent` of the
        incident's string fields in place.

        Fields in self.do_not_mutate and fields holding None are skipped.
        Returns the mutated incident.
        """
        flips = math.floor(len(incident) * self.percent)
        for _ in range(flips):
            victim = random.choice(list(incident.keys()))
            if victim in self.do_not_mutate or incident[victim] is None:
                continue
            recased = []
            for ch in incident[victim]:
                case_fn = random.choice((str.upper, str.lower))
                recased.append(case_fn(ch))
            incident[victim] = ''.join(recased)
        return incident
Example 47
Project: FCOS_GluonCV   Author: DetectionTeamUCAS   File: resnext.py    Apache License 2.0 5 votes vote down vote up
def __init__(self, channels, cardinality, bottleneck_width, stride,
                 downsample=False, last_gamma=False, use_se=False,
                 norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        """ResNeXt bottleneck block: 1x1 conv -> 3x3 grouped conv -> 1x1
        expansion to channels * 4, with optional SE branch and downsample
        shortcut.

        cardinality is the number of groups in the 3x3 conv;
        bottleneck_width scales the grouped-conv width relative to a
        64-wide baseline.  stride is applied in the 3x3 conv (and in the
        shortcut conv when downsample is True).
        """
        super(Block, self).__init__(**kwargs)
        # Grouped-conv width per group, scaled against the 64-wide baseline.
        D = int(math.floor(channels * (bottleneck_width / 64)))
        group_width = cardinality * D

        self.body = nn.HybridSequential(prefix='')
        self.body.add(nn.Conv2D(group_width, kernel_size=1, use_bias=False))
        self.body.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        self.body.add(nn.Activation('relu'))
        self.body.add(nn.Conv2D(group_width, kernel_size=3, strides=stride, padding=1,
                                groups=cardinality, use_bias=False))
        self.body.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        self.body.add(nn.Activation('relu'))
        self.body.add(nn.Conv2D(channels * 4, kernel_size=1, use_bias=False))
        # The final BatchNorm's gamma is zero-initialised unless last_gamma
        # is set.
        if last_gamma:
            self.body.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        else:
            self.body.add(norm_layer(gamma_initializer='zeros',
                                     **({} if norm_kwargs is None else norm_kwargs)))

        # Optional squeeze/excite-style gating branch (conv-relu-conv-sigmoid).
        if use_se:
            self.se = nn.HybridSequential(prefix='')
            self.se.add(nn.Conv2D(channels // 4, kernel_size=1, padding=0))
            self.se.add(nn.Activation('relu'))
            self.se.add(nn.Conv2D(channels * 4, kernel_size=1, padding=0))
            self.se.add(nn.Activation('sigmoid'))
        else:
            self.se = None

        # Projection shortcut matching the expanded channel count and stride.
        if downsample:
            self.downsample = nn.HybridSequential(prefix='')
            self.downsample.add(nn.Conv2D(channels * 4, kernel_size=1, strides=stride,
                                          use_bias=False))
            self.downsample.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        else:
            self.downsample = None 
Example 48
Project: FCOS_GluonCV   Author: DetectionTeamUCAS   File: senet.py    Apache License 2.0 5 votes vote down vote up
def __init__(self, channels, cardinality, bottleneck_width, stride,
                 downsample=False, downsample_kernel_size=3,
                 norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        """SENet bottleneck block: 1x1 conv (half group width) -> 3x3
        grouped conv -> 1x1 expansion to channels * 4, always followed by a
        squeeze/excite-style gating branch.

        cardinality is the number of groups in the 3x3 conv;
        bottleneck_width scales the grouped-conv width relative to a
        64-wide baseline.  downsample_kernel_size (3 or 1) selects the
        shortcut conv's kernel, with padding chosen to preserve size.
        """
        super(SEBlock, self).__init__(**kwargs)
        # Grouped-conv width per group, scaled against the 64-wide baseline.
        D = int(math.floor(channels * (bottleneck_width / 64)))
        group_width = cardinality * D

        self.body = nn.HybridSequential(prefix='')
        # First 1x1 conv uses half the group width.
        self.body.add(nn.Conv2D(group_width//2, kernel_size=1, use_bias=False))
        self.body.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        self.body.add(nn.Activation('relu'))
        self.body.add(nn.Conv2D(group_width, kernel_size=3, strides=stride, padding=1,
                                groups=cardinality, use_bias=False))
        self.body.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        self.body.add(nn.Activation('relu'))
        self.body.add(nn.Conv2D(channels * 4, kernel_size=1, use_bias=False))
        self.body.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))

        # Gating branch: conv-relu-conv-sigmoid over the expanded channels.
        self.se = nn.HybridSequential(prefix='')
        self.se.add(nn.Conv2D(channels // 4, kernel_size=1, padding=0))
        self.se.add(nn.Activation('relu'))
        self.se.add(nn.Conv2D(channels * 4, kernel_size=1, padding=0))
        self.se.add(nn.Activation('sigmoid'))

        # Projection shortcut; padding keeps the 3x3 variant size-preserving.
        if downsample:
            self.downsample = nn.HybridSequential(prefix='')
            downsample_padding = 1 if downsample_kernel_size == 3 else 0
            self.downsample.add(nn.Conv2D(channels * 4, kernel_size=downsample_kernel_size,
                                          strides=stride,
                                          padding=downsample_padding, use_bias=False))
            self.downsample.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        else:
            self.downsample = None 
Example 49
Project: dojo-toolkit   Author: grupy-sanca   File: timer.py    GNU General Public License v3.0 5 votes vote down vote up
def timer(self):
        """Poll once per second until the configured duration has elapsed.

        Updates `ellapsed_time` [sic] from the wall clock each second and
        clears `is_running` when done.  Assumes `start_time`, `duration`
        and an initial `ellapsed_time` were set before this runs --
        presumably by the code that starts the timer (TODO confirm).
        """
        while self.ellapsed_time <= self.duration:
            self.ellapsed_time = floor(time.time() - self.start_time)
            time.sleep(1)
        self.is_running = False 
Example 50
Project: Trusted-Platform-Module-nova   Author: BU-NU-CLOUD-SP16   File: limits.py    Apache License 2.0 5 votes vote down vote up
def __call__(self, verb, url):
        """Represents a call to this limit from a relevant request.

        Leaky-bucket style accounting: the water level drains linearly with
        the time since the previous request and rises by request_value per
        call.  Returns the wait time (in the same time units) when the
        request overflows capacity, or None when it is admitted.

        @param verb: string http verb (POST, GET, etc.)
        @param url: string URL
        """
        # Ignore requests this limit does not apply to.
        if self.verb != verb or not re.match(self.regex, url):
            return

        now = self._get_time()

        # First request ever: start the clock with zero elapsed time.
        if self.last_request is None:
            self.last_request = now

        # Bucket drains one unit per unit of elapsed time, floored at 0.
        leak_value = now - self.last_request

        self.water_level -= leak_value
        self.water_level = max(self.water_level, 0)
        self.water_level += self.request_value

        # Positive difference means this request overflows the bucket.
        difference = self.water_level - self.capacity

        self.last_request = now

        if difference > 0:
            # Rejected: undo this request's contribution and tell the caller
            # how long to wait before retrying.
            self.water_level -= self.request_value
            self.next_request = now + difference
            return difference

        cap = self.capacity
        water = self.water_level
        val = self.value

        # Remaining quota, proportional to the bucket's free space.
        self.remaining = math.floor(((cap - water) / cap) * val)
        self.next_request = now 
Example 51
Project: XFLTReaT   Author: earthquake   File: encoding.py    MIT License 5 votes vote down vote up
def get_maximum_length(self, cap):
	"""Maximum raw bytes that fit in `cap` encoded chars (4 chars -> 3 bytes)."""
	whole_groups = int(math.floor(cap / 4)) * 3
	partial = int(math.floor(((cap % 4) / 4.0) * 3))
	return whole_groups + partial
Example 52
Project: XFLTReaT   Author: earthquake   File: encoding.py    MIT License 5 votes vote down vote up
def get_maximum_length(self, cap):
	"""Maximum raw bytes that `cap` encoded chars can carry (3 bytes per 4 chars)."""
	complete = 3 * int(math.floor(cap / 4))
	tail_fraction = (cap % 4) / 4.0
	return complete + int(math.floor(tail_fraction * 3))
Example 53
Project: XFLTReaT   Author: earthquake   File: encoding.py    MIT License 5 votes vote down vote up
def get_maximum_length(self, cap):
	"""Raw-byte capacity of `cap` encoded characters (4:3 encoding ratio)."""
	usable = int(math.floor(cap / 4)) * 3
	tail_chars = cap % 4
	usable += int(math.floor((tail_chars / 4.0) * 3))
	return usable
Example 54
Project: XFLTReaT   Author: earthquake   File: encoding.py    MIT License 5 votes vote down vote up
def get_maximum_length(self, cap):
	"""Raw-byte capacity of `cap` encoded characters (8 chars -> 5 bytes)."""
	whole_groups = int(math.floor(cap / 8)) * 5
	partial = int(math.floor(((cap % 8) / 8.0) * 5))
	return whole_groups + partial
Example 55
Project: XFLTReaT   Author: earthquake   File: encoding.py    MIT License 5 votes vote down vote up
def get_maximum_length(self, cap):
	"""Raw-byte capacity of `cap` encoded characters (2 chars per byte)."""
	pairs = math.floor(cap / 2)
	return int(pairs)
Example 56
Project: XFLTReaT   Author: earthquake   File: encoding.py    MIT License 5 votes vote down vote up
def get_maximum_length(self, cap):
	"""Raw-byte capacity of `cap` encoded characters (8 chars -> 7 bytes)."""
	complete = int(math.floor(cap / 8)) * 7
	tail_fraction = (cap % 8) / 8.0
	return complete + int(math.floor(tail_fraction * 7))
Example 57
Project: vulnerability-engine   Author: RedHatInsights   File: list_view.py    GNU General Public License v2.0 5 votes vote down vote up
def _get_next(curr_offset, curr_limit, curr_total):
        # If 'here + limit' <= total', next is 'here + limit' - else we stay where we are
        return math.floor(min((curr_offset + curr_limit), curr_total - 1) / curr_limit) * curr_limit 
Example 58
Project: vulnerability-engine   Author: RedHatInsights   File: base.py    GNU General Public License v2.0 5 votes vote down vote up
def _parse_list_arguments(cls, kwargs):
        """Normalise list-endpoint pagination arguments.

        Accepts limit/offset and/or page/page_size; limit/offset takes
        precedence when both are given, and defaults apply when neither is.
        Both pairs are returned mutually consistent, together with the
        filter, sort and data_format entries.  Raises
        InvalidArgumentException for an unknown data format.
        """
        # Note: the truthiness test means explicit zero values read as
        # "not set" -- preserved from the existing contract.
        has_offset_args = kwargs.get('offset', '') or kwargs.get('limit', '')
        has_page_args = kwargs.get('page', '') or kwargs.get('page_size', '')

        if has_offset_args:
            limit = cls._check_int_arg(kwargs, "limit", DEFAULT_PAGE_SIZE)
            offset = cls._check_int_arg(kwargs, "offset", 0, True)
            page_size = limit
            # Derive the 1-based page the offset falls on.
            page = floor(offset / limit) + 1
        elif has_page_args:
            page = cls._check_int_arg(kwargs, "page", 1)
            page_size = cls._check_int_arg(kwargs, "page_size", DEFAULT_PAGE_SIZE)
            offset = (page - 1) * page_size
            limit = page_size
        else:
            # Neither style supplied: first page at the default size.
            page, offset = 1, 0
            page_size = limit = DEFAULT_PAGE_SIZE

        data_format = kwargs.get("data_format", "json")
        if data_format not in ("json", "csv"):
            raise InvalidArgumentException("Invalid data format: %s" % kwargs.get("data_format", None))

        return {
            "filter": remove_str_nulls(kwargs.get("filter", None)),
            "sort": remove_str_nulls(kwargs.get("sort", None)),
            "page": page,
            "page_size": page_size,
            "limit": limit,
            "offset": offset,
            "data_format": data_format
        }
Example 59
Project: procgen   Author: juancroldan   File: noise.py    Apache License 2.0 5 votes vote down vote up
def perlin1D(x):
	""" Generate 1D perlin noise.
	Taken from Improving Noise by Ken Perlin, SIGGRAPH 2002. """
	cell = floor(x)
	# Wrap both lattice points into the 256-entry permutation table.
	left = cell & 0xFF
	right = (cell + 1) & 0xFF

	# Fractional position inside the cell, smoothed by the quintic fade.
	x -= cell
	fade = x * x * x * (x * (x * 6 - 15) + 10)

	return _perlin_lerp(
		fade,
		_perlin_grad1D(_perlin_p[left], x),
		_perlin_grad1D(_perlin_p[right], x - 1),
	) * 0.4 / 1.5  # scaling
Example 60
Project: procgen   Author: juancroldan   File: noise.py    Apache License 2.0 5 votes vote down vote up
def perlin2D(x, y):
	""" Generate 2D perlin noise.
	Taken from Improving Noise by Ken Perlin, SIGGRAPH 2002. """
	# Lattice cell coordinates, wrapped into the 256-entry permutation table.
	X = floor(x) & 0xFF
	Y = floor(y) & 0xFF

	# Fractional position inside the cell.
	x = x % 1
	y = y % 1

	# Fade weights for interpolation along each axis.
	u = _perlin_fade(x)
	v = _perlin_fade(y)

	# Hash the cell's corner indices through the permutation table.
	A = _perlin_p[X] + Y
	B = _perlin_p[X + 1] + Y

	# Bilinearly interpolate the four corner gradient contributions:
	# inner lerps blend left/right corners, the outer lerp blends rows.
	return _perlin_lerp(
			v,
			_perlin_lerp(
				u,
				_perlin_grad2D(_perlin_p[_perlin_p[A]], x, y),
				_perlin_grad2D(_perlin_p[_perlin_p[B]], x - 1, y)
			),
			_perlin_lerp(
				u,
				_perlin_grad2D(_perlin_p[_perlin_p[A + 1]], x, y - 1),
				_perlin_grad2D(_perlin_p[_perlin_p[B + 1]], x - 1, y - 1)
			)
		) / 1.5  # scaling 
Example 61
Project: IntroToDeepLearning   Author: robb-brown   File: TensorFlowInterface.py    MIT License 5 votes vote down vote up
def plotFields(layer,fieldShape=None,channel=None,figOffset=1,cmap=None,padding=0.01):
	"""Plot a layer's receptive fields (weights) in an image grid.

	`layer` is either an object exposing its weight tensor as `.W`, or
	the weight tensor itself.  `fieldShape` unflattens fully connected
	weights into 2D fields; `channel` selects a single input channel of
	a conv layer (default: show every feature/channel slice).  Also draws
	the summed absolute dependency image in figure figOffset+1.
	"""
	# Receptive Fields Summary
	try:
		W = layer.W
	except AttributeError:
		# Narrowed from a bare `except:` -- only a missing `.W` attribute
		# should make us treat `layer` as the weight tensor itself.
		W = layer
	wp = W.eval().transpose();
	if len(np.shape(wp)) < 4:		# Fully connected layer, has no shape
		fields = np.reshape(wp,list(wp.shape[0:-1])+fieldShape)	
	else:			# Convolutional layer already has shape
		features, channels, iy, ix = np.shape(wp)
		if channel is not None:
			fields = wp[:,channel,:,:]
		else:
			fields = np.reshape(wp,[features*channels,iy,ix])

	# Grid dimensions: roughly square.
	perRow = int(math.floor(math.sqrt(fields.shape[0])))
	perColumn = int(math.ceil(fields.shape[0]/float(perRow)))

	fig = mpl.figure(figOffset); mpl.clf()
	
	# Using image grid
	from mpl_toolkits.axes_grid1 import ImageGrid
	grid = ImageGrid(fig,111,nrows_ncols=(perRow,perColumn),axes_pad=padding,cbar_mode='single')
	for i in range(0,np.shape(fields)[0]):
		im = grid[i].imshow(fields[i],cmap=cmap); 

	grid.cbar_axes[0].colorbar(im)
	mpl.title('%s Receptive Fields' % layer.name)
	
	# old way
	# fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
	# tiled = []
	# for i in range(0,perColumn*perRow,perColumn):
	# 	tiled.append(np.hstack(fields2[i:i+perColumn]))
	# 
	# tiled = np.vstack(tiled)
	# mpl.figure(figOffset); mpl.clf(); mpl.imshow(tiled,cmap=cmap); mpl.title('%s Receptive Fields' % layer.name); mpl.colorbar();
	mpl.figure(figOffset+1); mpl.clf(); mpl.imshow(np.sum(np.abs(fields),0),cmap=cmap); mpl.title('%s Total Absolute Input Dependency' % layer.name); mpl.colorbar() 
Example 62
Project: IntroToDeepLearning   Author: robb-brown   File: TensorFlowInterface.py    MIT License 5 votes vote down vote up
def plotOutput(layer,feed_dict,fieldShape=None,channel=None,figOffset=1,cmap=None):
	"""Plot a layer's activations for one forward pass as a tiled image.

	`layer` is either an object exposing its activation tensor as
	`.output`, or the tensor itself, evaluated with `feed_dict`.
	`fieldShape` reshapes fully connected outputs into 2D; `channel`
	selects one channel of a conv output.  Pass figOffset=None to draw
	into the current figure.
	"""
	# Output summary
	try:
		W = layer.output
	except AttributeError:
		# Narrowed from a bare `except:` -- only a missing `.output`
		# attribute should make us treat `layer` as the tensor itself.
		W = layer
	wp = W.eval(feed_dict=feed_dict);
	if len(np.shape(wp)) < 4:		# Fully connected layer, has no shape
		temp = np.zeros(np.product(fieldShape)); temp[0:np.shape(wp.ravel())[0]] = wp.ravel()
		fields = np.reshape(temp,[1]+fieldShape)
	else:			# Convolutional layer already has shape
		wp = np.rollaxis(wp,3,0)
		features, channels, iy,ix = np.shape(wp)
		if channel is not None:
			fields = wp[:,channel,:,:]
		else:
			fields = np.reshape(wp,[features*channels,iy,ix])

	# Tile the fields into a roughly square mosaic, padding with zeros.
	perRow = int(math.floor(math.sqrt(fields.shape[0])))
	perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
	fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
	tiled = []
	for i in range(0,perColumn*perRow,perColumn):
		tiled.append(np.hstack(fields2[i:i+perColumn]))

	tiled = np.vstack(tiled)
	if figOffset is not None:
		mpl.figure(figOffset); mpl.clf(); 

	mpl.imshow(tiled,cmap=cmap); mpl.title('%s Output' % layer.name); mpl.colorbar(); 
Example 63
Project: IntroToDeepLearning   Author: robb-brown   File: TensorFlowInterface.py    MIT License 5 votes vote down vote up
def plotFields(layer,fieldShape=None,channel=None,maxFields=25,figName='ReceptiveFields',cmap=None,padding=0.01):
	"""Plot up to `maxFields` of a layer's receptive fields in an image grid.

	`layer` must expose its weight tensor as `.W`.  `fieldShape`
	unflattens fully connected weights into 2D fields; `channel` selects
	a single input channel of a conv layer.  Also draws the summed
	absolute dependency image in a second figure named figName + ' Total'.
	"""
	# Receptive Fields Summary
	W = layer.W
	wp = W.eval().transpose();
	if len(np.shape(wp)) < 4:		# Fully connected layer, has no shape
		fields = np.reshape(wp,list(wp.shape[0:-1])+fieldShape)
	else:			# Convolutional layer already has shape
		features, channels, iy, ix = np.shape(wp)
		if channel is not None:
			fields = wp[:,channel,:,:]
		else:
			fields = np.reshape(wp,[features*channels,iy,ix])

	# Cap the number of fields shown and lay them out roughly square.
	fieldsN = min(fields.shape[0],maxFields)
	perRow = int(math.floor(math.sqrt(fieldsN)))
	perColumn = int(math.ceil(fieldsN/float(perRow)))

	fig = mpl.figure(figName); mpl.clf()

	# Using image grid
	from mpl_toolkits.axes_grid1 import ImageGrid
	grid = ImageGrid(fig,111,nrows_ncols=(perRow,perColumn),axes_pad=padding,cbar_mode='single')
	for i in range(0,fieldsN):
		im = grid[i].imshow(fields[i],cmap=cmap);

	grid.cbar_axes[0].colorbar(im)
	mpl.title('%s Receptive Fields' % layer.name)

	# old way
	# fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
	# tiled = []
	# for i in range(0,perColumn*perRow,perColumn):
	# 	tiled.append(np.hstack(fields2[i:i+perColumn]))
	#
	# tiled = np.vstack(tiled)
	# mpl.figure(figOffset); mpl.clf(); mpl.imshow(tiled,cmap=cmap); mpl.title('%s Receptive Fields' % layer.name); mpl.colorbar();
	mpl.figure(figName+' Total'); mpl.clf(); mpl.imshow(np.sum(np.abs(fields),0),cmap=cmap); mpl.title('%s Total Absolute Input Dependency' % layer.name); mpl.colorbar() 
Example 64
Project: IntroToDeepLearning   Author: robb-brown   File: TensorFlowInterface.py    MIT License 5 votes vote down vote up
def plotOutput(layer,feed_dict,fieldShape=None,channel=None,figOffset=1,cmap=None):
	"""Plot a layer's activations for one forward pass as a tiled mosaic.

	`layer` must expose its activation tensor as `.output`, evaluated
	with `feed_dict`.  `fieldShape` reshapes fully connected outputs into
	2D; `channel` selects one channel of a conv output.  Pass
	figOffset=None to draw into the current figure.
	"""
	# Output summary
	W = layer.output
	wp = W.eval(feed_dict=feed_dict);
	if len(np.shape(wp)) < 4:		# Fully connected layer, has no shape
		temp = np.zeros(np.product(fieldShape)); temp[0:np.shape(wp.ravel())[0]] = wp.ravel()
		fields = np.reshape(temp,[1]+fieldShape)
	else:			# Convolutional layer already has shape
		wp = np.rollaxis(wp,3,0)
		features, channels, iy,ix = np.shape(wp)
		if channel is not None:
			fields = wp[:,channel,:,:]
		else:
			fields = np.reshape(wp,[features*channels,iy,ix])

	# Tile the fields into a roughly square mosaic, padding with zeros.
	perRow = int(math.floor(math.sqrt(fields.shape[0])))
	perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
	fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
	tiled = []
	for i in range(0,perColumn*perRow,perColumn):
		tiled.append(np.hstack(fields2[i:i+perColumn]))

	tiled = np.vstack(tiled)
	if figOffset is not None:
		mpl.figure(figOffset); mpl.clf();

	mpl.imshow(tiled,cmap=cmap); mpl.title('%s Output' % layer.name); mpl.colorbar(); 
Example 65
Project: bigquerylayers   Author: smandaric   File: univ.py    GNU General Public License v3.0 5 votes vote down vote up
def __floor__(self):
        """Support math.floor() by flooring the wrapped numeric value."""
        return math.floor(self._value) 
Example 66
Project: bigquerylayers   Author: smandaric   File: univ.py    GNU General Public License v3.0 5 votes vote down vote up
def __floor__(self):
        """Support math.floor(); returns a clone of self wrapping the floored
        float value."""
        return self.clone(math.floor(float(self))) 
Example 67
Project: mqtt-connectors   Author: akleber   File: battery-controller.py    MIT License 5 votes vote down vote up
def on_message(mqttc, obj, msg):
    """MQTT message callback: route payloads by topic into module globals.

    Updates chg_pct, pv_p, auto_chg_pct and soc from their respective
    topics.  Enabling auto mode triggers a charge-power update; disabling
    it resets the published charge percentage to 100.
    """
    if msg.topic == CHG_PCT_TOPIC:
        global chg_pct
        # Payload arrives as bytes; parse via float, then truncate down.
        chg_pct = math.floor(float(msg.payload))
        logging.debug("got new chg_pct: {}".format(chg_pct))

    if msg.topic == PV_P_TOPIC:
        global pv_p
        pv_p = math.floor(float(msg.payload))
        logging.debug("got new pv_p: {}".format(pv_p))

    if msg.topic == AUTO_CHG_TOPIC:
        global auto_chg_pct
        # Payload is the raw bytes b'True' / anything-else for off.
        if msg.payload == b'True':
            auto_chg_pct = True
            update_chg_p()
        else:
            auto_chg_pct = False
            # reset chg_pct to 100% when auto mode is disabled
            publish_chg_pct(100)

        logging.debug("got new auto_chg_pct: {}".format(auto_chg_pct))

    if msg.topic == SOC_TOPIC:
        global soc
        soc = int(msg.payload)
        logging.debug("got new soc: {}".format(soc)) 
Example 68
Project: forex_backtesting   Author: Robinbeatrix   File: backup_bt.py    MIT License 5 votes vote down vote up
def _getsizing(self, comminfo, cash, data, isbuy):
        """Position sizer: risk a fixed fraction of the account per trade.

        The stop distance is 1.5 x ATR; the unit count is scaled so the
        stop-out loss equals the risk budget, then reduced by an estimated
        round-trip spread commission.  Returns a floored unit count
        (negative for sells), or 0 when commission exhausts the budget.
        """
        multiplier = 0.0001  # pip size for 4-decimal FX quotes
        atr = self.strategy.atr
        size = 0  # NOTE(review): never read; sizing is derived from risk below
        acct_value = cash
        # Cash at risk on this trade.
        max_risk = math.floor(acct_value * self.p.risk)
        # Stop distance = 1.5 x ATR, expressed in pips.
        stop_order = (atr*1.5)
        stop_order_pips = stop_order / multiplier
        pip_value = max_risk / stop_order_pips
        units = pip_value / multiplier

        # Estimated one-way spread commission; the formula depends on which
        # role the account currency plays in the pair.
        if self.params.counter_currency == True:
                comm_new = abs((self.params.spread * (units * multiplier) / 2))

        elif self.params.base_currency == True:
                comm_new = abs((self.params.spread * ((units / data.close) * multiplier) / 2))

        elif self.params.no_currency == True:
                comm_new = abs((self.params.spread * ((units / data.close) * multiplier) / 2))*self.p.ex_rate

        # NOTE(review): if none of the three currency flags is True,
        # comm_new is unbound and the next line raises NameError.
        # Deduct the round-trip (x2) commission from the risk budget and
        # re-derive the unit count.
        comm_adj_risk = max_risk - (comm_new * 2)
        pip_value_adj = comm_adj_risk / stop_order_pips
        units_adj = pip_value_adj / multiplier

        if comm_adj_risk < 0:
            return 0 

        # Sign encodes direction: positive to buy, negative to sell.
        if isbuy == True:
            comm_adj_size = units_adj
        else:
            comm_adj_size = units_adj * -1

        comm_adj_size = math.floor(comm_adj_size)

        return comm_adj_size
Example 69
Project: cat-bbs   Author: aleju   File: common.py    MIT License 4 votes vote down vote up
def to_aspect_ratio_add(image, target_ratio, pad_mode="constant", pad_cval=0, return_paddings=False):
    """Resize an image to a desired aspect ratio by adding pixels to it
    (usually black ones, i.e. zero-padding).

    Pads left/right when the image is too tall for target_ratio, or
    top/bottom when it is too wide; the first side of each pair receives
    the extra pixel for odd deficits.  Optionally also returns the
    (top, right, bottom, left) padding amounts.
    """
    height, width = image.shape[0], image.shape[1]
    ratio = width / height

    pad_top = pad_bottom = pad_left = pad_right = 0

    if ratio < target_ratio:
        # Too tall: widen by splitting the width deficit across left/right.
        diff = (target_ratio * height) - width
        pad_left = int(math.ceil(diff / 2))
        pad_right = int(math.floor(diff / 2))
    elif ratio > target_ratio:
        # Too wide: heighten by splitting the deficit across top/bottom.
        diff = ((1 / target_ratio) * width) - height
        pad_top = int(math.ceil(diff / 2))
        pad_bottom = int(math.floor(diff / 2))

    if any(val > 0 for val in (pad_top, pad_bottom, pad_left, pad_right)):
        pad_widths = ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0))
        if pad_mode == "constant":
            # constant_values is only accepted when mode == "constant".
            image = np.pad(image, pad_widths, mode=pad_mode, constant_values=pad_cval)
        else:
            image = np.pad(image, pad_widths, mode=pad_mode)

    # Sanity-check the achieved ratio (within a small tolerance).
    result_ratio = image.shape[1] / image.shape[0]
    assert target_ratio - 0.1 < result_ratio < target_ratio + 0.1, \
        "Wrong result ratio: " + str(result_ratio)

    if return_paddings:
        return image, (pad_top, pad_right, pad_bottom, pad_left)
    return image
Example 70
Project: cat-bbs   Author: aleju   File: train.py    MIT License 4 votes vote down vote up
def bb_coords_to_grid(bb_coords_one, img_shape, grid_size):
    """Convert bounding box coordinates (corners) to ground truth heatmaps.

    bb_coords_one: corner keypoints of one box -- a sequence of objects
        with .x/.y, or an ia.KeypointsOnImage wrapper.
    img_shape: (height, width, ...) of the source image.
    grid_size: side length of the square output heatmaps.
    Returns a (grid_size, grid_size, 10) float32 array: one whole-box
    mask plus 9 position-sensitive 3x3 cell masks.
    """
    if isinstance(bb_coords_one, ia.KeypointsOnImage):
        bb_coords_one = bb_coords_one.keypoints

    # bb edges after augmentation
    x1b = min([kp.x for kp in bb_coords_one])
    x2b = max([kp.x for kp in bb_coords_one])
    y1b = min([kp.y for kp in bb_coords_one])
    y2b = max([kp.y for kp in bb_coords_one])

    # clip
    x1c = np.clip(x1b, 0, img_shape[1]-1)
    y1c = np.clip(y1b, 0, img_shape[0]-1)
    x2c = np.clip(x2b, 0, img_shape[1]-1)
    y2c = np.clip(y2b, 0, img_shape[0]-1)

    # project
    x1d = int((x1c / img_shape[1]) * grid_size)
    y1d = int((y1c / img_shape[0]) * grid_size)
    x2d = int((x2c / img_shape[1]) * grid_size)
    y2d = int((y2c / img_shape[0]) * grid_size)

    assert 0 <= x1d < grid_size
    assert 0 <= y1d < grid_size
    assert 0 <= x2d < grid_size
    assert 0 <= y2d < grid_size

    # output ground truth:
    # - 1 heatmap that is 1 everywhere where there is a bounding box
    # - 9 position sensitive heatmaps,
    #   e.g. the first one is 1 everywhere where there is the _top left corner_
    #        of a bounding box,
    #        the second one is 1 for the top center cell,
    #        the third one is 1 for the top right corner,
    #        ...
    grids = np.zeros((grid_size, grid_size, 1+9), dtype=np.float32)
    # first heatmap
    grids[y1d:y2d+1, x1d:x2d+1, 0] = 1
    # position sensitive heatmaps
    nb_cells_x = 3
    nb_cells_y = 3
    cell_width = (x2d - x1d) / nb_cells_x
    cell_height = (y2d - y1d) / nb_cells_y
    cell_counter = 0
    for j in range(nb_cells_y):
        # Fractional cell bounds, floored to grid indices and clipped.
        cell_y1 = y1d + cell_height * j
        cell_y2 = cell_y1 + cell_height
        cell_y1_int = np.clip(int(math.floor(cell_y1)), 0, img_shape[0]-1)
        cell_y2_int = np.clip(int(math.floor(cell_y2)), 0, img_shape[0]-1)
        for i in range(nb_cells_x):
            cell_x1 = x1d + cell_width * i
            cell_x2 = cell_x1 + cell_width
            cell_x1_int = np.clip(int(math.floor(cell_x1)), 0, img_shape[1]-1)
            cell_x2_int = np.clip(int(math.floor(cell_x2)), 0, img_shape[1]-1)
            grids[cell_y1_int:cell_y2_int+1, cell_x1_int:cell_x2_int+1, 1+cell_counter] = 1
            cell_counter += 1
    return grids 
Example 71
Project: pyblish-win   Author: pyblish   File: fractions.py    GNU Lesser General Public License v3.0 4 votes vote down vote up
def limit_denominator(self, max_denominator=1000000):
        """Closest Fraction to self with denominator at most max_denominator.

        >>> Fraction('3.141592653589793').limit_denominator(10)
        Fraction(22, 7)
        >>> Fraction('3.141592653589793').limit_denominator(100)
        Fraction(311, 99)
        >>> Fraction(4321, 8765).limit_denominator(10000)
        Fraction(4321, 8765)

        """
        # Walk the continued-fraction expansion of self, tracking the last
        # two convergents prev_num/prev_den and cur_num/cur_den whose
        # denominators stay within the limit.  The best rational
        # approximation with denominator <= max_denominator is either the
        # last convergent or the best semiconvergent built from it; compute
        # both candidates and return whichever lies closer to self.  Ties
        # favour the convergent (which, when max_denominator == 1 and self
        # is midway between two integers, is the floor of self).
        if max_denominator < 1:
            raise ValueError("max_denominator should be at least 1")
        if self._denominator <= max_denominator:
            return Fraction(self)

        prev_num, prev_den = 0, 1
        cur_num, cur_den = 1, 0
        num, den = self._numerator, self._denominator
        while True:
            coeff = num // den
            next_den = prev_den + coeff * cur_den
            if next_den > max_denominator:
                break
            prev_num, cur_num = cur_num, prev_num + coeff * cur_num
            prev_den, cur_den = cur_den, next_den
            num, den = den, num - coeff * den

        # Largest k keeping the semiconvergent's denominator within bounds.
        steps = (max_denominator - prev_den) // cur_den
        semiconvergent = Fraction(prev_num + steps * cur_num,
                                  prev_den + steps * cur_den)
        convergent = Fraction(cur_num, cur_den)
        if abs(convergent - self) <= abs(semiconvergent - self):
            return convergent
        else:
            return semiconvergent
Example 72
Project: drydock   Author: airshipit   File: node.py    Apache License 2.0 4 votes vote down vote up
def calculate_bytes(cls, size_str=None, context=None):
    """Calculate the size in bytes of a size_str.

    Calculate the size as specified in size_str in the context of the provided
    blockdev or vg. Valid size_str format below (decimal, 1000-based units —
    matching the arithmetic in this method).

    #m or #M or #mb or #MB = # * 1000 * 1000
    #g or #G or #gb or #GB = # * 1000 * 1000 * 1000
    #t or #T or #tb or #TB = # * 1000 * 1000 * 1000 * 1000
    #% = Percentage of the total storage in the context

    Prepend '>' to the above to note the size as a minimum and the calculated
    size being the remaining storage available above the minimum

    If the calculated size is not available in the context, a NotEnoughStorage
    exception is raised.

    :param size_str: A string representing the desired size
    :param context: An instance of maasdriver.models.blockdev.BlockDevice or
                    instance of maasdriver.models.volumegroup.VolumeGroup. The
                    size_str is interpreted in the context of this device.
                    Only required for '>' and '%' size strings.
    :return size: The calculated size in bytes
    """
    pattern = r'(>?)(\d+)([mMbBgGtT%]{1,2})'
    regex = re.compile(pattern)
    match = regex.match(size_str)

    if not match:
        raise errors.InvalidSizeFormat(
            "Invalid size string format: %s" % size_str)

    if ((match.group(1) == '>' or match.group(3) == '%') and not context):
        raise errors.InvalidSizeFormat(
            'Sizes using the ">" or "%" format must specify a '
            'block device or volume group context')

    base_size = int(match.group(2))
    suffix = match.group(3)

    if suffix in ('m', 'M', 'mb', 'MB'):
        computed_size = base_size * (1000 * 1000)
    elif suffix in ('g', 'G', 'gb', 'GB'):
        computed_size = base_size * (1000 * 1000 * 1000)
    elif suffix in ('t', 'T', 'tb', 'TB'):
        computed_size = base_size * (1000 * 1000 * 1000 * 1000)
    elif suffix == '%':
        computed_size = math.floor((base_size / 100) * int(context.size))
    else:
        # BUGFIX: the regex admits two-letter combinations (e.g. 'bg', 'tm')
        # that none of the branches above handle; previously this fell
        # through and raised a NameError on the unbound computed_size.
        raise errors.InvalidSizeFormat(
            "Invalid size string format: %s" % size_str)

    # BUGFIX: the availability check used to dereference context
    # unconditionally, crashing with AttributeError for plain absolute
    # sizes (where context is documented as optional).
    if context is not None and computed_size > int(context.available_size):
        raise errors.NotEnoughStorage()

    if match.group(1) == '>':
        # Minimum size: grant all remaining storage minus the space
        # reserved for the partition table.
        computed_size = int(context.available_size
                            ) - ApplyNodeStorage.PART_TABLE_RESERVATION

    return computed_size
Example 73
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: minidensenet.py    MIT License 4 votes vote down vote up
def __init__(self, growthRate, depth, reduction, nClasses, bottleneck, n_dim):
        """Build a small three-stage DenseNet.

        :param growthRate: channels added by each dense layer (k in the
            DenseNet paper)
        :param depth: overall depth; each dense block gets (depth - 4) // 3
            layers (halved again when bottleneck layers are used)
        :param reduction: channel compression factor applied at each
            transition layer
        :param nClasses: number of output classes (final linear layer width)
        :param bottleneck: whether dense blocks use bottleneck layers
        :param n_dim: number of input image channels
        """
        super(MiniDenseNet, self).__init__()

        # Layers per dense block; bottleneck layers count double.
        nDenseBlocks = (depth - 4) // 3
        if bottleneck:
            nDenseBlocks //= 2

        # Stem: 3x3 conv producing 2k channels.
        nChannels = 2 * growthRate
        self.conv1 = nn.Conv2d(n_dim, nChannels, kernel_size=3, padding=1, bias=False)
        # Stage 1: dense block, then a transition that compresses channels
        # by 'reduction'.
        self.dense1 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += nDenseBlocks * growthRate
        nOutChannels = int(math.floor(nChannels * reduction))
        self.trans1 = Transition(nChannels, nOutChannels)

        # Stage 2.
        nChannels = nOutChannels
        self.dense2 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += nDenseBlocks * growthRate
        nOutChannels = int(math.floor(nChannels * reduction))
        self.trans2 = Transition(nChannels, nOutChannels)

        # Stage 3: final dense block, no transition afterwards.
        nChannels = nOutChannels
        self.dense3 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += nDenseBlocks * growthRate

        self.bn1 = nn.BatchNorm2d(nChannels)
        # NOTE(review): the linear-layer input sizes (768 / 432) are
        # hard-coded for a specific input resolution and architecture —
        # confirm against the forward() implementation elsewhere in this file.
        if bottleneck == False:
            self.fc = nn.Linear(768, nClasses)
        else:
            self.fc = nn.Linear(432, nClasses)

        self.sig = nn.Sigmoid()
        self.num_classes =nClasses

        # He-style initialisation: convs ~ N(0, sqrt(2/n)); BN weight 1,
        # bias 0; linear bias 0 (linear weights keep their default init).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
Example 74
Project: programsynthesishunting   Author: flexgp   File: initialisation.py    GNU General Public License v3.0 4 votes vote down vote up
def seed_individuals(size):
    """
    Create a population of size where all individuals are copies of the same
    seeded individuals.
    
    :param size: The size of the required population.
    :return: A full population composed of the seeded individuals.
    """
    
    seeds = params['SEED_INDIVIDUALS']
    
    if not seeds:
        # No seed individual specified: cannot build a seeded population.
        s = "operators.initialisation.seed_individuals\n" \
            "Error: No seed individual specified for seed initialisation."
        raise Exception(s)
    
    # Each seed contributes an equal share of the requested population
    # (so the result may be slightly smaller than 'size').
    copies_per_seed = floor(size / len(seeds))
    
    population = []
    
    for seed_ind in seeds:
    
        if not isinstance(seed_ind, individual.Individual):
            # The seed object is not a PonyGE individual.
            s = "operators.initialisation.seed_individuals\n" \
                "Error: SEED_INDIVIDUALS instance is not a PonyGE " \
                "individual."
            raise Exception(s)
    
        # Generate copies_per_seed identical seed individuals.
        population.extend(seed_ind.deep_copy()
                          for _ in range(copies_per_seed))
    
    return population
Example 75
Project: video2commons   Author: toolforge   File: transcodejob.py    GNU General Public License v3.0 4 votes vote down vote up
def run_shell_exec(self, cmd, track=True):
        """
        Run the shell exec command.

        The command is wrapped with ulimit (output file size / virtual
        memory), nice and timeout guards and run through the shell with
        stderr merged into stdout.  When *track* is True, ffmpeg-style
        "Duration:" / "time=" stamps in the output are parsed into a
        completion percentage reported via self.statuscallback; a TaskAbort
        raised by the callback kills the whole process group and re-raises.

        @param cmd String Command to be run
        @param track Boolean Whether to parse output for progress reporting
        @return int, string
        """
        # NOTE(review): escape_shellarg and the background_* limits are
        # module-level names defined elsewhere in this file.
        # ' 2>&1' merges stderr into stdout so progress lines can be read
        # from a single pipe.
        cmd = 'ulimit -f ' + escape_shellarg(background_size_limit) + ';' + \
            'ulimit -v ' + escape_shellarg(background_memory_limit) + ';' + \
            'nice -n ' + escape_shellarg(background_priority) + ' ' + \
            'timeout ' + escape_shellarg(background_time_limit) + ' ' + \
            cmd + \
            ' 2>&1'

        # Adapted from https://gist.github.com/marazmiki/3015621
        # preexec_fn=os.setsid puts the child in its own session/process
        # group so the whole tree can be killed via os.killpg below.
        process = subprocess.Popen(
            cmd, stdin=None, stdout=subprocess.PIPE, stderr=None,
            universal_newlines=True, shell=True, preexec_fn=os.setsid
        )

        # ffmpeg prints total "Duration: HH:MM:SS" once, then repeated
        # "time=HH:MM:SS" progress stamps.
        re_duration = re.compile(r'Duration: (\d{2}:\d{2}:\d{2})')
        re_position = re.compile(r'time=(\d{2}:\d{2}:\d{2})', re.I)

        duration = None
        position = None
        newpercentage = percentage = -1

        while process.poll() is None:
            # for line in process.stdout.readlines():
            # http://bugs.python.org/issue3907
            while True:
                line = process.stdout.readline()
                if not line:
                    break

                if track:
                    # First find the total duration, then track position
                    # stamps against it.
                    if duration is None:
                        duration_match = re_duration.search(line)
                        if duration_match:
                            duration = time_to_seconds(duration_match.group(1))
                    else:
                        position_match = re_position.search(line)
                        if position_match:
                            position = time_to_seconds(position_match.group(1))
                            if duration and position:
                                # Clamp to 100 in case position overshoots.
                                newpercentage = min(int(
                                    math.floor(100 * position / duration)
                                ), 100)

                    # Only report when the percentage actually changed.
                    if newpercentage != percentage:
                        percentage = newpercentage
                        try:
                            self.statuscallback(None, percentage)
                        except TaskAbort:
                            # Abort requested: kill the entire process group.
                            os.killpg(os.getpgid(process.pid), signal.SIGTERM)
                            raise

            time.sleep(2)

        process.stdout.close()
        # Second tuple element is unused by this implementation.
        return process.returncode, ''
Example 76
Project: AnisotropicMultiStreamCNN   Author: AnnekeMeyer   File: utils.py    MIT License 4 votes vote down vote up
def sizeCorrectionImage(img, factor, imgSize):
    """Pad (and, if needed, crop in z) an image so its x/y sizes are
    multiples of *factor* and its z size equals *imgSize*.

    Assumes the input image size is larger than the minimum size, except for
    the z-dimension.  *factor* matters so the image can later be resampled
    by 1/factor (e.g. due to slice thickness) without errors.

    :param img: SimpleITK image to correct
    :param factor: x/y sizes are padded up to the next multiple of this
    :param imgSize: target size of the z-dimension
    :return: the corrected (padded and/or cropped) SimpleITK image
    """
    size = img.GetSize()
    correction = False

    # Padding needed so the x size becomes a multiple of 'factor'.
    if size[0] % factor != 0:
        cX = factor - (size[0] % factor)
        correction = True
    else:
        cX = 0
    # Same for the y size.
    if size[1] % factor != 0:
        cY = factor - (size[1] % factor)
        correction = True
    else:
        cY = 0

    if size[2] != imgSize:
        cZ = imgSize - size[2]
        # If the z size is larger than imgSize, crop it symmetrically
        # (customized to the data at hand; better if ROI extraction crops
        # the image).
        if cZ < 0:
            print('image gets filtered')
            cropFilter = sitk.CropImageFilter()
            cropFilter.SetUpperBoundaryCropSize([0, 0, int(math.floor(-cZ / 2))])
            cropFilter.SetLowerBoundaryCropSize([0, 0, int(math.ceil(-cZ / 2))])
            img = cropFilter.Execute(img)
            cZ = 0
        else:
            correction = True
    else:
        cZ = 0

    # If correction is necessary, grow the image with symmetric zero padding.
    if correction:
        padFilter = sitk.ConstantPadImageFilter()
        padFilter.SetPadLowerBound([int(math.floor(cX / 2)), int(math.floor(cY / 2)), int(math.floor(cZ / 2))])
        # BUGFIX: the upper y bound was math.ceil(cY) (missing the /2),
        # which padded y on the upper side by the full correction instead
        # of splitting it evenly between both sides like x and z.
        padFilter.SetPadUpperBound([int(math.ceil(cX / 2)), int(math.ceil(cY / 2)), int(math.ceil(cZ / 2))])
        padFilter.SetConstant(0)
        return padFilter.Execute(img)

    return img
Example 77
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 4 votes vote down vote up
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
    """Re-draw deformable-PSROIPooling offsets until every sampling location
    is differentiable.

    Simulates the deformable psroipooling forward pass: for each ROI, output
    channel and pooled cell it computes the sampled (h, w) feature-map
    locations and, whenever a clamped location lands within 1e-3 of an
    integer grid line (where bilinear interpolation is not differentiable),
    replaces the offset with a fresh random draw and re-checks.

    :param input_rois: array of shape (num_rois, 5): (img_idx, x0, y0, x1, y1)
    :param input_offset: offsets indexed as
        (roi, 2 * class_id + {x: 0, y: 1}, part_h, part_w), pre-scaled by
        1 / trans_std
    :return: a copy of input_offset with non-differentiable offsets replaced

    NOTE(review): uses random.random(), so the result depends on the global
    RNG state, and the rejection loop terminates only probabilistically.
    """
    num_rois = input_rois.shape[0]
    output_offset = input_offset.copy()
    # simulate deformable psroipooling forward function
    for roi_idx in range(num_rois):
        sub_rois = input_rois[roi_idx, :].astype(np.float32)
        img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
        # ROI corners mapped to feature-map coordinates; the -0.5 shift
        # follows the pixel-center convention of the original operator.
        roi_start_w = round(x0) * spatial_scale - 0.5
        roi_start_h = round(y0) * spatial_scale - 0.5
        roi_end_w = round(x1 + 1) * spatial_scale - 0.5
        roi_end_h = round(y1 + 1) * spatial_scale - 0.5
        roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
        # Each pooled bin is subdivided into sample_per_part sub-bins.
        bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
        sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
        for c_top in range(output_dim):
            # Offsets are shared per class, not per output channel.
            channel_each_cls = output_dim / num_classes
            class_id = int(c_top / channel_each_cls)
            for ph in range(pooled_h):
                for pw in range(pooled_w):
                    # Part cell of the offset map this pooled bin reads from.
                    part_h = int(math.floor(float(ph) / pooled_h * part_size))
                    part_w = int(math.floor(float(pw) / pooled_w * part_size))
                    trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
                    trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
                    bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w

                    # Rejection-sample (trans_x, trans_y) until every sub-bin
                    # sample point is safely away from integer coordinates.
                    need_check = True
                    while need_check:
                        pass_check = True
                        for ih in range(sample_per_part):
                            for iw in range(sample_per_part):
                                h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
                                w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w

                                # Out-of-bounds samples are skipped by the
                                # operator, so they need no check here.
                                if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
                                    continue

                                w = min(max(w, 0.1), feat_w - 1.1)
                                h = min(max(h, 0.1), feat_h - 1.1)
                                # if the following condiiton holds, the sampling location is not differentiable
                                # therefore we need to re-do the sampling process
                                if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
                                    trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
                                    pass_check = False
                                    break
                            if not pass_check:
                                break
                        if pass_check:
                            # Store back in the offset map's (unscaled) units.
                            output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
                            output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
                            need_check = False

    return output_offset
Example 78
Project: zippy   Author: Illumina   File: bwa.py    Apache License 2.0 4 votes vote down vote up
def workflow(self):
    """Align FASTQs with bwa mem and coordinate-sort the resulting BAM.

    Schedules four dependent tasks: output-directory creation, bwa-mem
    alignment piped into samtools for BAM compression, a samtools sort,
    and removal of the unsorted intermediate BAM.
    """
    # Create the output directory
    cmd = "mkdir -p {}".format(self.output_dir)
    self.addTask(label="make_out_dir", command=cmd, isForceLocal=True)
    out_bam = os.path.join(self.output_dir, "out.bam")

    # Build the FASTQ argument (paired-end or single-end).
    if len(self.fastq) == 2:
        fastq = " ".join(self.fastq)
    elif len(self.fastq) == 1:
        # BUGFIX: use the single path, not the list itself — previously the
        # command line contained a Python repr like "['x.fq']".
        fastq = self.fastq[0]
    else:
        # BUGFIX: was `raise("...")`, which raises a TypeError (string is
        # not an exception) instead of the intended error.
        raise ValueError("More than two FASTQs passed to bwamem!")
    if self.args != "":
        self.args = " " + self.args
    else:
        # Default: mark shorter split hits as secondary and attach a read group.
        self.args = " -M -R \'@RG\\tID:1\\tLB:{0}\\tPL:ILLUMINA\\tSM:{0}\'".format(self.sample)
    # Figure out the number of cores we can use for alignment and for bam compression
    total_threads = self.cores * 2  # At least 2
    addtl_compression_threads = max(int(0.1 * total_threads), 1)  # At a minimum, allocate one extra thread for bam compression
    bwa_threads = total_threads  # Since BWA's output is thread-dependent, we don't decrement here in order to avoid surprises
    assert bwa_threads >= 1
    # BUGFIX: the samtools thread flag "-@" had been mangled into an
    # "[email protected]" e-mail-obfuscation artifact; restored here.
    cmd = "%s mem" % self.bwa_exec \
          + " -t %i" % (bwa_threads) \
          + self.args \
          + " %s %s" % (self.genome_fa, fastq) \
          + " | %s view -@ %i -1 -o %s -" % (self.samtools_exec, addtl_compression_threads, out_bam)
    self.flowLog(cmd)
    self.addTask(label="bwamem", command=cmd, nCores=self.cores,
                 memMb=self.mem, dependencies="make_out_dir")

    # Sort BAM
    out_sorted_bam = os.path.join(self.output_dir, "out.sorted.bam")
    out_temp = os.path.join(self.output_dir, "tmp")
    # Calculate resources for sort; under-allocate per-thread memory to
    # leave some overhead.
    # NOTE(review): mem_per_thread is computed but never used — presumably
    # it was meant to be passed to "samtools sort -m"; confirm upstream.
    sort_threads = self.cores * 2
    mem_per_thread = int(math.floor(float(self.mem) / sort_threads * 0.75))
    cmd = self.samtools_exec \
          + " sort %s" % out_bam \
          + " -O bam" \
          + " -o " + out_sorted_bam \
          + " -T " + out_temp \
          + " -@ %i" % sort_threads
    self.addTask(label="sort_bam", command=cmd, nCores=self.cores, memMb=self.mem, dependencies="bwamem")

    # Clean up the unsorted BAM
    cmd = "rm {}".format(out_bam)
    self.addTask(label="del_unsorted_bam", command=cmd, dependencies="sort_bam")
Example 79
Project: procgen   Author: juancroldan   File: noise.py    Apache License 2.0 4 votes vote down vote up
def perlin3D(x, y, z):
    """Generate 3D perlin noise.

    Taken from Improving Noise by Ken Perlin, SIGGRAPH 2002."""
    # Integer lattice cell, wrapped at 256 to index the permutation table.
    cell_x = floor(x) & 0xFF
    cell_y = floor(y) & 0xFF
    cell_z = floor(z) & 0xFF

    # Fractional position inside the unit cell.
    x %= 1
    y %= 1
    z %= 1

    # Smoothed interpolation weights.
    fx = _perlin_fade(x)
    fy = _perlin_fade(y)
    fz = _perlin_fade(z)

    # Hash the eight cube corners through the permutation table.
    a = _perlin_p[cell_x] + cell_y
    aa = _perlin_p[a] + cell_z
    ab = _perlin_p[a + 1] + cell_z
    b = _perlin_p[cell_x + 1] + cell_y
    ba = _perlin_p[b] + cell_z
    bb = _perlin_p[b + 1] + cell_z

    # Gradient contributions at the near (z) face...
    n000 = _perlin_grad3D(_perlin_p[aa], x, y, z)
    n100 = _perlin_grad3D(_perlin_p[ba], x - 1, y, z)
    n010 = _perlin_grad3D(_perlin_p[ab], x, y - 1, z)
    n110 = _perlin_grad3D(_perlin_p[bb], x - 1, y - 1, z)
    # ...and at the far (z - 1) face.
    n001 = _perlin_grad3D(_perlin_p[aa + 1], x, y, z - 1)
    n101 = _perlin_grad3D(_perlin_p[ba + 1], x - 1, y, z - 1)
    n011 = _perlin_grad3D(_perlin_p[ab + 1], x, y - 1, z - 1)
    n111 = _perlin_grad3D(_perlin_p[bb + 1], x - 1, y - 1, z - 1)

    # Trilinear interpolation: blend along x, then y, then z.
    near = _perlin_lerp(fy,
                        _perlin_lerp(fx, n000, n100),
                        _perlin_lerp(fx, n010, n110))
    far = _perlin_lerp(fy,
                       _perlin_lerp(fx, n001, n101),
                       _perlin_lerp(fx, n011, n111))
    return _perlin_lerp(fz, near, far)
Example 80
Project: procgen   Author: juancroldan   File: noise.py    Apache License 2.0 4 votes vote down vote up
def simplex2D(x, y):
    """Generate 2D simplex noise

    Taken from Stefan Gustavson 2012 Java implementation."""
    # Skew the input point onto the simplex grid.
    skew = (x + y) * _simplex_F2
    i = floor(x + skew)
    j = floor(y + skew)
    # Unskew the cell origin back to (x, y) space.
    unskew = (i + j) * _simplex_G2
    origin_x = i - unskew
    origin_y = j - unskew
    # Distances of the point from the cell origin.
    x0 = x - origin_x
    y0 = y - origin_y

    # For the 2D case the simplex shape is an equilateral triangle;
    # determine which of the two triangles contains the point.
    if x0 > y0:
        # lower triangle, XY order: (0,0)->(1,0)->(1,1)
        i1, j1 = 1, 0
    else:
        # upper triangle, YX order: (0,0)->(0,1)->(1,1)
        i1, j1 = 0, 1

    # Offsets for the middle and last corners in unskewed coordinates.
    x1 = x0 - i1 + _simplex_G2
    y1 = y0 - j1 + _simplex_G2
    x2 = x0 - 1 + 2 * _simplex_G2
    y2 = y0 - 1 + 2 * _simplex_G2

    # Hashed gradient indices of the three simplex corners.
    ii = i & 255
    jj = j & 255
    gi0 = _simplex_perm_mod12[ii + _simplex_perm[jj]]
    gi1 = _simplex_perm_mod12[ii + i1 + _simplex_perm[jj + j1]]
    gi2 = _simplex_perm_mod12[ii + 1 + _simplex_perm[jj + 1]]

    def corner_contribution(t, gi, cx, cy):
        # Radially-attenuated gradient contribution of a single corner.
        if t < 0:
            return 0
        t *= t
        return t * t * _simplex_dot2D(_simplex_grad3[gi], cx, cy)

    n0 = corner_contribution(0.5 - x0 * x0 - y0 * y0, gi0, x0, y0)
    n1 = corner_contribution(0.5 - x1 * x1 - y1 * y1, gi1, x1, y1)
    n2 = corner_contribution(0.5 - x2 * x2 - y2 * y2, gi2, x2, y2)
    # Scale so the result lies roughly in [-1, 1].
    return 70 * (n0 + n1 + n2)