Python math.sqrt() Examples

The following are code examples showing how to use math.sqrt(). They are drawn from open source Python projects.
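For quick orientation, here is a minimal, self-contained sketch of the function itself (not taken from any of the projects below): math.sqrt(x) accepts a non-negative number, always returns a float, and raises ValueError for negative input; cmath.sqrt handles the complex case.

import math
import cmath

print(math.sqrt(16))     # 4.0 -- the result is always a float
print(math.sqrt(2))      # 1.4142135623730951

try:
    math.sqrt(-1.0)      # negative input is rejected
except ValueError as err:
    print(err)           # math domain error

print(cmath.sqrt(-1.0))  # 1j -- use cmath.sqrt for complex square roots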

Example 1
Project: Wide-Residual-Nets-for-SETI   Author: sgrvinod   File: wresnet_models.py    Apache License 2.0
def __init__(self, n, k, block=BasicBlock, dropout=0.0):
        super(WideResNet, self).__init__()

        if (n - 4) % 6 != 0:
            raise ValueError("Invalid depth! Depth must be (6 * n_blocks + 4).")
        n_blocks = (n - 4) / 6

        self.conv_block1 = nn.Conv2d(2, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_block2 = self._make_layer(block, n_blocks, 16, 16 * k, 2, dropout)
        self.conv_block3 = self._make_layer(block, n_blocks, 16 * k, 32 * k, 2, dropout)
        self.conv_block4 = self._make_layer(block, n_blocks, 32 * k, 64 * k, 2, dropout)
        self.bn1 = nn.BatchNorm2d(64 * k)
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(64 * k * 6 * 8, 7)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n_weights = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n_weights))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_() 
Example 2
Project: L.E.S.M.A   Author: NatanaelAntonioli   File: L.E.S.M.A. - Fabrica de Noobs Speedtest.py    Apache License 2.0
def distance(origin, destination):
	"""Determine distance between 2 sets of [lat,lon] in km"""

	lat1, lon1 = origin
	lat2, lon2 = destination
	radius = 6371  # km

	dlat = math.radians(lat2 - lat1)
	dlon = math.radians(lon2 - lon1)
	a = (math.sin(dlat / 2) * math.sin(dlat / 2) +
		 math.cos(math.radians(lat1)) *
		 math.cos(math.radians(lat2)) * math.sin(dlon / 2) *
		 math.sin(dlon / 2))
	c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
	d = radius * c

	return d 
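As a quick sanity check of the helper above (the coordinates here are illustrative, not values used by the project): one degree of longitude along the equator spans roughly 111.2 km.

print(distance((0.0, 0.0), (0.0, 1.0)))              # ~111.19 km, one degree of longitude at the equator
print(distance((52.52, 13.405), (48.8566, 2.3522)))  # Berlin to Paris, roughly 878 km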
Example 3
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: senet.py    MIT License
def __init__(self, block, n_size=1, num_classes=1, num_rgb=2, base=32):
        super(IceResNet, self).__init__()
        self.base = base
        self.num_classes = num_classes
        self.inplane = self.base  # 45 epochs
        # self.inplane = 16 # 57 epochs
        self.conv1 = nn.Conv2d(num_rgb, self.inplane, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplane)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, self.inplane, blocks=2 * n_size, stride=2)
        self.layer2 = self._make_layer(block, self.inplane * 2, blocks=2 * n_size, stride=2)
        self.layer3 = self._make_layer(block, self.inplane * 4, blocks=2 * n_size, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1)

        self.fc = nn.Linear(int(8 * self.base), num_classes)
        nn.init.kaiming_normal(self.fc.weight)
        self.sig = nn.Sigmoid()

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Example 4
Project: unicorn-hat-hd   Author: pimoroni   File: demo.py    MIT License
def swirl(x, y, step):
    x -= (u_width / 2)
    y -= (u_height / 2)
    dist = math.sqrt(pow(x, 2) + pow(y, 2)) / 2.0
    angle = (step / 10.0) + (dist * 1.5)
    s = math.sin(angle)
    c = math.cos(angle)
    xs = x * c - y * s
    ys = x * s + y * c
    r = abs(xs + ys)
    r = r * 12.0
    r -= 20
    return (r, r + (s * 130), r + (c * 130))


# roto-zooming checker board 
Example 5
Project: kicker-module   Author: EvanTheB   File: trueskill.py    GNU General Public License v3.0
def chances(team_a, team_b, DRAW_PROBABILITY, BETA):

    draw_margin = get_draw_margin_from_draw_probability(
        DRAW_PROBABILITY, BETA, len(team_a) + len(team_b))

    c = math.sqrt(sum([p.sigma ** 2 for p in team_a]) +
                  sum([p.sigma ** 2 for p in team_b]) +
                  len(team_a + team_b) * BETA ** 2)
    skill_a = sum([p.mu for p in team_a])
    skill_b = sum([p.mu for p in team_b])
    skill_delta = skill_a - skill_b

    skill_delta /= c
    draw_margin /= c
    p_win = gaussian_cumulative_to(skill_delta - abs(draw_margin))
    p_loss = gaussian_cumulative_to(-skill_delta - abs(draw_margin))

    p_draw = (gaussian_cumulative_to(abs(skill_delta) + abs(draw_margin))
              - gaussian_cumulative_to(abs(skill_delta) - abs(draw_margin)))

    return p_win, p_draw, p_loss 
Example 6
Project: chainer-openai-transformer-lm   Author: soskek   File: opt.py    MIT License
def update_core_cpu(self, param):
        grad = param.grad
        if grad is None:
            return
        hp = self.hyperparam
        eps = grad.dtype.type(hp.eps)
        if hp.eps != 0 and eps == 0:
            raise ValueError(
                'eps of Adam optimizer is too small for {} ({})'.format(
                    grad.dtype.name, hp.eps))
        m, v = self.state['m'], self.state['v']

        m += (1 - hp.beta1) * (grad - m)
        v += (1 - hp.beta2) * (grad * grad - v)

        vhat = v
        # This Adam multiplies the scheduled adaptive learning rate
        # with both the main term and the weight decay.
        # Normal Adam: param.data -= hp.eta * (self.lr * m / (numpy.sqrt(vhat) + hp.eps) +
        #                                      hp.weight_decay_rate * param.data)
        param.data -= hp.eta * self.lr * (m / (numpy.sqrt(vhat) + hp.eps) +
                                          hp.weight_decay_rate * param.data) 
Example 7
Project: chainer-openai-transformer-lm   Author: soskek   File: opt.py    MIT License
def update_core_gpu(self, param):
        grad = param.grad
        if grad is None:
            return

        hp = self.hyperparam
        eps = grad.dtype.type(hp.eps)
        if hp.eps != 0 and eps == 0:
            raise ValueError(
                'eps of Adam optimizer is too small for {} ({})'.format(
                    grad.dtype.name, hp.eps))

        cuda.elementwise(
            'T grad, T lr, T one_minus_beta1, T one_minus_beta2, T eps, \
             T eta, T weight_decay_rate',
            'T param, T m, T v',
            '''m += one_minus_beta1 * (grad - m);
               v += one_minus_beta2 * (grad * grad - v);
               param -= eta * lr * (m / (sqrt(v) + eps) +
                               weight_decay_rate * param);''',
            'adam')(grad, self.lr, 1 - hp.beta1,
                    1 - hp.beta2, hp.eps,
                    hp.eta, hp.weight_decay_rate,
                    param.data, self.state['m'], self.state['v']) 
Example 8
Project: cat-bbs   Author: aleju   File: model.py    MIT License
def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(MyResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # note the increasing dilation
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilation=1)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)

        # these layers will not be used
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Example 9
Project: pyblish-win   Author: pyblish   File: random.py    GNU Lesser General Public License v3.0
def _test_generator(n, func, args):
    import time
    print n, 'times', func.__name__
    total = 0.0
    sqsum = 0.0
    smallest = 1e10
    largest = -1e10
    t0 = time.time()
    for i in range(n):
        x = func(*args)
        total += x
        sqsum = sqsum + x*x
        smallest = min(x, smallest)
        largest = max(x, largest)
    t1 = time.time()
    print round(t1-t0, 3), 'sec,',
    avg = total/n
    stddev = _sqrt(sqsum/n - avg*avg)
    print 'avg %g, stddev %g, min %g, max %g' % \
              (avg, stddev, smallest, largest) 
Example 10
Project: building-boundary   Author: Geodan   File: segment.py    MIT License
def dist_point_line(self, point):
        """
        Computes the distance from the given point to the fitted line.

        Parameters
        ----------
        point : (1x2) array
            The X and Y coordinates of a point.

        Returns
        -------
        dist : float
            The distance from the given point to the fitted line.

        .. [1] https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line
        """
        dist = (abs(self.a * point[0] + self.b * point[1] + self.c) /
                math.sqrt(self.a**2 + self.b**2))
        return dist 
Example 11
Project: python-samples   Author: dek-odoo   File: dek_program021.py    Apache License 2.0
def main():
    position = [0, 0]
    while True:
        input_value = raw_input()
        position_value = input_value.split(' ')
        if position_value[0].upper() == 'UP':
            position[0] += int(position_value[1])
        elif position_value[0].upper() == 'DOWN':
            position[0] -= int(position_value[1])
        elif position_value[0].upper() == 'LEFT':
            position[1] -= int(position_value[1])
        elif position_value[0].upper() == 'RIGHT':
            position[1] += int(position_value[1])
        elif not input_value:
            break
        else:
            pass
    print position[0], position[1]
    print 'Current Position is :',
    print int(round(math.sqrt(position[0] ** 2 + position[1] ** 2))) 
Example 12
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: resnet_v1.py    MIT License
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                 bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    # maxpool different from pytorch-resnet, to match tf-faster-rcnn
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    # use stride 1 for the last conv4 layer (same as tf-faster-rcnn)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=1)

    for m in self.modules():
      if isinstance(m, nn.Conv2d):
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
      elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_() 
Example 13
Project: comet-commonsense   Author: atcbosselut   File: gpt.py    Apache License 2.0
def _attn(self, q, k, v, sequence_mask):
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))

        b_subset = self.b[:, :, :w.size(-2), :w.size(-1)]

        if sequence_mask is not None:
            b_subset = b_subset * sequence_mask.view(
                sequence_mask.size(0), 1, -1)
            b_subset = b_subset.permute(1, 0, 2, 3)

        w = w * b_subset + -1e9 * (1 - b_subset)
        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)
        return torch.matmul(w, v) 
Example 14
Project: InkscapeBarrelDistortion   Author: ucuapps   File: distortion.py    MIT License
def distort_coordinates(self, x, y):
        """Method applies barrel distorsion to given points with distorsion center in center of image, selected to 
        
        Args:
            x (float): X coordinate of given point
            y (float): Y coordinate of given point
        
        Returns:
            tuple(float, float): Tuple with X,Y distorted coordinates of given point
        """
        x_u = (x - self.x_c) / (self.width + self.height)
        y_u = (y - self.y_c) / (self.width + self.height)
        x_d = x_u / 2 / (self.q * y_u**2 + x_u**2 * self.q) * (
            1 - math.sqrt(1 - 4 * self.q * y_u**2 - 4 * x_u**2 * self.q))
        y_d = y_u / 2 / (self.q * y_u**2 + x_u**2 * self.q) * (
            1 - math.sqrt(1 - 4 * self.q * y_u**2 - 4 * x_u**2 * self.q))
        x_d *= self.width + self.height
        y_d *= self.width + self.height
        x_d += self.x_c
        y_d += self.y_c
        return x_d, y_d 
Example 15
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: main-tf-audio.py    MIT License
def __init__(self, block, n_size=1, num_classes=30, base=32):
        super(TFAudioSENet, self).__init__()
        self.base = base
        self.inplane = self.base  # 45 epochs
        # self.inplane = 16 # 57 epochs
        self.conv1 = nn.Conv2d(1, self.inplane, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplane)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, self.inplane, blocks=2 * n_size, stride=2)
        self.layer2 = self._make_layer(block, self.inplane * 2, blocks=2 * n_size, stride=2)
        self.layer3 = self._make_layer(block, self.inplane * 4, blocks=2 * n_size, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1)

        self.fc = nn.Linear(int(8 * self.base), num_classes)
        # nn.init.kaiming_normal(self.fc.weight)
        # self.sig = nn.Sigmoid()

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Example 16
Project: streetview_objectmapping   Author: vlkryl   File: objectmapping.py    MIT License
def haversine(lon1, lat1, lon2, lat2):
    """
    Calculate the great circle distance between two points 
    on the earth (specified in decimal degrees)
    """
    # convert decimal degrees to radians 
    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
    # haversine formula 
    dlon = lon2 - lon1 
    dlat = lat2 - lat1 
    a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
    c = 2 * asin(sqrt(a)) 
    m = 6367000. * c
    return m

# calculating the intersection  point between two rays (specified each by camera position and depth-estimated object location) 
Example 17
Project: programsynthesishunting   Author: flexgp   File: zdt1.py    GNU General Public License v3.0
def evaluate(self, ind, **kwargs):
        
        min_value = [0] * 30
        max_value = [1] * 30
        
        real_chromosome = binary_phen_to_float(ind.phenotype, 30, min_value,
                                               max_value)
        
        summation = 0
        for i in range(1, len(real_chromosome)):
            summation += real_chromosome[i]
        
        g = 1 + 9 * summation / (len(real_chromosome) - 1.0)
        h = 1 - sqrt(real_chromosome[0] / g)
        
        return g * h 
Example 18
Project: unicorn-hat-hd   Author: pimoroni   File: demo.py    MIT License
def tunnel(x, y, step):
    speed = step / 100.0
    x -= (u_width / 2)
    y -= (u_height / 2)
    xo = math.sin(step / 27.0) * 2
    yo = math.cos(step / 18.0) * 2
    x += xo
    y += yo
    if y == 0:
        if x < 0:
            angle = -(math.pi / 2)
        else:
            angle = (math.pi / 2)
    else:
        angle = math.atan(x / y)
    if y > 0:
        angle += math.pi
    angle /= 2 * math.pi  # convert angle to 0...1 range
    hyp = math.sqrt(math.pow(x, 2) + math.pow(y, 2))
    shade = hyp / 2.1
    shade = 1 if shade > 1 else shade
    angle += speed
    depth = speed + (hyp / 10)
    col1 = hue_to_rgb[step % 255]
    col1 = (col1[0] * 0.8, col1[1] * 0.8, col1[2] * 0.8)
    col2 = hue_to_rgb[step % 255]
    col2 = (col2[0] * 0.3, col2[1] * 0.3, col2[2] * 0.3)
    col = col1 if int(abs(angle * 6.0)) % 2 == 0 else col2
    td = .3 if int(abs(depth * 3.0)) % 2 == 0 else 0
    col = (col[0] + td, col[1] + td, col[2] + td)
    col = (col[0] * shade, col[1] * shade, col[2] * shade)
    return (col[0] * 255, col[1] * 255, col[2] * 255) 
Example 19
Project: BERT-Classification-Tutorial   Author: Socialbird-AILab   File: modeling.py    Apache License 2.0
def gelu(input_tensor):
    """Gaussian Error Linear Unit.

    This is a smoother version of the RELU.
    Original paper: https://arxiv.org/abs/1606.08415

    Args:
      input_tensor: float Tensor to perform activation.

    Returns:
      `input_tensor` with the GELU activation applied.
    """
    cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
    return input_tensor * cdf 
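Since the snippet above is TensorFlow-specific, here is a scalar re-statement of the same erf-based formula using only the standard library (my own sketch, not code from the project), which makes the sqrt(2) constant easy to check by hand:

import math

def gelu_scalar(x):
    # 0.5 * x * (1 + erf(x / sqrt(2))) -- exact GELU for a single float
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

print(gelu_scalar(0.0))  # 0.0
print(gelu_scalar(1.0))  # ~0.8413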
Example 20
Project: simulated-annealing-tsp   Author: chncyhn   File: anneal.py    MIT License
def __init__(self, coords, T=-1, alpha=-1, stopping_T=-1, stopping_iter=-1):
        self.coords = coords
        self.N = len(coords)
        self.T = math.sqrt(self.N) if T == -1 else T
        self.T_save = self.T  # save initial T to reset if batch annealing is used
        self.alpha = 0.995 if alpha == -1 else alpha
        self.stopping_temperature = 1e-8 if stopping_T == -1 else stopping_T
        self.stopping_iter = 100000 if stopping_iter == -1 else stopping_iter
        self.iteration = 1

        self.nodes = [i for i in range(self.N)]

        self.best_solution = None
        self.best_fitness = float("Inf")
        self.fitness_list = [] 
Example 21
Project: simulated-annealing-tsp   Author: chncyhn   File: anneal.py    MIT License
def dist(self, node_0, node_1):
        """
        Euclidean distance between two nodes.
        """
        coord_0, coord_1 = self.coords[node_0], self.coords[node_1]
        return math.sqrt((coord_0[0] - coord_1[0]) ** 2 + (coord_0[1] - coord_1[1]) ** 2) 
Example 22
Project: kicker-module   Author: EvanTheB   File: trueskill.py    GNU General Public License v3.0
def match_quality(teams, BETA):
    # Set up multivariate gaussians
    u = np.matrix([p.mu for p in itertools.chain.from_iterable(teams)]).T
    summa = np.diagflat(
        [p.sigma ** 2 for p in itertools.chain.from_iterable(teams)])

    total_players = sum(len(x) for x in teams)
    done_players = 0
    A_T = []
    for i in range(len(teams) - 1):
        A_T.append(
            np.array(
                [0] * done_players +
                [1] * len(teams[i]) +
                [-1] * len(teams[i + 1]) +
                [0] * (total_players - done_players -
                       len(teams[i]) - len(teams[i + 1]))
            )
        )
        done_players += len(teams[i])
    A = np.matrix(A_T).T

    common = BETA ** 2 * A.T * A + A.T * summa * A
    exp_part = -0.5 * u.T * A * np.linalg.inv(common) * A.T * u
    sqrt_part = np.linalg.det(BETA ** 2 * A.T * A) / np.linalg.det(common)
    return math.sqrt(sqrt_part) * math.exp(exp_part) 
Example 23
Project: kicker-module   Author: EvanTheB   File: trueskill.py    GNU General Public License v3.0
def gaussian_at(x, mean=0.0, standard_dev=1.0):
    """
    gaussian function at x
    """
    # // See http://mathworld.wolfram.com/NormalDistribution.html
    # // 1 -(x-mean)^2 / (2*stdDev^2)
    # // P(x) = ------------------- * e
    # // stdDev * sqrt(2*pi)
    multiplier = 1.0 / (standard_dev * math.sqrt(2 * math.pi))
    exp_part = (-1.0 * (x - mean) ** 2 / (2 * (standard_dev ** 2)))
    result = multiplier * math.exp(exp_part)
    return result 
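A quick check of the density above: at the mean of a standard normal the value is 1 / sqrt(2 * pi) ≈ 0.3989, and one standard deviation away it drops to about 0.2420.

print(gaussian_at(0.0))  # ~0.3989
print(gaussian_at(1.0))  # ~0.2420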
Example 24
Project: kicker-module   Author: EvanTheB   File: trueskill.py    GNU General Public License v3.0
def gaussian_cumulative_to(x, mean=0.0, standard_dev=1.0):
    """
    cumulative (error function) to x.
    """
    x = (x - mean) / standard_dev
    return 0.5 + 0.5 * math.erf(x / math.sqrt(2)) 
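This is the standard normal CDF written with math.erf; by symmetry it returns exactly 0.5 at the mean, and the familiar 97.5th percentile sits near 1.96.

print(gaussian_cumulative_to(0.0))   # 0.5
print(gaussian_cumulative_to(1.96))  # ~0.975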
Example 25
Project: BlueLightMeter   Author: chripell   File: blm_client.py    Apache License 2.0
def calc_goal(self):
        if self.which == 'Flash':
            ev = self.ev_max
        else:
            ev = self.ev
        if self.what == 'Av' or self.what == 'Tv':
            delta_ev = math.log(self.ISO / 100.0, 2.0)
            ev += delta_ev
            ev2 = math.pow(2.0, ev)
            if self.what == 'Tv':
                tv = math.pow(self.Av, 2.0) / ev2
                tvn = self.find_nearer(tv, self.TVc)
                self.goal.set_markup('<span size="38000">%s s</span>' % tvn)
                self.goal_ev.set_text('Ev=%.1f' %
                                      (self.calc_ev(self.Av, self.make_float(tvn)) - delta_ev))
            elif self.what == 'Av':
                av = math.sqrt(ev2 * self.Tv)
                avn = self.find_nearer(av, self.AVc)
                self.goal.set_markup('<span size="38000">f/%s</span>' % avn)
                self.goal_ev.set_text('Ev=%.1f' %
                                      (self.calc_ev(float(avn), self.Tv) - delta_ev))
        elif self.what == 'ISO':
            evb = self.calc_ev(self.Av, self.Tv)
            isov = math.pow(2.0, evb - ev) * 100.0
            isovn = self.find_nearer(isov, self.ISOc)
            self.goal.set_markup('<span size="38000">%s ISO</span>' % isovn)
            self.goal_ev.set_text('Ev=%.1f' %
                                  (evb + math.log(float(isovn) / 100.0, 2.0))) 
Example 26
Project: chainer-openai-transformer-lm   Author: soskek   File: opt.py    MIT License
def _scheduled_learning_rate(schedule, hp, t):
    if t == 0:
        raise RuntimeError(
            'Can\'t determine the learning rate of Adam optimizer '
            'because the update steps have not been started.')
    fix1 = 1. - math.pow(hp.beta1, t)
    fix2 = 1. - math.pow(hp.beta2, t)
    lrt = hp.alpha * math.sqrt(fix2) / fix1
    lrt *= schedule(t / hp.t_total, hp.warmup)
    return lrt 
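The sqrt here is Adam's bias-correction factor, sqrt(1 - beta2^t) / (1 - beta1^t), applied to alpha before the warmup schedule. With the common choices beta1=0.9 and beta2=0.999 (hypothetical values, not necessarily this project's defaults), the factor at t=1 works out as:

import math

print(math.sqrt(1 - 0.999) / (1 - 0.9))  # ~0.3162, so the first step uses about 0.32 * alpha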
Example 27
Project: chainer-openai-transformer-lm   Author: soskek   File: model_py.py    MIT License
def gelu(x):
    return 0.5 * x * (1 + F.tanh(math.sqrt(2 / math.pi)
                                 * (x + 0.044715 * (x ** 3)))) 
Example 28
Project: chainer-openai-transformer-lm   Author: soskek   File: model_py.py    MIT License
def __call__(self, x):
        # chainer requires explicit broadcast for avoiding latent bugs
        u = F.mean(x, -1, keepdims=True)
        u = F.broadcast_to(u, x.shape)
        s = F.mean((x - u) ** 2, -1, keepdims=True)
        s = F.broadcast_to(s, x.shape)
        x = (x - u) / F.sqrt(s + self.e)
        return F.bias(F.scale(x, self.g, axis=2), self.b, axis=2) 
Example 29
Project: cat-bbs   Author: aleju   File: plotting.py    MIT License
def __init__(self, titles, increasing, save_to_fp):
        assert len(titles) == len(increasing)
        n_plots = len(titles)
        self.titles = titles
        self.increasing = dict([(title, incr) for title, incr in zip(titles, increasing)])
        self.colors = ["red", "blue", "cyan", "magenta", "orange", "black"]

        self.nb_points_max = 500
        self.save_to_fp = save_to_fp
        self.start_batch_idx = 0
        self.autolimit_y = False
        self.autolimit_y_multiplier = 5

        #self.fig, self.axes = plt.subplots(nrows=2, ncols=2, figsize=(20, 20))
        nrows = max(1, int(math.sqrt(n_plots)))
        ncols = int(math.ceil(n_plots / nrows))
        width = ncols * 10
        height = nrows * 10

        self.fig, self.axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(width, height))

        if nrows == 1 and ncols == 1:
            self.axes = [self.axes]
        else:
            self.axes = self.axes.flat

        title_to_ax = dict()
        for idx, (title, ax) in enumerate(zip(self.titles, self.axes)):
            title_to_ax[title] = ax
        self.title_to_ax = title_to_ax

        self.fig.tight_layout()
        self.fig.subplots_adjust(left=0.05) 
Example 30
Project: pyblish-win   Author: pyblish   File: test_find.py    GNU Lesser General Public License v3.0
def test_libm(self):
        import math
        libm = cdll.libm
        sqrt = libm.sqrt
        sqrt.argtypes = (c_double,)
        sqrt.restype = c_double
        self.assertEqual(sqrt(2), math.sqrt(2)) 
Example 31
Project: pyblish-win   Author: pyblish   File: test_libc.py    GNU Lesser General Public License v3.0
def test_sqrt(self):
        lib.my_sqrt.argtypes = c_double,
        lib.my_sqrt.restype = c_double
        self.assertEqual(lib.my_sqrt(4.0), 2.0)
        import math
        self.assertEqual(lib.my_sqrt(2.0), math.sqrt(2.0)) 
Example 32
Project: pyblish-win   Author: pyblish   File: test_math.py    GNU Lesser General Public License v3.0
def test_exceptions(self):
        try:
            x = math.exp(-1000000000)
        except:
            # mathmodule.c is failing to weed out underflows from libm, or
            # we've got an fp format with huge dynamic range
            self.fail("underflowing exp() should not have raised "
                        "an exception")
        if x != 0:
            self.fail("underflowing exp() should have returned 0")

        # If this fails, probably using a strict IEEE-754 conforming libm, and x
        # is +Inf afterwards.  But Python wants overflows detected by default.
        try:
            x = math.exp(1000000000)
        except OverflowError:
            pass
        else:
            self.fail("overflowing exp() didn't trigger OverflowError")

        # If this fails, it could be a puzzle.  One odd possibility is that
        # mathmodule.c's macros are getting confused while comparing
        # Inf (HUGE_VAL) to a NaN, and artificially setting errno to ERANGE
        # as a result (and so raising OverflowError instead).
        try:
            x = math.sqrt(-1.0)
        except ValueError:
            pass
        else:
            self.fail("sqrt(-1) didn't raise ValueError") 
Example 33
Project: pyblish-win   Author: pyblish   File: test_long.py    GNU Lesser General Public License v3.0
def test_float_overflow(self):
        for x in -2.0, -1.0, 0.0, 1.0, 2.0:
            self.assertEqual(float(long(x)), x)

        shuge = '12345' * 120
        huge = 1L << 30000
        mhuge = -huge
        namespace = {'huge': huge, 'mhuge': mhuge, 'shuge': shuge, 'math': math}
        for test in ["float(huge)", "float(mhuge)",
                     "complex(huge)", "complex(mhuge)",
                     "complex(huge, 1)", "complex(mhuge, 1)",
                     "complex(1, huge)", "complex(1, mhuge)",
                     "1. + huge", "huge + 1.", "1. + mhuge", "mhuge + 1.",
                     "1. - huge", "huge - 1.", "1. - mhuge", "mhuge - 1.",
                     "1. * huge", "huge * 1.", "1. * mhuge", "mhuge * 1.",
                     "1. // huge", "huge // 1.", "1. // mhuge", "mhuge // 1.",
                     "1. / huge", "huge / 1.", "1. / mhuge", "mhuge / 1.",
                     "1. ** huge", "huge ** 1.", "1. ** mhuge", "mhuge ** 1.",
                     "math.sin(huge)", "math.sin(mhuge)",
                     "math.sqrt(huge)", "math.sqrt(mhuge)", # should do better
                     "math.floor(huge)", "math.floor(mhuge)"]:

            self.assertRaises(OverflowError, eval, test, namespace)

            # XXX Perhaps float(shuge) can raise OverflowError on some box?
            # The comparison should not.
            self.assertNotEqual(float(shuge), int(shuge),
                "float(shuge) should not equal int(shuge)") 
Example 34
Project: UR5_Controller   Author: tsinghua-rll   File: API.py    MIT License
def from_q_to_rad_axis(q):
        """
        :param q: quaternion in w-i-j-k format
        :return: rad in format x-y-z
        """
        norm_axis = math.sqrt(q[1] ** 2 + q[2] ** 2 + q[3] ** 2)
        rad = 2.0 * math.atan2(norm_axis, q[0])
        if norm_axis < 1e-5:
            return np.zeros((3,), dtype=np.float32)
        else:
            return rad * np.asarray(q[1:], dtype=np.float32) / norm_axis 
Example 35
Project: UR5_Controller   Author: tsinghua-rll   File: API.py    MIT License
def from_rad_axis_to_q(r):
        """
        :param q: rad in format (x-y-z) * r
        :return: quaternion in w-i-j-k format
        """
        norm_axis = math.sqrt(r[0] ** 2 + r[1] ** 2 + r[2] ** 2)
        if norm_axis < 1e-5:
            return np.asarray((1.0, 0., 0., 0.), dtype=np.float32)
        else:
            return np.asarray((math.cos(0.5 * norm_axis),
                               math.sin(0.5 * norm_axis) * r[0] / norm_axis,
                               math.sin(0.5 * norm_axis) * r[1] / norm_axis,
                               math.sin(0.5 * norm_axis) * r[2] / norm_axis), dtype=np.float32) 
Example 36
Project: UR5_Controller   Author: tsinghua-rll   File: quaternion.py    MIT License
def from_matrix_to_q(mat):
    qw = math.sqrt(max(0, 1 + mat[0][0] + mat[1][1] + mat[2][2])) / 2.0
    qi = math.copysign(math.sqrt(max(0, 1 + mat[0][0] - mat[1][1] - mat[2][2])) / 2.0, mat[2][1] - mat[1][2])
    qj = math.copysign(math.sqrt(max(0, 1 - mat[0][0] + mat[1][1] - mat[2][2])) / 2.0, mat[0][2] - mat[2][0])
    qk = math.copysign(math.sqrt(max(0, 1 - mat[0][0] - mat[1][1] + mat[2][2])) / 2.0, mat[1][0] - mat[0][1])
    return np.asarray((qw, qi, qj, qk), dtype=np.float32) 
Example 37
Project: building-boundary   Author: Geodan   File: alpha_shape.py    MIT License
def triangle_geometry(triangle):
    """
    Compute the area and circumradius of a triangle.

    Parameters
    ----------
    triangle : (3x3) array-like
        The coordinates of the points which form the triangle.

    Returns
    -------
    area : float
        The area of the triangle
    circum_r : float
        The circumradius of the triangle
    """
    pa, pb, pc = triangle
    # Lengths of sides of triangle
    a = math.hypot((pa[0] - pb[0]), (pa[1] - pb[1]))
    b = math.hypot((pb[0] - pc[0]), (pb[1] - pc[1]))
    c = math.hypot((pc[0] - pa[0]), (pc[1] - pa[1]))
    # Semiperimeter of triangle
    s = (a + b + c) / 2.0
    # Area of triangle by Heron's formula
    area = math.sqrt(s * (s - a) * (s - b) * (s - c))
    if area != 0:
        circum_r = (a * b * c) / (4.0 * area)
    else:
        circum_r = 0
    return area, circum_r 
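A 3-4-5 right triangle makes a convenient hand check: Heron's formula gives an area of 6, and the circumradius of a right triangle is half the hypotenuse, 2.5 (the coordinates below are my own test input, not data from the project).

print(triangle_geometry([(0, 0), (3, 0), (0, 4)]))  # (6.0, 2.5)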
Example 38
Project: building-boundary   Author: Geodan   File: segment.py    MIT License
def dist_points_line(self):
        """
        Computes the distances from each point to the fitted line.

        Attributes
        ----------
        distances : (1xN) array
            The distances from each point to the fitted line.

        .. [1] https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line
        """
        self.distances = (abs(self.a * self.points[:, 0] +
                              self.b * self.points[:, 1] + self.c) /
                          math.sqrt(self.a**2 + self.b**2)) 
Example 39
Project: python-samples   Author: dek-odoo   File: dek_program006.py    Apache License 2.0
def nsqrt():
    print "sample input ", '100,150,180'
    c = 50
    h = 30
    d = getNumberListFromConsole()
    # [18.24828759089466, 22.360679774997898, 24.49489742783178]
    q = [math.sqrt((2 * c * dk) / h) for dk in d]
    q = [int(x) for x in q]  # [18, 22, 24]
    print 'nsqrt : ', q
    return q 
Example 40
Project: CozmoCommander   Author: cozmobotics   File: CozmoCommander.py    Apache License 2.0
def check_tol(charger: cozmo.objects.Charger,dist_charger=40):
    # Check if the position tolerance in front of the charger is respected
    global RobotGlobal
    robot = RobotGlobal
    global PI

    distance_tol = 5 # mm, tolerance for placement error
    angle_tol = 5*PI/180 # rad, tolerance for orientation error

    try: 
        charger = robot.world.wait_for_observed_charger(timeout=2,include_existing=True)
    except:
        debug (1,'WARNING: Cannot see the charger to verify the position.')

    # Calculate positions
    r_coord = [0,0,0]
    c_coord = [0,0,0]
    # Coordonates of robot and charger
    r_coord[0] = robot.pose.position.x #.x .y .z, .rotation otherwise
    r_coord[1] = robot.pose.position.y
    r_coord[2] = robot.pose.position.z
    r_zRot = robot.pose_angle.radians # .degrees or .radians
    c_coord[0] = charger.pose.position.x
    c_coord[1] = charger.pose.position.y
    c_coord[2] = charger.pose.position.z
    c_zRot = charger.pose.rotation.angle_z.radians

    # Create target position 
    # dist_charger in mm, distance if front of charger
    c_coord[0] -=  dist_charger*math.cos(c_zRot)
    c_coord[1] -=  dist_charger*math.sin(c_zRot)

    # Direction and distance to target position (in front of charger)
    distance = math.sqrt((c_coord[0]-r_coord[0])**2 + (c_coord[1]-r_coord[1])**2 + (c_coord[2]-r_coord[2])**2)

    if(distance < distance_tol and math.fabs(r_zRot-c_zRot) < angle_tol):
    	return 1
    else: 
    	return 0 
Example 41
Project: CozmoCommander   Author: cozmobotics   File: CozmoCommander.py    Apache License 2.0
def drawLineMouseUp (event):
	global FlagMouseDown
	global LineCoords
	global Line
	global MapCanvas
	
	if (FlagMouseDown == True):		# +++ no longer needed as we use <Release> instead of <Move>
		FlagMouseDown = False
		
		MapCanvas.itemconfig (Line, fill="darkred")
		MapCanvas.itemconfig (Line, width=3)
		
		DeltaX = canvas2worldX(LineCoords[2]) - canvas2worldX(LineCoords[0])
		DeltaY = canvas2worldY(LineCoords[3]) - canvas2worldY(LineCoords[1])
		
		Angle = angle360 (DeltaX, DeltaY)
		debug (5,"Angle: " + str(Angle))
		# Angle += 90
		# if (Angle > 360):
			# Angle -= 360

		Wall = RobotGlobal.world.create_custom_fixed_object(
					Pose(canvas2worldX(LineCoords[0]) + DeltaX/2, canvas2worldY(LineCoords[1]) + DeltaY/2, 0, angle_z=degrees(Angle)),
					math.sqrt(DeltaX ** 2 + DeltaY ** 2), 10, 100, 
					relative_to_robot=False)			

		if Wall:
			debug (3,"Wall created successfully")
		else:
			debug (1,"Could not create wall")

#---------------------------------------------------------------------------- 
Example 42
Project: comet-commonsense   Author: atcbosselut   File: gpt.py    Apache License 2.0
def gelu(x):
    return (0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) *
            (x + 0.044715 * torch.pow(x, 3))))) 
Example 43
Project: comet-commonsense   Author: atcbosselut   File: gpt.py    Apache License 2.0
def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.e)
        return self.g * x + self.b 
Example 44
Project: OpenAPS   Author: medicinexlab   File: oldpred.py    MIT License
def _plot_old_pred_data(old_pred_data, show_pred_plot, save_pred_plot, show_clarke_plot, save_clarke_plot, id_str, algorithm_str, minutes_str):
    actual_bg_array = old_pred_data.result_actual_bg_array
    actual_bg_time_array = old_pred_data.result_actual_bg_time_array
    pred_array = old_pred_data.result_pred_array
    pred_time_array = old_pred_data.result_pred_time_array

    #Root mean squared error
    rms = math.sqrt(metrics.mean_squared_error(actual_bg_array, pred_array))
    print "                Root Mean Squared Error: " + str(rms)
    print "                Mean Absolute Error: " + str(metrics.mean_absolute_error(actual_bg_array, pred_array))
    print "                R^2 Coefficient of Determination: " + str(metrics.r2_score(actual_bg_array, pred_array))

    plot, zone = ClarkeErrorGrid.clarke_error_grid(actual_bg_array, pred_array, id_str + " " + algorithm_str + " " + minutes_str)
    print "                Percent A:{}".format(float(zone[0]) / (zone[0] + zone[1] + zone[2] + zone[3] + zone[4]))
    print "                Percent C, D, E:{}".format(float(zone[2] + zone[3] + zone[4])/ (zone[0] + zone[1] + zone[2] + zone[3] + zone[4]))
    print "                Zones are A:{}, B:{}, C:{}, D:{}, E:{}\n".format(zone[0],zone[1],zone[2],zone[3],zone[4])
    if save_clarke_plot: plt.savefig(id_str + algorithm_str.replace(" ", "") + minutes_str + "clarke.png")
    if show_clarke_plot: plot.show()

    plt.clf()
    plt.plot(actual_bg_time_array, actual_bg_array, label="Actual BG", color='black', linestyle='-')
    plt.plot(pred_time_array, pred_array, label="BG Prediction", color='black', linestyle=':')
    plt.title(id_str + " " + algorithm_str + " " + minutes_str + " BG Analysis")
    plt.ylabel("Blood Glucose Level (mg/dl)")
    plt.xlabel("Time (minutes)")
    plt.legend(loc='upper left')

    # SHOW/SAVE PLOT DEPENDING ON THE BOOLEAN PARAMETER
    if save_pred_plot: plt.savefig(id_str + algorithm_str.replace(" ","") + minutes_str + "plot.png")
    if show_pred_plot: plt.show()


#Function to analyze the old OpenAPS data 
Example 45
Project: CSL_Hamburg_Noise   Author: CityScope   File: CityScopeTable.py    GNU General Public License v3.0
def get_flipped_origin(self, origin):
        table_width = self.get_table_column_count() * self.table_cell_size
        table_height = self.get_table_row_count() * self.table_cell_size
        diagonal = math.sqrt(pow(table_width, 2) + pow(table_height, 2))
        diagonal_angle = math.degrees(math.atan(table_width/table_height))
        angle_diagonal_to_x_axis = diagonal_angle - self.get_table_rotation()
        delta_x = math.sin(math.radians(angle_diagonal_to_x_axis)) * diagonal
        delta_y = math.cos(math.radians(angle_diagonal_to_x_axis)) * diagonal
        flipped_x = origin.x - delta_x
        flipped_y = origin.y + delta_y

        return Point(flipped_x, flipped_y) 
Example 46
Project: CSL_Hamburg_Noise   Author: CityScope   File: GridCell.py    GNU General Public License v3.0
def get_cell_corner(self, angle):
        if (angle % 90 == 0):
            distance = self.get_cell_size()
        elif (angle % 45 == 0):
            distance = self.get_cell_size() * math.sqrt(2)
        else:
            raise Exception('The angle does not correspond to a corner in a square. Given angle: {}'.format(angle))

        # direction by table rotation and angle of the corner
        bearing = angle + self.get_table_rotation()

        corner_x = self.get_origin().x + distance * math.sin(math.radians(bearing))
        corner_y = self.get_origin().y + distance * math.cos(math.radians(bearing))

        return Point(corner_x, corner_y) 
Example 47
Project: LSTM_LN   Author: exe1023   File: lstm.py    MIT License
def init_hidden(self, batch_size):
        # Uses Xavier init here.
        hiddens = []
        for l in self.layers:
            std = math.sqrt(2.0 / (l.input_size + l.hidden_size))
            h = Variable(Tensor(1, batch_size, l.hidden_size).normal_(0, std))
            c = Variable(Tensor(1, batch_size, l.hidden_size).normal_(0, std))
            if use_cuda:
                hiddens.append((h.cuda(), c.cuda()))
            else:
                hiddens.append((h, c))
        return hiddens 
Example 48
Project: LSTM_LN   Author: exe1023   File: lstm.py    MIT License
def reset_parameters(self):
        std = 1.0 / math.sqrt(self.input_size)
        for w in self.parameters():
            w.data.uniform_(-std, std) 
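The bound here is the usual 1 / sqrt(fan_in) heuristic; for a hypothetical input_size of 256 it works out to 1/16, so every weight is drawn from U(-0.0625, 0.0625).

import math

print(1.0 / math.sqrt(256))  # 0.0625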
Example 49
Project: LSTM_LN   Author: exe1023   File: lstm.py    MIT License
def forward(self, x, image_emb):
        if image_emb is None:
            return x
        # x: (batch, input_size)
        size = x.size()
        x = x.view(x.size(0), -1)
        x = (x - torch.mean(x, 1).unsqueeze(1).expand_as(x)) / torch.sqrt(torch.var(x, 1).unsqueeze(1).expand_as(x) + self.epsilon)

        delta_alpha, delta_beta = self.create_cln_input(image_emb)
        alpha = self.alpha.expand_as(x) + delta_alpha
        beta = self.beta.expand_as(x) + delta_beta
        x =  alpha * x + beta
        return x.view(size) 
Example 50
Project: LSTM_LN   Author: exe1023   File: lstm.py    MIT License
def reset_parameters(self):
        std = 1.0 / math.sqrt(self.input_size)
        for w in self.parameters():
            w.data.uniform_(-std, std) 
Example 51
Project: LSTM_LN   Author: exe1023   File: lstm.py    MIT License
def forward(self, x):
        size = x.size()
        x = x.view(x.size(0), -1)
        x = (x - torch.mean(x, 1).unsqueeze(1).expand_as(x)) / torch.sqrt(torch.var(x, 1).unsqueeze(1).expand_as(x) + self.epsilon)
        if self.learnable:
            x =  self.alpha.expand_as(x) * x + self.beta.expand_as(x)
        return x.view(size) 
Example 52
Project: mmdetection   Author: open-mmlab   File: deform_conv.py    Apache License 2.0
def reset_parameters(self):
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1. / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv) 
Example 53
Project: mmdetection   Author: open-mmlab   File: deform_conv.py    Apache License 2.0
def reset_parameters(self):
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1. / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.zero_() 
Example 54
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: resnext.py    MIT License
def __init__(self, block, depth, cardinality, base_width, num_classes, n_dim):
        super(GenericResNeXt, self).__init__()

        # Model type specifies number of layers for CIFAR-10 and CIFAR-100 model
        assert (depth - 2) % 9 == 0, 'depth should be one of 29, 38, 47, 56, 101'
        layer_blocks = (depth - 2) // 9

        self.cardinality = cardinality
        self.base_width = base_width
        self.num_classes = num_classes
        self.sig = nn.Sigmoid()

        self.conv_1_3x3 = nn.Conv2d(n_dim, 64, 3, 1, 1, bias=False)
        self.bn_1 = nn.BatchNorm2d(64)

        self.inplanes = 64
        self.stage_1 = self._make_layer(block, 64, layer_blocks, 1)
        self.stage_2 = self._make_layer(block, 128, layer_blocks, 2)
        self.stage_3 = self._make_layer(block, 256, layer_blocks, 2)
        self.avgpool = nn.AvgPool2d(8)
        self.classifier = nn.Linear(4096, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                init.kaiming_normal(m.weight)
                m.bias.data.zero_() 
Example 55
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: wrn.py    MIT License
def __init__(self, depth, num_classes=1, widen_factor=1, imgDim=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        n = int((depth - 4) / 6)
        block = BasicBlock
        self.num_classes = num_classes
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(imgDim, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(4096, self.num_classes)
        self.nChannels = nChannels[3]
        self.sig = nn.Sigmoid()
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_() 
Example 56
Project: simian   Author: Desenho2018-1   File: vector.py    MIT License
def __len__(self):
        """
        Length or magnitude of a 2D vector
        computed using pythagorean theorem.
        """
        return sqrt(pow(self.x, 2)+pow(self.y, 2)) 
Example 57
Project: MODS_ConvNet   Author: santiagolopezg   File: test_network.py    MIT License
def test_net(i, name):

	model = get_weights(i, name)
	print 'using weights from net trained on dataset {0} for {1}'. format(i, name)
	history = LossAccHistory()

    	(X_train, y_train), (X_test, y_test) = get_data(i, name)

    	Y_test = np_utils.to_categorical(y_test, nb_classes)

    	X_test /= 255

    	print(X_test.shape[0], 'test samples')
 
    	model.compile(loss='binary_crossentropy', 
                 optimizer= rmsprop(lr=0.001), #adadelta
		 metrics=['accuracy', 'matthews_correlation', 'precision', 'recall', sens, spec])
        
	ypred = model.predict_classes(X_test, verbose=1)
	ytrue = Y_test
	
	tp, tn, fp, fn = contingency(y_test, ypred)

	print '           |     true label\n---------------------------------'
	print 'pred label |  positive | negative'
	print 'positive   |     ', tp, ' |  ', fp
	print 'negative   |     ', fn, '  |  ', tn 

	prec = float(tp)/(tp+fp)
	se = float(tp) / (tp + fn)
	sp = float(tn) / (fp + tn)
	mcc = float(tp*tn - tp*fn)/(math.sqrt((tp + fp)*(tp+fn)*(tn+fp)*(tn+fn)))
	f1 = (2*prec*se)/(prec+se)
	acc = float(tp+tn)/(tp+tn+fp+fn)
	print '     sens     |     spec     |     mcc      |      f1      |      prec      |     acc       '
	print se, sp, mcc, f1, prec, acc
	
    	model.reset_states()
	return [se, sp, mcc, f1, prec, acc] 
Example 58
Project: MODS_ConvNet   Author: santiagolopezg   File: test_labcrossval_network.py    MIT License
def test_net(i, name):

	model = get_weights(i, name)
	print 'using weights from net trained on dataset {0} for {1}'. format(i, name)
	history = LossAccHistory()

    	(X_train, y_train), (X_test, y_test) = get_data(i, name)

    	Y_test = np_utils.to_categorical(y_test, nb_classes)

    	X_test /= 255

    	print(X_test.shape[0], 'test samples')
 
    	model.compile(loss='binary_crossentropy', 
                 optimizer= rmsprop(lr=0.001), #adadelta
		 metrics=['accuracy', 'matthews_correlation', 'precision', 'recall', sens, spec])
        
	ypred = model.predict_classes(X_test, verbose=1)
	ytrue = Y_test	

	
	tp, tn, fp, fn = contingency(y_test, ypred)

	print '           |     true label\n---------------------------------'
	print 'pred label |  positive | negative'
	print 'positive   |     ', tp, ' |  ', fp
	print 'negative   |     ', fn, '  |  ', tn 

	prec = float(tp)/(tp+fp)
	se = float(tp) / (tp + fn)
	sp = float(tn) / (fp + tn)
	mcc = float(tp*tn - tp*fn)/(math.sqrt((tp + fp)*(tp+fn)*(tn+fp)*(tn+fn)))
	f1 = (2*prec*se)/(prec+se)
	acc = float(tp+tn)/(tp+tn+fp+fn)
	print '     sens     |     spec     |     mcc      |      f1      |      prec      |     acc       '
	print se, sp, mcc, f1, prec, acc

    	model.reset_states()
	return [se, sp, mcc, f1, prec, acc] 
Example 59
Project: subword-qac   Author: clovaai   File: model.py    MIT License
def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)
        if self.layernorm:
            for ln in self.layernorm:
                ln.reset_parameters() 
Example 60
Project: snake   Author: valentinmace   File: map.py    MIT License
def distance(p1=None, p2=None):
    """
    Gives Euclidean distance between two points
    @jit is used to speed up computation

    :param p1: origin point
    :param p2: end point
    :return: distance
    """
    return math.sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2) 
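The classic 3-4-5 triangle works as a one-line check of this helper (coordinates chosen by me, not taken from the project):

print(distance((0, 0), (3, 4)))  # 5.0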
Example 61
Project: CLRS   Author: JasonVann   File: Fibonacci_Heap.py    MIT License
def D(n):
    import math
    thi = 0.5*(math.sqrt(5)+1)
    return math.ceil(math.log(n, thi)) 
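D(n) is the upper bound on node degree in a Fibonacci heap with n nodes: the logarithm of n in base phi (the golden ratio, written thi in the snippet), rounded up. For example:

print(D(10))   # 5   (log base phi of 10 is about 4.78)
print(D(100))  # 10  (log base phi of 100 is about 9.57)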
Example 62
Project: Random-Erasing   Author: zhunzhong07   File: transforms.py    Apache License 2.0
def __call__(self, img):

        if random.uniform(0, 1) > self.probability:
            return img

        for attempt in range(100):
            area = img.size()[1] * img.size()[2]
       
            target_area = random.uniform(self.sl, self.sh) * area
            aspect_ratio = random.uniform(self.r1, 1/self.r1)

            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))

            if w < img.size()[2] and h < img.size()[1]:
                x1 = random.randint(0, img.size()[1] - h)
                y1 = random.randint(0, img.size()[2] - w)
                if img.size()[0] == 3:
                    img[0, x1:x1+h, y1:y1+w] = self.mean[0]
                    img[1, x1:x1+h, y1:y1+w] = self.mean[1]
                    img[2, x1:x1+h, y1:y1+w] = self.mean[2]
                else:
                    img[0, x1:x1+h, y1:y1+w] = self.mean[0]
                return img

        return img 
Example 63
Project: Random-Erasing   Author: zhunzhong07   File: wrn.py    Apache License 2.0
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_() 
Example 64
Project: Random-Erasing   Author: zhunzhong07   File: resnet.py    Apache License 2.0
def __init__(self, depth, num_classes=1000):
        super(ResNet, self).__init__()
        # Model type specifies number of layers for CIFAR-10 model
        assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
        n = (depth - 2) // 6

        block = Bottleneck if depth >=44 else BasicBlock

        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16, n)
        self.layer2 = self._make_layer(block, 32, n, stride=2)
        self.layer3 = self._make_layer(block, 64, n, stride=2)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(64 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Example 65
Project: Random-Erasing   Author: zhunzhong07   File: wrn.py    Apache License 2.0
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(1, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_() 
Example 66
Project: Random-Erasing   Author: zhunzhong07   File: resnet.py    Apache License 2.0
def __init__(self, depth, num_classes=1000):
        super(ResNet, self).__init__()
        # Model type specifies number of layers for CIFAR-10 model
        assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
        n = (depth - 2) / 6

        block = Bottleneck if depth >=44 else BasicBlock

        self.inplanes = 16
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16, n)
        self.layer2 = self._make_layer(block, 32, n, stride=2)
        self.layer3 = self._make_layer(block, 64, n, stride=2)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(64 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Example 67
Project: look_around   Author: iqbalhusen   File: haversine.py    MIT License 5 votes vote down vote up
def distance(origin, destination):
    """
    Calculate the great circle distance between two points 
    on the earth (specified in decimal degrees)
    """
    # convert decimal degrees to radians 
    lon1, lat1, lon2, lat2 = map(radians, origin + destination)

    # Haversine formula
    dlon = lon2 - lon1 
    dlat = lat2 - lat1 
    a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
    c = 2 * asin(sqrt(a)) 
    r = 6371  # Radius of earth in kilometers. Use 3956 for miles
    return c * r 
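A usage sketch with hypothetical coordinates. The unpacking order above means both origin and destination are (lon, lat) pairs, and the call relies on haversine.py's own "from math import radians, sin, cos, asin, sqrt".

london = (-0.1278, 51.5074)   # (lon, lat), matching the unpacking order above
paris = (2.3522, 48.8566)
print(distance(london, paris))  # roughly 344 km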
Example 68
Project: kicker-module   Author: EvanTheB   File: trueskill.py    GNU General Public License v3.0 4 votes vote down vote up
def calculate_nvn(team_a, team_b, was_win, DRAW_PROBABILITY, BETA, DYNAMICS_FACTOR):
    """
    Calculates new trueskills for a two team game.
    Scores are translated to win/loss/draw.
    Teams are lists of players with 'mu' and 'sigma'
    If it wasn't a win, it was a draw.
    """
    draw_margin = get_draw_margin_from_draw_probability(
        DRAW_PROBABILITY, BETA, len(team_a) + len(team_b))

    c = math.sqrt(sum([p.sigma ** 2 for p in team_a]) +
                  sum([p.sigma ** 2 for p in team_b]) +
                  len(team_a + team_b) * BETA ** 2)
    skill_a = sum([p.mu for p in team_a])
    skill_b = sum([p.mu for p in team_b])

    # winner - loser
    if was_win:
        skill_delta_a = skill_a - skill_b
        skill_delta_b = skill_delta_a
    else:
        skill_delta_a = skill_a - skill_b
        skill_delta_b = skill_b - skill_a

    def update_team_ratings(team, mean_delta, is_draw, is_winner):
        """
        helper for doing the maths per team.
        """
        assert not (is_draw and is_winner)
        rank_multiplier = 1.0 if is_winner or is_draw else -1.0
        if not is_draw:
            v = v_exceeds_margin(mean_delta, draw_margin, c)
            w = w_exceeds_margin(mean_delta, draw_margin, c)
        else:
            v = v_within_margin(mean_delta, draw_margin, c)
            w = w_within_margin(mean_delta, draw_margin, c)

        for player in team:
            mean_multiplier = (player.sigma ** 2 + DYNAMICS_FACTOR ** 2) / c
            variance_with_dynamics = player.sigma ** 2 + DYNAMICS_FACTOR ** 2
            std_dev_multiplier = variance_with_dynamics / (c ** 2)
            # print mean_delta, is_draw, is_winner, player.mu, rank_multiplier,
            # mean_multiplier, v
            new_mean = player.mu + (rank_multiplier * mean_multiplier * v)
            new_std_dev = math.sqrt(
                variance_with_dynamics * (1 - w * std_dev_multiplier))

            player.mu = new_mean
            player.sigma = new_std_dev
    # print "game:"
    update_team_ratings(
        team_a, skill_delta_a, not was_win, was_win)
    update_team_ratings(
        team_b, skill_delta_b, not was_win, False) 
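A minimal usage sketch, assuming the helpers referenced above (get_draw_margin_from_draw_probability, v_exceeds_margin, w_exceeds_margin, v_within_margin, w_within_margin) are defined elsewhere in trueskill.py, and using the common TrueSkill defaults (mu = 25, sigma = 25/3, beta = 25/6, dynamics factor 25/300, 10% draw probability):

class Player(object):
    def __init__(self, mu=25.0, sigma=25.0 / 3):
        self.mu = mu
        self.sigma = sigma

team_a = [Player(), Player()]
team_b = [Player(), Player()]

# team_a beat team_b; mu and sigma are updated in place on every player
calculate_nvn(team_a, team_b, True,
              DRAW_PROBABILITY=0.10, BETA=25.0 / 6, DYNAMICS_FACTOR=25.0 / 300)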
Example 69
Project: Physics_Project   Author: QuandisS   File: core_functions.py    Apache License 2.0 4 votes vote down vote up
def return_the_instructions(var):
        if var == 'v0':
            return ['v0x / cos_a', 'v0y / sin_a', '( x - x0 ) / ( cos_a * t ) ', '( L - x0 ) / ( cos_a * t_all )', '( y - y0 + g * t * t / 2 ) / ( sin_a * t )', '( t_all * g ) / ( 2 * sin_a )', 'math.sqrt( ( 2 * g ) / cmath.sin( 2 * alpha ) )', '( vy + g * t ) / sin_a', 'math.sqrt( vy * vy + 2 * g * y ) / sin_a', 'math.sqrt( h_max * 2 * g ) / sin_a']

        if var == 'alpha':
            return ['math.radians( cmath.acos ( cos_a ) )', 'math.radians( cmath.asin ( sin_a ))', 'math.radians( cmath.asin( ( ( L * g ) / ( v0 * v0 ) ) / 2 ) )', 'math.radians( math.sqrt( cmath.asin( ( h_max * 2 * g ) / ( v0 * v0 ) ) ))']

        if var == 'g':
            return ['M * G / ( r * r )']

        if var == 'v0x':
            return ['v0 * cos_a', '( x - x0 ) / t', '( L - x0 ) / t_all']

        if var == 'v0y':
            return ['v0 * sin_a', '( y - y0 + g * t * t / 2 ) / t', 't_all * g / 2', 'vy + g * t', 'math.sqrt( vy * vy + 2 * g * y )', 'math.sqrt( h_max * 2 * g )']

        if var == 'vy':
            return ['v0y - g * t', 'v0 * sin_a - g * t', 'math.sqrt( v0y * v0y - 2 * g * y )']

        if var == 't_all':
            return ['( L - x0 ) / ( v0 * cos_a )', '( L - x0 ) / v0x']

        if var == 't':
            return ['( v0y - vy ) / g', '( v0y - v0 * sin_a ) / g', '( x - x0 ) / v0x', '( x - x0 )  / v0 * cos_a']

        if var == 'h_max':
            return ['y0 + v0 * sin_a * 0.5 * t_all - 0.25 * g * t_all * t_all / 2', 'y0 + v0y * 0.5 * t_all - 0.25 * g * t_all * t_all / 2']

        if var == 'x0':
            return ['x - v0x * t', 'x - v0 * cos_a * t', 'L - v0 * cos_a * t_all', 'L - v0x * t_all']

        if var == 'x':
            return ['x0 + v0x * t', 'x0 + v0 * cos_a * t']

        if var == 'y0':
            return ['y - v0y * t + g * t * t / 2', 'y - v0 * sin_a * t + g * t * t / 2', 'h_max - v0y * 0.5 * t_all + 0.25 * g * t_all * t_all / 2', 'h_max - v0 * sin_a * 0.5 * t_all + g * t_all * 0.25 * t_all / 2']

        if var == 'y':
            return ['y0 + v0y * t - g * t * t / 2', 'y0 + v0 * sin_a * t - g * t * t / 2', '( vy * vy - v0y * v0y ) / ( -2 * g )', '( vy * vy - v0 * sin_a * v0 * sin_a ) / ( -2 * g )']

        if var == 'L':
            return ['x0 + v0 * cos_a * t_all', 'x0 + v0x * t_all', 'v0 * v0 * math.sin( alpha * 2 ) / g']

        if var == 'F':
            return ['test', 'test']

        if var == 'm':
            return ['test', 'test']

        if var == 'sin_a':
            return ['cmath.sin( alpha )', 'v0y / v0', '( y - y0 + g * t * t / 2 ) / ( v0 * t )', 't_all * g / ( 2 * v0 )', '( t * g + vy ) / v0', 'math.sqrt( h_max * 2 * g ) / v0']

        if var == 'cos_a':
            return ['v0x / v0', '( L - x0 ) / ( v0 * t_all )', '( x - x0 ) / ( v0 * t )', 'cmath.cos( alpha )']


######


# the function takes an array of instructions and a dictionary of variables
# and looks for an instruction in which all the variables are known
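A sketch of how one of these instruction strings might be evaluated once all of its variables are known (hypothetical values; the surrounding project presumably eval()s the selected string against a dictionary of known variables):

import math

known = {'v0': 20.0, 'sin_a': 0.5, 'math': math}
instruction = 'v0 * sin_a'       # e.g. one of the 'v0y' instructions above
v0y = eval(instruction, known)   # 10.0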
Example 70
Project: pyblish-win   Author: pyblish   File: random.py    GNU Lesser General Public License v3.0 4 votes vote down vote up
def vonmisesvariate(self, mu, kappa):
        """Circular data distribution.

        mu is the mean angle, expressed in radians between 0 and 2*pi, and
        kappa is the concentration parameter, which must be greater than or
        equal to zero.  If kappa is equal to zero, this distribution reduces
        to a uniform random angle over the range 0 to 2*pi.

        """
        # mu:    mean angle (in radians between 0 and 2*pi)
        # kappa: concentration parameter kappa (>= 0)
        # if kappa = 0 generate uniform random angle

        # Based upon an algorithm published in: Fisher, N.I.,
        # "Statistical Analysis of Circular Data", Cambridge
        # University Press, 1993.

        # Thanks to Magnus Kessler for a correction to the
        # implementation of step 4.

        random = self.random
        if kappa <= 1e-6:
            return TWOPI * random()

        s = 0.5 / kappa
        r = s + _sqrt(1.0 + s * s)

        while 1:
            u1 = random()
            z = _cos(_pi * u1)

            d = z / (r + z)
            u2 = random()
            if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
                break

        q = 1.0 / r
        f = (q + z) / (1.0 + q * z)
        u3 = random()
        if u3 > 0.5:
            theta = (mu + _acos(f)) % TWOPI
        else:
            theta = (mu - _acos(f)) % TWOPI

        return theta

## -------------------- gamma distribution -------------------- 
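vonmisesvariate() is a method of random.Random, so in everyday use it is reached through the module-level instance; a quick usage sketch:

import math
import random

# mean direction pi, concentration 4.0; kappa = 0 would give a uniform angle
angle = random.vonmisesvariate(math.pi, 4.0)
print(0.0 <= angle < 2 * math.pi)  # True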
Example 71
Project: pyblish-win   Author: pyblish   File: random.py    GNU Lesser General Public License v3.0 4 votes vote down vote up
def gauss(self, mu, sigma):
        """Gaussian distribution.

        mu is the mean, and sigma is the standard deviation.  This is
        slightly faster than the normalvariate() function.

        Not thread-safe without a lock around calls.

        """

        # When x and y are two variables from [0, 1), uniformly
        # distributed, then
        #
        #    cos(2*pi*x)*sqrt(-2*log(1-y))
        #    sin(2*pi*x)*sqrt(-2*log(1-y))
        #
        # are two *independent* variables with normal distribution
        # (mu = 0, sigma = 1).
        # (Lambert Meertens)
        # (corrected version; bug discovered by Mike Miller, fixed by LM)

        # Multithreading note: When two threads call this function
        # simultaneously, it is possible that they will receive the
        # same return value.  The window is very small though.  To
        # avoid this, you have to use a lock around all calls.  (I
        # didn't want to slow this down in the serial case by using a
        # lock here.)

        random = self.random
        z = self.gauss_next
        self.gauss_next = None
        if z is None:
            x2pi = random() * TWOPI
            g2rad = _sqrt(-2.0 * _log(1.0 - random()))
            z = _cos(x2pi) * g2rad
            self.gauss_next = _sin(x2pi) * g2rad

        return mu + z*sigma

## -------------------- beta --------------------
## See
## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
## for Ivan Frohne's insightful analysis of why the original implementation:
##
##    def betavariate(self, alpha, beta):
##        # Discrete Event Simulation in C, pp 87-88.
##
##        y = self.expovariate(alpha)
##        z = self.expovariate(1.0/beta)
##        return z/(y+z)
##
## was dead wrong, and how it probably got that way. 
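This is the Box-Muller variant used by the standard library: each pair of uniform draws yields two independent standard normals, one returned immediately and one cached in gauss_next for the next call. Through the module-level instance it is simply:

import random

sample = random.gauss(0.0, 1.0)                      # one standard normal draw
heights = [random.gauss(170, 7) for _ in range(5)]   # mean 170, sigma 7 (made-up numbers)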
Example 72
Project: pyblish-win   Author: pyblish   File: test_long.py    GNU Lesser General Public License v3.0 4 votes vote down vote up
def test_auto_overflow(self):
        special = [0, 1, 2, 3, sys.maxint-1, sys.maxint, sys.maxint+1]
        sqrt = int(math.sqrt(sys.maxint))
        special.extend([sqrt-1, sqrt, sqrt+1])
        special.extend([-i for i in special])

        def checkit(*args):
            # Heavy use of nested scopes here!
            self.assertEqual(got, expected,
                Frm("for %r expected %r got %r", args, expected, got))

        for x in special:
            longx = long(x)

            expected = -longx
            got = -x
            checkit('-', x)

            for y in special:
                longy = long(y)

                expected = longx + longy
                got = x + y
                checkit(x, '+', y)

                expected = longx - longy
                got = x - y
                checkit(x, '-', y)

                expected = longx * longy
                got = x * y
                checkit(x, '*', y)

                if y:
                    with test_support.check_py3k_warnings():
                        expected = longx / longy
                        got = x / y
                    checkit(x, '/', y)

                    expected = longx // longy
                    got = x // y
                    checkit(x, '//', y)

                    expected = divmod(longx, longy)
                    got = divmod(x, y)
                    checkit(x, 'divmod', y)

                if abs(y) < 5 and not (x == 0 and y < 0):
                    expected = longx ** longy
                    got = x ** y
                    checkit(x, '**', y)

                    for z in special:
                        if z != 0 :
                            if y >= 0:
                                expected = pow(longx, longy, long(z))
                                got = pow(x, y, z)
                                checkit('pow', x, y, '%', z)
                            else:
                                self.assertRaises(TypeError, pow,longx, longy, long(z)) 
Example 73
Project: LipNet-PyTorch   Author: sailordiary   File: model.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def __init__(self, opt, vocab_size):
        super(LipNet, self).__init__()
        self.opt = opt
        self.conv = nn.Sequential(
            nn.Conv3d(3, 32, kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2)),
            nn.ReLU(True),
            nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)),
            nn.Dropout3d(opt.dropout),
            nn.Conv3d(32, 64, kernel_size=(3, 5, 5), stride=(1, 1, 1), padding=(1, 2, 2)),
            nn.ReLU(True),
            nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)),
            nn.Dropout3d(opt.dropout),
            nn.Conv3d(64, 96, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)),
            nn.ReLU(True),
            nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)),
            nn.Dropout3d(opt.dropout)
        )
        # T B C*H*W
        self.gru1 = nn.GRU(96 * 3 * 6, opt.rnn_size, 1, bidirectional=True)
        self.drp1 = nn.Dropout(opt.dropout)
        # T B F
        self.gru2 = nn.GRU(opt.rnn_size * 2, opt.rnn_size, 1, bidirectional=True)
        self.drp2 = nn.Dropout(opt.dropout)
        # T B V
        self.pred = nn.Linear(opt.rnn_size * 2, vocab_size + 1)
        
        # initialisations
        for m in self.conv.modules():
            if isinstance(m, nn.Conv3d):
                init.kaiming_normal_(m.weight, nonlinearity='relu')
                init.constant_(m.bias, 0)

        init.kaiming_normal_(self.pred.weight, nonlinearity='sigmoid')
        init.constant_(self.pred.bias, 0)

        for m in (self.gru1, self.gru2):
            stdv = math.sqrt(2 / (96 * 3 * 6 + opt.rnn_size))
            for i in range(0, opt.rnn_size * 3, opt.rnn_size):
                init.uniform_(m.weight_ih_l0[i: i + opt.rnn_size],
                            -math.sqrt(3) * stdv, math.sqrt(3) * stdv)
                init.orthogonal_(m.weight_hh_l0[i: i + opt.rnn_size])
                init.constant_(m.bias_ih_l0[i: i + opt.rnn_size], 0)
                init.uniform_(m.weight_ih_l0_reverse[i: i + opt.rnn_size],
                            -math.sqrt(3) * stdv, math.sqrt(3) * stdv)
                init.orthogonal_(m.weight_hh_l0_reverse[i: i + opt.rnn_size])
                init.constant_(m.bias_ih_l0_reverse[i: i + opt.rnn_size], 0) 
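A minimal construction sketch; the opt values below are assumptions in the spirit of the LipNet paper (dropout 0.5, 256-unit GRUs), not values taken from this repository, and vocab_size excludes the CTC blank that the + 1 on the output layer adds:

from argparse import Namespace

opt = Namespace(dropout=0.5, rnn_size=256)
model = LipNet(opt, vocab_size=27)  # e.g. 26 letters + space; blank added internally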
Example 74
Project: CozmoCommander   Author: cozmobotics   File: CozmoCommander.py    Apache License 2.0 4 votes vote down vote up
def final_adjust(charger: cozmo.objects.Charger, dist_charger=40, speed=40, critical=False):
    # Final adjustment to properly face the charger.
    # The position can be adjusted several times if
    # the precision is critical, i.e. when climbing
    # back onto the charger.
    global RobotGlobal
    robot = RobotGlobal
    global PI

    while True:
        # Calculate positions
        r_coord = [0, 0, 0]
        c_coord = [0, 0, 0]
        # Coordinates of robot and charger
        r_coord[0] = robot.pose.position.x  # .x .y .z, .rotation otherwise
        r_coord[1] = robot.pose.position.y
        r_coord[2] = robot.pose.position.z
        r_zRot = robot.pose_angle.radians  # .degrees or .radians
        c_coord[0] = charger.pose.position.x
        c_coord[1] = charger.pose.position.y
        c_coord[2] = charger.pose.position.z
        c_zRot = charger.pose.rotation.angle_z.radians

        # Create target position
        # dist_charger in mm, distance in front of the charger
        c_coord[0] -= dist_charger * math.cos(c_zRot)
        c_coord[1] -= dist_charger * math.sin(c_zRot)

        # Direction and distance to target position (in front of charger)
        distance = math.sqrt((c_coord[0] - r_coord[0])**2 + (c_coord[1] - r_coord[1])**2 + (c_coord[2] - r_coord[2])**2)
        vect = [c_coord[0] - r_coord[0], c_coord[1] - r_coord[1], c_coord[2] - r_coord[2]]
        # Angle of vector going from robot's origin to target's position
        theta_t = math.atan2(vect[1], vect[0])

        debug(2, 'CHECK: Adjusting position')
        # Face the target position
        angle = clip_angle(theta_t - r_zRot)
        robot.turn_in_place(radians(angle)).wait_for_completed()
        # Drive toward the target position
        robot.drive_straight(distance_mm(distance), speed_mmps(speed)).wait_for_completed()
        # Face the charger
        angle = clip_angle(c_zRot - theta_t)
        robot.turn_in_place(radians(angle)).wait_for_completed()

        # In case the robot does not need to climb onto the charger
        if not critical:
            break
        elif check_tol(charger, dist_charger):
            debug(2, 'CHECK: Robot aligned relative to the charger.')
            break
    return 
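The geometry inside the loop reduces to a 2-D drive-to-pose: the Euclidean distance to a point dist_charger in front of the charger, a first turn of atan2(dy, dx) minus the robot heading, and a second turn to match the charger heading. A standalone sketch with made-up poses:

import math

robot_xy, robot_heading = (0.0, 0.0), 0.0
charger_xy, charger_heading = (200.0, 100.0), math.pi / 2
dist_charger = 40

# target point 40 mm in front of the charger
tx = charger_xy[0] - dist_charger * math.cos(charger_heading)
ty = charger_xy[1] - dist_charger * math.sin(charger_heading)

dx, dy = tx - robot_xy[0], ty - robot_xy[1]
distance = math.hypot(dx, dy)
first_turn = math.atan2(dy, dx) - robot_heading
second_turn = charger_heading - math.atan2(dy, dx)
print(distance, first_turn, second_turn)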
Example 75
Project: OpenAPS   Author: medicinexlab   File: mlalgorithm.py    MIT License 4 votes vote down vote up
def analyze_ml_data(actual_bg_array, bg_prediction, bg_time_array, show_pred_plot, save_pred_plot, show_clarke_plot, save_clarke_plot, id_str, algorithm_str, minutes_str):
    """
    Function to analyze and plot the machine learning data. It takes in the actual_bg_array and the bg_prediction and compares
    the two with various analysis methods, such as root mean squared error, mean absolute error,
    R^2 coefficient of determination, and clarke error grid analysis.

    Input:      actual_bg_array                 The array of actual bg values
                bg_prediction                   The array of prediction bg values
                bg_time_array                   The array of times corresponding to bg_prediction
                show_pred_plot                  Boolean to show the prediction plot
                save_pred_plot                  Boolean to save the prediction plot
                show_clarke_plot                Boolean to show the clarke error grid
                save_clarke_plot                Boolean to save the clarke error grid
                id_str                          String of the ID
                algorithm_str                   String of the algorithm name
                minutes_str                     String of the number of minutes (both prediction and data minutes)
    Output:     None
    Usage:      analyze_ml_data(actual_bg_test_array, test_prediction, bg_time_test_array, True, False, True, False, "00000001", "Linear Regression", "Pred30Data5")
    """

    #Root mean squared error
    rms = math.sqrt(metrics.mean_squared_error(actual_bg_array, bg_prediction))
    print "                Root Mean Squared Error: " + str(rms)
    print "                Mean Absolute Error: " + str(metrics.mean_absolute_error(actual_bg_array, bg_prediction))
    print "                R^2 Coefficient of Determination: " + str(metrics.r2_score(actual_bg_array, bg_prediction))

    plot, zone = ClarkeErrorGrid.clarke_error_grid(actual_bg_array, bg_prediction, id_str + " " + algorithm_str + " " + minutes_str)
    print "                Percent A:{}".format(float(zone[0]) / (zone[0] + zone[1] + zone[2] + zone[3] + zone[4]))
    print "                Percent C, D, E:{}".format(float(zone[2] + zone[3] + zone[4])/ (zone[0] + zone[1] + zone[2] + zone[3] + zone[4]))
    print "                Zones are A:{}, B:{}, C:{}, D:{}, E:{}\n".format(zone[0],zone[1],zone[2],zone[3],zone[4])
    if save_clarke_plot: plt.savefig(id_str + algorithm_str.replace(" ", "") + minutes_str + "clarke.png")
    if show_clarke_plot: plot.show()

    plt.clf()
    plt.plot(bg_time_array, actual_bg_array, label="Actual BG", color='black', linestyle='-')
    plt.plot(bg_time_array, bg_prediction, label="BG Prediction", color='black', linestyle=':')
    plt.title(id_str + " " + algorithm_str + " " + minutes_str + " BG Analysis")
    plt.ylabel("Blood Glucose Level (mg/dl)")
    plt.xlabel("Time (minutes)")
    plt.legend(loc='upper left')

    # SHOW/SAVE PLOT DEPENDING ON THE BOOLEAN PARAMETER
    if save_pred_plot: plt.savefig(id_str + algorithm_str.replace(" ","") + minutes_str + "plot.png")
    if show_pred_plot: plt.show()


#Preprocesses the data by the standard scaler relative to the train_data_matrix 
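The first metric above is just the square root of sklearn's mean squared error; a standalone sketch with made-up glucose values:

import math
from sklearn import metrics

actual = [100, 120, 140]
predicted = [110, 115, 150]
rms = math.sqrt(metrics.mean_squared_error(actual, predicted))
print(rms)  # about 8.7 mg/dl for this toy data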
Example 76
Project: CIM2Matpower   Author: kkgerasimov   File: CIM2Matpower.py    MIT License 4 votes vote down vote up
def _get_acline_mva_lim(self, acline):
        limit_class = 'CurrentLimit'
        limits_mva = [0, 0, 0]
        num_matpower_limits = 3

        try:
            limits_from_mva = []
            for lim in acline.Terminals[0].OperationalLimitSet[0].OperationalLimitValue:
                if lim.__class__.__name__ == limit_class:
                    limits_from_mva.append(lim.value * acline.Terminals[0].TopologicalNode.SvVoltage.v * math.sqrt(3) / 1e3)
            if limits_from_mva:
                limits_from_mva.sort()
                if len(limits_from_mva) < num_matpower_limits:
                    limits_from_mva.extend([0] * (num_matpower_limits - len(limits_from_mva)))
            if not limits_from_mva:
                limits_from_mva = [0, 0, 0]
        except:
            limits_from_mva = [0, 0, 0]

        try:
            limits_to_mva = []
            for lim in acline.Terminals[1].OperationalLimitSet[0].OperationalLimitValue:
                if lim.__class__.__name__ == limit_class:
                    limits_to_mva.append(lim.value * acline.Terminals[1].TopologicalNode.SvVoltage.v * math.sqrt(3) / 1e3)
            if limits_to_mva:
                limits_to_mva.sort()
                if len(limits_to_mva) < num_matpower_limits:
                    limits_to_mva.extend([0] * (num_matpower_limits - len(limits_to_mva)))
            if not limits_to_mva:
                limits_to_mva = [0, 0, 0]
        except:
            limits_to_mva = [0, 0, 0]

        for i in range(0,num_matpower_limits):
            if limits_from_mva[i] < limits_to_mva[i]:
                limits_mva[i] = limits_from_mva[i]
            else:
                limits_mva[i] = limits_to_mva[i]

        return limits_mva

    #################################################################################################################### 
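The sqrt(3) factor converts a per-phase current limit into three-phase apparent power, S = sqrt(3) * V_LL * I; the snippet appears to assume the CIM voltage is in kV and the current limit in A, so dividing by 1e3 yields MVA. A minimal sketch of that conversion on its own, with hypothetical numbers:

import math

def current_limit_to_mva(i_amps, v_kv_line_to_line):
    # S [MVA] = sqrt(3) * V_LL [kV] * I [A] / 1000
    return math.sqrt(3) * v_kv_line_to_line * i_amps / 1e3

print(current_limit_to_mva(400, 110))  # a 400 A limit at 110 kV is roughly 76 MVA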
Example 77
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: boxes_grid.py    MIT License 4 votes vote down vote up
def get_boxes_grid(image_height, image_width):
    """
    Return the boxes on image grid.
    """

    # height and width of the heatmap
    if cfg.NET_NAME == 'CaffeNet':
        height = np.floor((image_height * max(cfg.TRAIN.SCALES) - 1) / 4.0 + 1)
        height = np.floor((height - 1) / 2.0 + 1 + 0.5)
        height = np.floor((height - 1) / 2.0 + 1 + 0.5)

        width = np.floor((image_width * max(cfg.TRAIN.SCALES) - 1) / 4.0 + 1)
        width = np.floor((width - 1) / 2.0 + 1 + 0.5)
        width = np.floor((width - 1) / 2.0 + 1 + 0.5)
    elif cfg.NET_NAME == 'VGGnet':
        height = np.floor(image_height * max(cfg.TRAIN.SCALES) / 2.0 + 0.5)
        height = np.floor(height / 2.0 + 0.5)
        height = np.floor(height / 2.0 + 0.5)
        height = np.floor(height / 2.0 + 0.5)

        width = np.floor(image_width * max(cfg.TRAIN.SCALES) / 2.0 + 0.5)
        width = np.floor(width / 2.0 + 0.5)
        width = np.floor(width / 2.0 + 0.5)
        width = np.floor(width / 2.0 + 0.5)
    else:
        assert False, 'The network architecture is not supported in utils.get_boxes_grid!'

    # compute the grid box centers
    h = np.arange(height)
    w = np.arange(width)
    y, x = np.meshgrid(h, w, indexing='ij') 
    centers = np.dstack((x, y))
    centers = np.reshape(centers, (-1, 2))
    num = centers.shape[0]

    # compute width and height of grid box
    area = cfg.TRAIN.KERNEL_SIZE * cfg.TRAIN.KERNEL_SIZE
    aspect = cfg.TRAIN.ASPECTS  # height / width
    num_aspect = len(aspect)
    widths = np.zeros((1, num_aspect), dtype=np.float32)
    heights = np.zeros((1, num_aspect), dtype=np.float32)
    for i in range(num_aspect):
        widths[0,i] = math.sqrt(area / aspect[i])
        heights[0,i] = widths[0,i] * aspect[i]

    # construct grid boxes
    centers = np.repeat(centers, num_aspect, axis=0)
    widths = np.tile(widths, num).transpose()
    heights = np.tile(heights, num).transpose()

    x1 = np.reshape(centers[:,0], (-1, 1)) - widths * 0.5
    x2 = np.reshape(centers[:,0], (-1, 1)) + widths * 0.5
    y1 = np.reshape(centers[:,1], (-1, 1)) - heights * 0.5
    y2 = np.reshape(centers[:,1], (-1, 1)) + heights * 0.5
    
    boxes_grid = np.hstack((x1, y1, x2, y2)) / cfg.TRAIN.SPATIAL_SCALE

    return boxes_grid, centers[:,0], centers[:,1] 
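The widths/heights loop solves w * h = area under the constraint h / w = aspect, i.e. w = sqrt(area / aspect) and h = w * aspect. A quick check with hypothetical numbers:

import math

area = 16 * 16      # e.g. a 16x16 kernel footprint
aspect = 2.0        # height / width
w = math.sqrt(area / aspect)
h = w * aspect
print(w, h, w * h)  # ~11.31, ~22.63, 256.0 -- area preserved, 2:1 aspect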
Example 78
Project: kuaa   Author: rafaelwerneck   File: plugin_kappa.py    GNU General Public License v3.0 4 votes vote down vote up
def write_tex(evaluation_path, classes, node_id):
    """
    Calculates the average Cohen's Kappa from the values in the evaluation_path.
    """
    
    from numpy import array
    from scipy import stats
    from math import sqrt
    
    print "\t\tTeX: Cohen's Kappa"
    
    kappa_list = []
    
    evaluation_file = open(evaluation_path, "rb")
    for line in evaluation_file.readlines():
        kappa_list.append(float(line))
    evaluation_file.close()
    
    avg_kappa = array(kappa_list).mean()
    std_kappa = array(kappa_list).std()
    interval = stats.t.interval(0.95, len(kappa_list) - 1, loc = avg_kappa,
            scale = std_kappa / sqrt(len(kappa_list)))
    conf = avg_kappa - interval[0]
    
    evaluation_file = open(evaluation_path, "ab")
    evaluation_file.write("\nAverage Cohen's Kappa\n")
    evaluation_file.write(str(avg_kappa))
    evaluation_file.write("\nStandard Deviation\n")
    evaluation_file.write(str(std_kappa))
    evaluation_file.write("\nConfidence Interval (95%)\n")
    evaluation_file.write(str(conf))
    evaluation_file.close()
    
    tex_string = """
\\begin{table}[htbp]
    \\centering
    \\begin{tabular}{ccc}
        Mean & Deviation & Confidence Interval (95\\%%)\\\\
        \\hline
        %.2f & %.2f & %.2f
    \\end{tabular}
    \\caption{Average, Standard Deviation and Confidence Interval of the Cohen's Kappa of Node %s}
    \\label{tab:kappa_%s}
\\end{table}
    """ % (avg_kappa * 100, std_kappa * 100, conf * 100, node_id, node_id)
    
    return tex_string 
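The interval line is a Student-t confidence interval around the mean with standard error std / sqrt(n); the value written out is its half-width. A standalone sketch with made-up kappa scores:

from math import sqrt
from numpy import array
from scipy import stats

kappas = [0.61, 0.58, 0.66, 0.63, 0.60]
avg = array(kappas).mean()
std = array(kappas).std()
low, high = stats.t.interval(0.95, len(kappas) - 1, loc=avg, scale=std / sqrt(len(kappas)))
print(avg, avg - low)  # mean and 95% half-width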
Example 79
Project: kuaa   Author: rafaelwerneck   File: plugin_global_accuracy_score.py    GNU General Public License v3.0 4 votes vote down vote up
def write_tex(evaluation_path, classes, node_id):
    """
    Calculates the average global accuracy score from the accuracy scores in the
    evaluation_path.
    """
    
    from scipy import stats
    from math import sqrt
    from ast import literal_eval
    
    ZERO = 0
    
    print "\t\tTeX: Global Accuracy Score"
    
    global_acc_list = []
    tex_string = ""
    
    evaluation_file = open(evaluation_path, "rb")
    for line in evaluation_file.readlines():
        global_acc_list.append(literal_eval(line))
    evaluation_file.close()
    
    for key in global_acc_list[ZERO].iterkeys():
        acc_list = []
        for global_acc in global_acc_list:
            acc_list.append(float(global_acc[key]))
        
        avg_acc = numpy.array(acc_list).mean()
        std_acc = numpy.array(acc_list).std()
        interval = stats.t.interval(0.95, len(acc_list) - 1, loc = avg_acc,
                scale = std_acc / sqrt(len(acc_list)))
        conf = avg_acc - interval[0]
        
        evaluation_file = open(evaluation_path, "ab")
        evaluation_file.write("\n{0}\nAverage Global Accuracy Score\n".format(key))
        evaluation_file.write(str(avg_acc))
        evaluation_file.write("\nStandard Deviation\n")
        evaluation_file.write(str(std_acc))
        evaluation_file.write("\nConfidence Interval (95%)\n")
        evaluation_file.write(str(conf))
        evaluation_file.close()
        
        tex_string += """
\\begin{table}[htbp]
    \\centering
    \\begin{tabular}{ccc}
        Mean & Deviation & Confidence Interval (95\\%%)\\\\
        \\hline
        %.2f & %.2f & %.2f
    \\end{tabular}
    \\caption{Average, Standard Deviation and Confidence Interval of the %s of Node %s}
    \\label{tab:acc_%s}
\\end{table}
        """ % (avg_acc * 100, std_acc * 100, conf * 100, key, node_id, node_id)
    
    return tex_string 
Example 80
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: minidensenet.py    MIT License 4 votes vote down vote up
def __init__(self, growthRate, depth, reduction, nClasses, bottleneck, n_dim):
        super(MiniDenseNet, self).__init__()

        nDenseBlocks = (depth - 4) // 3
        if bottleneck:
            nDenseBlocks //= 2

        nChannels = 2 * growthRate
        self.conv1 = nn.Conv2d(n_dim, nChannels, kernel_size=3, padding=1, bias=False)
        self.dense1 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += nDenseBlocks * growthRate
        nOutChannels = int(math.floor(nChannels * reduction))
        self.trans1 = Transition(nChannels, nOutChannels)

        nChannels = nOutChannels
        self.dense2 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += nDenseBlocks * growthRate
        nOutChannels = int(math.floor(nChannels * reduction))
        self.trans2 = Transition(nChannels, nOutChannels)

        nChannels = nOutChannels
        self.dense3 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += nDenseBlocks * growthRate

        self.bn1 = nn.BatchNorm2d(nChannels)
        if bottleneck == False:
            self.fc = nn.Linear(768, nClasses)
        else:
            self.fc = nn.Linear(432, nClasses)

        self.sig = nn.Sigmoid()
        self.num_classes = nClasses

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
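The nChannels bookkeeping above follows the usual DenseNet rule: each dense block adds nDenseBlocks * growthRate channels and each transition keeps floor(nChannels * reduction). A quick trace with hypothetical hyperparameters:

import math

growthRate, depth, reduction, bottleneck = 12, 16, 0.5, False
nDenseBlocks = (depth - 4) // 3                       # 4
nChannels = 2 * growthRate                            # 24
nChannels += nDenseBlocks * growthRate                # 72 after dense1
nChannels = int(math.floor(nChannels * reduction))    # 36 after trans1
nChannels += nDenseBlocks * growthRate                # 84 after dense2
nChannels = int(math.floor(nChannels * reduction))    # 42 after trans2
nChannels += nDenseBlocks * growthRate                # 90 going into bn1
print(nChannels)  # 90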