Python math.log() Examples

The following are 22 code examples showing how to use math.log(). These examples are extracted from open source projects; the project, author, file, and license for each are noted above the example code.

You may also want to check out all available functions and classes of the math module.
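
As a quick refresher before the examples: math.log(x) returns the natural logarithm of x, an optional second argument selects a different base, and non-positive inputs raise a ValueError. A minimal sketch:

import math

math.log(math.e)  # 1.0 (natural log is the default)
math.log(8, 2)    # 3.0 (optional base argument)
math.log10(100)   # 2.0 (dedicated base-10 variant)
math.log(0)       # raises ValueError: math domain error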

Example 1
Project: decisiontrees   Author: jayelm   File: id3.py    License: MIT License
def entropy(self, subset, attr, value, base=False):
        """
        Calculate the entropy of the given attribute/value pair from the
        given subset.

        Args:
            subset: the subset with which to calculate entropy.
            attr: the attribute of the value.
            value: the value used in calculation.
            base: whether or not to calculate base entropy based solely on the
                dependent value (default False).

        Returns:
            A float of the entropy of the given value.

        """
        counts = self.value_counts(subset, attr, value, base)
        total = float(sum(counts.values()))  # Coerce to float division
        entropy = 0
        for dv in counts:  # For each dependent value
            proportion = counts[dv] / total
            entropy += -(proportion*math.log(proportion, 2))
        return entropy 
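
To see the computation in isolation: the loop above implements Shannon entropy, H = -sum(p * log2(p)). A minimal standalone sketch over a hypothetical counts dict (value_counts and its arguments belong to the surrounding class):

import math

counts = {"yes": 9, "no": 5}  # hypothetical dependent-value counts
total = float(sum(counts.values()))
H = -sum((c / total) * math.log(c / total, 2) for c in counts.values())
print(round(H, 4))  # 0.9403 for a 9/5 split
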
Example 2
Project: text-rank   Author: ouprince   File: util.py    License: MIT License
def get_similarity(word_list1, word_list2):
    """默认的用于计算两个句子相似度的函数。

    Keyword arguments:
    word_list1, word_list2  --  分别代表两个句子,都是由单词组成的列表
    """
    words   = list(set(word_list1 + word_list2))        
    vector1 = [float(word_list1.count(word)) for word in words]
    vector2 = [float(word_list2.count(word)) for word in words]
    
    vector3 = [vector1[x]*vector2[x] for x in range(len(vector1))]
    vector4 = [1 for num in vector3 if num > 0.]
    co_occur_num = sum(vector4)

    if abs(co_occur_num) <= 1e-12:
        return 0.
    
    denominator = math.log(float(len(word_list1))) + math.log(float(len(word_list2)))  # denominator
    
    if abs(denominator) < 1e-12:
        return 0.
    
    return co_occur_num / denominator 
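
In other words, the score is the number of shared words divided by log(len(s1)) + log(len(s2)), the sentence-similarity measure used in TextRank. A hypothetical call:

s1 = ["the", "cat", "sat", "on", "the", "mat"]
s2 = ["the", "dog", "sat", "on", "the", "log"]
print(get_similarity(s1, s2))  # 3 shared words / (log 6 + log 6) ~ 0.837
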
Example 3
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: sampler.py    License: Apache License 2.0
def draw(self, true_classes):
        """Draw samples from log uniform distribution and returns sampled candidates,
        expected count for true classes and sampled classes."""
        range_max = self.range_max
        num_sampled = self.num_sampled
        ctx = true_classes.context
        log_range = math.log(range_max + 1)
        num_tries = 0
        true_classes = true_classes.reshape((-1,))
        sampled_classes, num_tries = self.sampler.sample_unique(num_sampled)

        true_cls = true_classes.as_in_context(ctx).astype('float64')
        prob_true = ((true_cls + 2.0) / (true_cls + 1.0)).log() / log_range
        count_true = self._prob_helper(num_tries, num_sampled, prob_true)

        sampled_classes = ndarray.array(sampled_classes, ctx=ctx, dtype='int64')
        sampled_cls_fp64 = sampled_classes.astype('float64')
        prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range
        count_sampled = self._prob_helper(num_tries, num_sampled, prob_sampled)
        return [sampled_classes, count_true, count_sampled] 
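
Under the log-uniform (Zipfian) distribution sampled here, the probability of drawing class c is log((c + 2) / (c + 1)) / log(range_max + 1), which is what the (cls + 2.0) / (cls + 1.0) expressions compute. A scalar sketch of that per-class probability:

import math

def log_uniform_prob(c, range_max):  # hypothetical standalone helper
    return math.log((c + 2.0) / (c + 1.0)) / math.log(range_max + 1)

print(log_uniform_prob(0, 1000))    # ~0.1003: low class ids are sampled often
print(log_uniform_prob(999, 1000))  # ~0.000145: high class ids are rare
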
Example 4
Project: DOTA_models   Author: ringringyi   File: accountant.py    License: Apache License 2.0
def _compute_delta(self, log_moments, eps):
    """Compute delta for given log_moments and eps.

    Args:
      log_moments: the log moments of privacy loss, in the form of pairs
        of (moment_order, log_moment)
      eps: the target epsilon.
    Returns:
      delta
    """
    min_delta = 1.0
    for moment_order, log_moment in log_moments:
      if math.isinf(log_moment) or math.isnan(log_moment):
        sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
        continue
      if log_moment < moment_order * eps:
        min_delta = min(min_delta,
                        math.exp(log_moment - moment_order * eps))
    return min_delta 
Example 5
Project: DOTA_models   Author: ringringyi   File: gaussian_moments.py    License: Apache License 2.0
def _compute_delta(log_moments, eps):
  """Compute delta for given log_moments and eps.

  Args:
    log_moments: the log moments of privacy loss, in the form of pairs
      of (moment_order, log_moment)
    eps: the target epsilon.
  Returns:
    delta
  """
  min_delta = 1.0
  for moment_order, log_moment in log_moments:
    if moment_order == 0:
      continue
    if math.isinf(log_moment) or math.isnan(log_moment):
      sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
      continue
    if log_moment < moment_order * eps:
      min_delta = min(min_delta,
                      math.exp(log_moment - moment_order * eps))
  return min_delta 
Example 6
Project: DOTA_models   Author: ringringyi   File: gaussian_moments.py    License: Apache License 2.0
def _compute_eps(log_moments, delta):
  """Compute epsilon for given log_moments and delta.

  Args:
    log_moments: the log moments of privacy loss, in the form of pairs
      of (moment_order, log_moment)
    delta: the target delta.
  Returns:
    epsilon
  """
  min_eps = float("inf")
  for moment_order, log_moment in log_moments:
    if moment_order == 0:
      continue
    if math.isinf(log_moment) or math.isnan(log_moment):
      sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
      continue
    min_eps = min(min_eps, (log_moment - math.log(delta)) / moment_order)
  return min_eps 
Example 7
Project: DOTA_models   Author: ringringyi   File: gaussian_moments.py    License: Apache License 2.0
def get_privacy_spent(log_moments, target_eps=None, target_delta=None):
  """Compute delta (or eps) for given eps (or delta) from log moments.

  Args:
    log_moments: array of (moment_order, log_moment) pairs.
    target_eps: if not None, the epsilon for which we would like to compute
      corresponding delta value.
    target_delta: if not None, the delta for which we would like to compute
      corresponding epsilon value. Exactly one of target_eps and target_delta
      is None.
  Returns:
    eps, delta pair
  """
  assert (target_eps is None) ^ (target_delta is None)
  assert not ((target_eps is None) and (target_delta is None))
  if target_eps is not None:
    return (target_eps, _compute_delta(log_moments, target_eps))
  else:
    return (_compute_eps(log_moments, target_delta), target_delta) 
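
Together, the three helpers implement the tail-bound conversion of the moments accountant: for a moment order lambda with log moment alpha(lambda), delta <= exp(alpha(lambda) - lambda * eps), and conversely eps <= (alpha(lambda) - log(delta)) / lambda, each minimized over the available orders. A worked call on hypothetical moments:

import math

log_moments = [(1, 0.5), (2, 1.2), (4, 3.0)]  # hypothetical (order, log_moment) pairs
eps, delta = get_privacy_spent(log_moments, target_eps=1.0)
# delta = min(exp(0.5 - 1), exp(1.2 - 2), exp(3.0 - 4)) = exp(-1.0) ~ 0.368,
# i.e. the order-4 moment gives the tightest bound here.
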
Example 8
Project: DOTA_models   Author: ringringyi   File: blocks_entropy_coding_test.py    License: Apache License 2.0
def testCodeLength(self):
    shape = [2, 4]
    proba_feed = [[0.65, 0.25, 0.70, 0.10],
                  [0.28, 0.20, 0.44, 0.54]]
    symbol_feed = [[1.0, 0.0, 1.0, 0.0],
                   [0.0, 0.0, 0.0, 1.0]]
    mean_code_length = - (
        (math.log(0.65) + math.log(0.75) + math.log(0.70) + math.log(0.90) +
         math.log(0.72) + math.log(0.80) + math.log(0.56) + math.log(0.54)) /
        math.log(2.0)) / (shape[0] * shape[1])

    symbol = tf.placeholder(dtype=tf.float32, shape=shape)
    proba = tf.placeholder(dtype=tf.float32, shape=shape)
    code_length_calculator = blocks_entropy_coding.CodeLength()
    code_length = code_length_calculator(symbol, proba)

    with self.test_session():
      tf.global_variables_initializer().run()
      code_length_eval = code_length.eval(
          feed_dict={symbol: symbol_feed, proba: proba_feed})

    self.assertAllClose(mean_code_length, code_length_eval) 
Example 9
Project: cvpr2018-hnd   Author: kibok90   File: models.py    License: MIT License
def __init__(self, T, opts):
        super(LOOLoss, self).__init__()
        
        self.gpu = opts.gpu
        self.loo = opts.loo if 'LOO' in opts.method else 0.
        self.label_smooth = opts.label_smooth
        self.kld_u_const = math.log(len(T['wnids']))
        self.relevant = [torch.from_numpy(rel) for rel in T['relevant']]
        self.labels_relevant = torch.from_numpy(T['labels_relevant'].astype(np.uint8))
        ch_slice = T['ch_slice']
        if opts.class_wise:
            num_children = T['num_children']
            num_supers = len(num_children)
            self.class_weight = torch.zeros(ch_slice[-1])
            for m, num_ch in enumerate(num_children):
                self.class_weight[ch_slice[m]:ch_slice[m+1]] = 1. / (num_ch * num_supers)
        else:
            self.class_weight = torch.ones(ch_slice[-1]) / ch_slice[-1] 
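
The kld_u_const above is simply log(K) for K = len(T['wnids']): the entropy of a uniform distribution over K classes, which is the constant term when taking a KL divergence against that uniform. A scalar sketch:

import math

K = 1000                   # hypothetical number of classes
kld_u_const = math.log(K)  # entropy of a uniform distribution over K classes
print(kld_u_const)         # ~6.908
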
Example 10
Project: fine-lm   Author: akzaidi   File: common_layers.py    License: MIT License
def get_timing_signal(length,
                      min_timescale=1,
                      max_timescale=1e4,
                      num_timescales=16):
  """Create Tensor of sinusoids of different frequencies.

  Args:
    length: Length of the Tensor to create, i.e. Number of steps.
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int

  Returns:
    Tensor of shape (length, 2*num_timescales)
  """
  positions = tf.to_float(tf.range(length))
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / (num_timescales - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) 
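
The math.log here produces geometrically spaced timescales: each successive timescale is the previous one multiplied by exp(log(max/min) / (num_timescales - 1)), sweeping from min_timescale to max_timescale. A scalar sketch of the progression with the defaults above:

import math

min_ts, max_ts, num_ts = 1.0, 1e4, 16
increment = math.log(max_ts / min_ts) / (num_ts - 1)
timescales = [min_ts * math.exp(i * increment) for i in range(num_ts)]
print(timescales[0], timescales[-1])  # 1.0 ... ~10000.0, geometric steps
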
Example 11
Project: torch-toolbox   Author: PistonY   File: loss.py    License: BSD 3-Clause "New" or "Revised" License
def __init__(
            self,
            classes,
            alpha,
            p=0.9,
            from_normx=False,
            weight=None,
            size_average=None,
            ignore_index=-100,
            reduce=None,
            reduction='mean'):
        super(L2Softmax, self).__init__(
            weight, size_average, reduce, reduction)
        alpha_low = math.log(p * (classes - 2) / (1 - p))
        assert alpha > alpha_low, "For a given probability of p={}, alpha should be higher than {}.".format(
            p, alpha_low)
        self.ignore_index = ignore_index
        self.alpha = alpha
        self.from_normx = from_normx 
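
The bound appears to follow the L2-constrained softmax analysis: to reach an average softmax probability of p across `classes` classes, the feature scale alpha must exceed log(p * (classes - 2) / (1 - p)). Worked numbers for a hypothetical 10-class setup with p = 0.9:

import math

classes, p = 10, 0.9
alpha_low = math.log(p * (classes - 2) / (1 - p))
print(alpha_low)  # ~4.277, so alpha must be chosen above this
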
Example 12
Project: svviz   Author: svviz   File: remap.py    License: MIT License
def log2(x):
    try:
        return math.log(x, 2)
    except ValueError:
        return float("nan") 
Example 13
Project: controllable-text-attribute-transfer   Author: Nrgeup   File: bleu.py    License: Apache License 2.0
def corpus_bleu(hypothesis, references, max_n=4):
    assert(len(hypothesis) == len(references))
    clip_count, count, total_len_hyp, total_len_ref = bleu_count(hypothesis, references, max_n=max_n)
    brevity_penalty = 1.0
    bleu_scores = []
    bleu = 0
    for n in range(max_n):
        if count[n]>0:
            bleu_scores.append(clip_count[n]/count[n])
        else:
            bleu_scores.append(0)
    if total_len_hyp < total_len_ref:
        if total_len_hyp==0:
            brevity_penalty = 0.0
        else:
            brevity_penalty = math.exp(1 - total_len_ref/total_len_hyp)
    def my_log(x):
        if x == 0:
            return -9999999999.0
        elif x < 0:
            raise Exception("Value Error")
        return math.log(x)
    log_bleu = 0.0
    for n in range(max_n):
        log_bleu += my_log(bleu_scores[n])
    bleu = brevity_penalty*math.exp(log_bleu / float(max_n))
    return [bleu]+bleu_scores, [brevity_penalty, total_len_hyp/total_len_ref, total_len_hyp, total_len_ref] 
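
Putting the pieces together: BLEU = brevity_penalty * exp(mean of log p_n), where my_log substitutes a huge negative number for a zero n-gram precision so the final score collapses toward zero instead of raising an error. A worked example on hypothetical precisions:

import math

bleu_scores = [0.8, 0.6, 0.4, 0.3]         # hypothetical n-gram precisions
brevity_penalty = math.exp(1 - 105 / 100)  # hypothetical ref len 105, hyp len 100
log_bleu = sum(math.log(p) for p in bleu_scores)
print(brevity_penalty * math.exp(log_bleu / 4))  # ~0.466
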
Example 14
Project: controllable-text-attribute-transfer   Author: Nrgeup   File: bleu.py    License: Apache License 2.0
def incremental_sent_bleu(hypothesis, references, max_n=4):
    clip_count, count, total_len_hyp, total_len_ref = incremental_bleu_count([hypothesis], [references], max_n=max_n)
    clip_count = clip_count[0]
    count = count[0]
    total_len_hyp = total_len_hyp[0]
    total_len_ref = total_len_ref[0]
    n_len = len(clip_count)
    ret = []
    for i in range(n_len):
        brevity_penalty = 1.0
        bleu_scores = []
        bleu = 0
        for n in range(max_n):
            if count[i][n]>0:
                bleu_scores.append(clip_count[i][n]/count[i][n])
            else:
                bleu_scores.append(0)
        if total_len_hyp[i] < total_len_ref[i]:
            if total_len_hyp[i]==0:
                brevity_penalty = 0.0
            else:
                brevity_penalty = math.exp(1 - total_len_ref[i]/total_len_hyp[i])
        def my_log(x):
            if x == 0:
                return -9999999999.0
            elif x < 0:
                raise Exception("Value Error")
            return math.log(x)
        log_bleu = 0.0
        for n in range(max_n):
            log_bleu += my_log(bleu_scores[n])
        bleu = brevity_penalty*math.exp(log_bleu / float(max_n))
        ret.append(bleu)
    return ret 
Example 15
Project: controllable-text-attribute-transfer   Author: Nrgeup   File: bleu.py    License: Apache License 2.0
def incremental_test_corpus_bleu(hypothesis, references, max_n=4):
    assert(len(hypothesis) == len(references))
    tmp_clip_count, tmp_count, tmp_total_len_hyp, tmp_total_len_ref = incremental_bleu_count(hypothesis, references, max_n=max_n)
    clip_count = [0]*4
    count = [0]*4
    total_len_hyp = 0
    total_len_ref = 0
    for i in range(len(hypothesis)):
        for n in range(4):
            clip_count[n]+=tmp_clip_count[i][-1][n]
            count[n] += tmp_count[i][-1][n]
        total_len_hyp += tmp_total_len_hyp[i][-1]
        total_len_ref += tmp_total_len_ref[i][-1]
    brevity_penalty = 1.0
    bleu_scores = []
    bleu = 0
    for n in range(max_n):
        if count[n]>0:
            bleu_scores.append(clip_count[n]/count[n])
        else:
            bleu_scores.append(0)
    if total_len_hyp < total_len_ref:
        if total_len_hyp==0:
            brevity_penalty = 0.0
        else:
            brevity_penalty = math.exp(1 - total_len_ref/total_len_hyp)
    def my_log(x):
        if x == 0:
            return -9999999999.0
        elif x < 0:
            raise Exception("Value Error")
        return math.log(x)
    log_bleu = 0.0
    for n in range(max_n):
        log_bleu += my_log(bleu_scores[n])
    bleu = brevity_penalty*math.exp(log_bleu / float(max_n))
    return [bleu]+bleu_scores, [brevity_penalty, total_len_hyp/total_len_ref, total_len_hyp, total_len_ref] 
Example 16
Project: controllable-text-attribute-transfer   Author: Nrgeup   File: model2.py    License: Apache License 2.0
def __init__(self, d_model, dropout, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Compute the positional encodings once in log space.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) *
                             -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe) 
Example 17
Project: controllable-text-attribute-transfer   Author: Nrgeup   File: model.py    License: Apache License 2.0
def __init__(self, d_model, dropout, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Compute the positional encodings once in log space.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) *
                             -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe) 
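
Both versions rely on the log-space identity 10000**(-2i / d_model) == exp(2i * -(log(10000) / d_model)), which is exactly the div_term expression above and avoids computing the large power directly. A scalar check, assuming d_model = 512:

import math

d_model, i = 512, 10  # hypothetical model width and dimension index
direct = 10000.0 ** (-2 * i / d_model)
via_log = math.exp(2 * i * -(math.log(10000.0) / d_model))
print(direct, via_log)  # both ~0.6978
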
Example 18
Project: multibootusb   Author: mbusb   File: widgets.py    License: GNU General Public License v2.0
def update(self, pbar):
        """Updates the widget with the current SI prefixed speed."""

        if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6: # =~ 0
            scaled = power = 0
        else:
            speed = pbar.currval / pbar.seconds_elapsed
            power = int(math.log(speed, 1000))
            scaled = speed / 1000.**power

        return self.FORMAT % (scaled, self.PREFIXES[power], self.unit) 
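
Here math.log(speed, 1000) selects the SI prefix: its integer part counts how many factors of 1000 fit into the speed. A worked computation on a hypothetical value (the FORMAT and PREFIXES attributes belong to the widget class):

import math

speed = 1.5e6                       # hypothetical units per second
power = int(math.log(speed, 1000))  # 2, i.e. the mega prefix
scaled = speed / 1000.0 ** power    # 1.5
print(scaled, power)                # 1.5 2, rendered as roughly '1.5 M...'
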
Example 19
Project: open-sesame   Author: swabhs   File: argid.py    License: Apache License 2.0
def get_factor_expressions(fws, bws, tfemb, tfdict, valid_fes, sentence, spaths_x=None, cpaths_x=None):
    factexprs = {}
    sentlen = len(fws)

    sortedtfd = sorted(tfdict.keys())
    targetspan = (sortedtfd[0], sortedtfd[-1])

    for j in range(sentlen):
        istart = 0
        if USE_SPAN_CLIP and j > ALLOWED_SPANLEN: istart = max(0, j - ALLOWED_SPANLEN)
        for i in range(istart, j + 1):

            spanlen = scalarInput(j - i + 1)
            logspanlen = scalarInput(math.log(j - i + 1))
            spanwidth = sp_x[SpanWidth.howlongisspan(i, j)]
            spanpos = ap_x[ArgPosition.whereisarg((i, j), targetspan)]

            fbemb_ij_basic = concatenate([fws[i][j], bws[i][j], tfemb, spanlen, logspanlen, spanwidth, spanpos])
            if USE_DEPS:
                outs = oh_s[OutHeads.getnumouts(i, j, sentence.outheads)]
                shp = spaths_x[sentence.shortest_paths[(i, j, targetspan[0])]]
                fbemb_ij = concatenate([fbemb_ij_basic, outs, shp])
            elif USE_CONSTITS:
                isconstit = scalarInput((i, j) in sentence.constitspans)
                lca = ct_x[sentence.lca[(i, j)][1]]
                phrp = cpaths_x[sentence.cpaths[(i, j, targetspan[0])]]
                fbemb_ij = concatenate([fbemb_ij_basic, isconstit, lca, phrp])
            else:
                fbemb_ij = fbemb_ij_basic

            for y in valid_fes:
                fctr = Factor(i, j, y)
                if USE_HIER and y in feparents:
                    fefixed = esum([fe_x[y]] + [fe_x[par] for par in feparents[y]])
                else:
                    fefixed = fe_x[y]
                fbemb_ijy = concatenate([fefixed, fbemb_ij])
                factexprs[fctr] = w_f * rectify(w_z * fbemb_ijy + b_z) + b_f
    return factexprs 
Example 20
Project: DOTA_models   Author: ringringyi   File: accountant.py    License: Apache License 2.0
def accumulate_privacy_spending(self, eps_delta, unused_sigma,
                                  num_examples):
    """Accumulate the privacy spending.

    Currently only supports approximate privacy. Here we assume we use Gaussian
    noise on a randomly sampled batch so we get better composition: 1. the per
    batch privacy is computed using the privacy amplification via sampling bound;
    2. the composition is done using the composition theorem for Gaussian noise.
    TODO(liqzhang) Add a link to a document that describes the bounds used.

    Args:
      eps_delta: EpsDelta pair which can be tensors.
      unused_sigma: the noise sigma. Unused for this accountant.
      num_examples: the number of examples involved.
    Returns:
      a TensorFlow operation for updating the privacy spending.
    """

    eps, delta = eps_delta
    with tf.control_dependencies(
        [tf.Assert(tf.greater(delta, 0),
                   ["delta needs to be greater than 0"])]):
      amortize_ratio = (tf.cast(num_examples, tf.float32) * 1.0 /
                        self._total_examples)
      # Use privacy amplification via sampling bound.
      # See Lemma 2.2 in http://arxiv.org/pdf/1405.7085v2.pdf
      # TODO(liqzhang) Add a link to a document with formal statement
      # and proof.
      amortize_eps = tf.reshape(tf.log(1.0 + amortize_ratio * (
          tf.exp(eps) - 1.0)), [1])
      amortize_delta = tf.reshape(amortize_ratio * delta, [1])
      return tf.group(*[tf.assign_add(self._eps_squared_sum,
                                      tf.square(amortize_eps)),
                        tf.assign_add(self._delta_sum, amortize_delta)]) 
Example 21
Project: DOTA_models   Author: ringringyi   File: accountant.py    License: Apache License 2.0
def _compute_log_moment(self, sigma, q, moment_order):
    """Compute high moment of privacy loss.

    Args:
      sigma: the noise sigma, in the multiples of the sensitivity.
      q: the sampling ratio.
      moment_order: the order of moment.
    Returns:
      log E[exp(moment_order * X)]
    """
    pass 
Example 22
Project: DOTA_models   Author: ringringyi   File: accountant.py    License: Apache License 2.0
def _compute_eps(self, log_moments, delta):
    min_eps = float("inf")
    for moment_order, log_moment in log_moments:
      if math.isinf(log_moment) or math.isnan(log_moment):
        sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
        continue
      min_eps = min(min_eps, (log_moment - math.log(delta)) / moment_order)
    return min_eps
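
As in the standalone gaussian_moments version earlier, eps is the tightest tail bound (alpha(lambda) - log(delta)) / lambda over the available moment orders. A worked evaluation on hypothetical moments:

import math

log_moments = [(2, 1.2), (4, 3.0), (8, 7.5)]  # hypothetical (order, log_moment) pairs
delta = 1e-5
eps = min((lm - math.log(delta)) / order for order, lm in log_moments)
print(eps)  # ~2.377, from the order-8 moment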