Python numpy.log2() Examples

The following are 30 code examples showing how to use numpy.log2(). They are extracted from open source projects; the project, author, file, and license are listed above each example. Unless a snippet says otherwise, numpy is assumed to be imported as np (the pyGSTi examples alias it as _np).

Example 1
Project: xrft   Author: xgcm   File: xrft.py    License: MIT License
def fit_loglog(x, y):
    """
    Fit a line to isotropic spectra in log-log space

    Parameters
    ----------
    x : `numpy.array`
        Coordinate of the data
    y : `numpy.array`
        data

    Returns
    -------
    y_fit : `numpy.array`
        The linear fit
    a : float64
        Slope of the fit
    b : float64
        Intercept of the fit
    """
    # fit a line in log-log space
    p = np.polyfit(np.log2(x), np.log2(y), 1)
    y_fit = 2**(np.log2(x)*p[0] + p[1])

    return y_fit, p[0], p[1] 
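
As a quick check of fit_loglog, a minimal usage sketch; the synthetic k^-3 spectrum below is purely illustrative:

import numpy as np

k = 2.0 ** np.arange(1, 8)    # wavenumbers 2, 4, ..., 128
E = 10.0 * k ** -3.0          # spectrum with slope -3 in log-log space

y_fit, slope, intercept = fit_loglog(k, E)
print(slope)      # ~ -3.0
print(intercept)  # ~ log2(10) ~ 3.32, the offset in log2 space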
Example 2
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    License: MIT License
def add_image(self, img):
        if self.print_progress and self.cur_images % self.progress_interval == 0:
            print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True)
            sys.stdout.flush()
        if self.shape is None:
            self.shape = img.shape
            self.resolution_log2 = int(np.log2(self.shape[1]))
            assert self.shape[0] in [1, 3]
            assert self.shape[1] == self.shape[2]
            assert self.shape[1] == 2**self.resolution_log2
            tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
            for lod in range(self.resolution_log2 - 1):
                tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod)
                self.tfr_writers.append(tf.python_io.TFRecordWriter(tfr_file, tfr_opt))
        assert img.shape == self.shape
        for lod, tfr_writer in enumerate(self.tfr_writers):
            if lod:
                img = img.astype(np.float32)
                img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25
            quant = np.rint(img).clip(0, 255).astype(np.uint8)
            ex = tf.train.Example(features=tf.train.Features(feature={
                'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)),
                'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))}))
            tfr_writer.write(ex.SerializeToString())
        self.cur_images += 1 
Example 3
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset.py    License: MIT License
def __init__(self, resolution=1024, num_channels=3, dtype='uint8', dynamic_range=[0,255], label_size=0, label_dtype='float32'):
        self.resolution         = resolution
        self.resolution_log2    = int(np.log2(resolution))
        self.shape              = [num_channels, resolution, resolution]
        self.dtype              = dtype
        self.dynamic_range      = dynamic_range
        self.label_size         = label_size
        self.label_dtype        = label_dtype
        self._tf_minibatch_var  = None
        self._tf_lod_var        = None
        self._tf_minibatch_np   = None
        self._tf_labels_np      = None

        assert self.resolution == 2 ** self.resolution_log2
        with tf.name_scope('Dataset'):
            self._tf_minibatch_var = tf.Variable(np.int32(0), name='minibatch_var')
            self._tf_lod_var = tf.Variable(np.int32(0), name='lod_var') 
Example 4
Project: models   Author: kipoi   File: model.py    License: MIT License
def predict_on_batch(self, inputs):
            if inputs.shape == (2,):
                inputs = inputs[np.newaxis, :]
            # Encode
            max_len = len(max(inputs, key=len))
            one_hot_ref =  self.encode(inputs[:,0])
            one_hot_alt = self.encode(inputs[:,1])
            # Construct dummy library indicator
            indicator = np.zeros((inputs.shape[0],2))
            indicator[:,1] = 1
            # Compute fold change for all three frames
            fc_changes = []
            for shift in range(3):
                if shift > 0:
                    shifter = np.zeros((one_hot_ref.shape[0],1,4))
                    one_hot_ref = np.concatenate([one_hot_ref, shifter], axis=1)
                    one_hot_alt = np.concatenate([one_hot_alt, shifter], axis=1)
                pred_ref = self.model.predict_on_batch([one_hot_ref, indicator]).reshape(-1)
                pred_variant = self.model.predict_on_batch([one_hot_alt, indicator]).reshape(-1)
                fc_changes.append(np.log2(pred_variant/pred_ref))
            # Return
            return {"mrl_fold_change":fc_changes[0], 
                    "shift_1":fc_changes[1],
                    "shift_2":fc_changes[2]} 
Example 5
Project: discomll   Author: romanorac   File: measures.py    License: Apache License 2.0
def multinomLog2(selectors):
    """
    Function calculates logarithm 2 of a kind of multinom.

    selectors: list of integers
    """

    ln2 = 0.69314718055994528622
    noAll = sum(selectors)
    lgNf = math.lgamma(noAll + 1.0) / ln2  # log2(N!)

    lgnFac = []
    for selector in selectors:
        if selector == 0 or selector == 1:
            lgnFac.append(0.0)
        elif selector == 2:
            lgnFac.append(1.0)
        elif selector == noAll:
            lgnFac.append(lgNf)
        else:
            lgnFac.append(math.lgamma(selector + 1.0) / ln2)
    return lgNf - sum(lgnFac) 
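
Since the multinomial coefficient 5!/(3!·2!) equals 10, multinomLog2([3, 2]) should return log2(10). A quick sanity check (math.comb needs Python 3.8+):

import math

print(multinomLog2([3, 2]))         # ~ 3.321928
print(math.log2(math.comb(5, 3)))   # same value, computed directly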
Example 6
Project: contextualbandits   Author: david-cortes   File: utils.py    License: BSD 2-Clause "Simplified" License
def _check_beta_prior(beta_prior, nchoices, for_ucb=False):
    if beta_prior == 'auto':
        if not for_ucb:
            out = ( (2.0 / np.log2(nchoices), 4.0), 2 )
        else:
            out = ( (3.0 / np.log2(nchoices), 4.0), 2 )
    elif beta_prior is None:
        out = ((1.0,1.0), 0)
    else:
        assert len(beta_prior) == 2
        assert len(beta_prior[0]) == 2
        assert isinstance(beta_prior[1], int)
        assert isinstance(beta_prior[0][0], int) or isinstance(beta_prior[0][0], float)
        assert isinstance(beta_prior[0][1], int) or isinstance(beta_prior[0][1], float)
        assert (beta_prior[0][0] > 0.) and (beta_prior[0][1] > 0.)
        out = beta_prior
    return out 
Example 7
Project: pyGSTi   Author: pyGSTio   File: basisconstructors.py    License: Apache License 2.0
def pp_labels(matrix_dim):
    def _is_integer(x):
        return bool(abs(x - round(x)) < 1e-6)
    if matrix_dim == 0: return []
    if matrix_dim == 1: return ['']  # special case - use empty label instead of "I"

    nQubits = _np.log2(matrix_dim)
    if not _is_integer(nQubits):
        raise ValueError("Dimension for Pauli tensor product matrices must be an integer *power of 2*")
    nQubits = int(round(nQubits))

    lblList = []
    basisLblList = [['I', 'X', 'Y', 'Z']] * nQubits
    for sigmaLbls in _itertools.product(*basisLblList):
        lblList.append(''.join(sigmaLbls))
    return lblList 
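
A usage sketch, assuming numpy and itertools are imported under the _np and _itertools aliases the snippet uses; a non-power-of-two dimension such as 3 raises the ValueError above:

import itertools as _itertools
import numpy as _np

print(pp_labels(2))       # ['I', 'X', 'Y', 'Z']       (one qubit)
print(pp_labels(4)[:4])   # ['II', 'IX', 'IY', 'IZ']   (two qubits, 16 labels total)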
Example 8
Project: pyGSTi   Author: pyGSTio   File: smq1Q_Xpi2_rpe.py    License: Apache License 2.0
def get_rpe_experiment_design(max_max_length, qubit_labels=None, req_counts=None):
    max_log_lengths = _np.log2(max_max_length)
    if not (int(max_log_lengths) - max_log_lengths == 0):
        raise ValueError('Only integer powers of two accepted for max_max_length.')

    assert(qubit_labels is None or qubit_labels == (0,)), "Only qubit_labels=(0,) is supported so far"
    return _rpe.RobustPhaseEstimationDesign(
        _obj.Circuit([('Gxpi2', 0)], line_labels=(0,)),
        [2**i for i in range(int(max_log_lengths) + 1)],
        _obj.Circuit([], line_labels=(0,)),
        _obj.Circuit([('Gxpi2', 0)], line_labels=(0,)),
        ['1'],
        ['0'],
        _obj.Circuit([], line_labels=(0,)),
        _obj.Circuit([], line_labels=(0,)),
        ['0'],
        ['1'],
        qubit_labels=qubit_labels,
        req_counts=req_counts) 
Example 9
Project: pyGSTi   Author: pyGSTio   File: smq1Q_Ypi2_rpe.py    License: Apache License 2.0
def get_rpe_experiment_design(max_max_length, qubit_labels=None, req_counts=None):
    max_log_lengths = _np.log2(max_max_length)
    if not (int(max_log_lengths) - max_log_lengths == 0):
        raise ValueError('Only integer powers of two accepted for max_max_length.')

    assert(qubit_labels is None or qubit_labels == (0,)), "Only qubit_labels=(0,) is supported so far"
    return _rpe.RobustPhaseEstimationDesign(
        _obj.Circuit([('Gypi2', 0)], line_labels=(0,)),
        [2**i for i in range(int(max_log_lengths) + 1)],
        _obj.Circuit([], line_labels=(0,)),
        _obj.Circuit([('Gypi2', 0)], line_labels=(0,)),
        ['1'],
        ['0'],
        _obj.Circuit([], line_labels=(0,)),
        _obj.Circuit([], line_labels=(0,)),
        ['0'],
        ['1'],
        qubit_labels=qubit_labels,
        req_counts=req_counts) 
Example 10
Project: rcan-tensorflow   Author: kozistr   File: model.py    License: MIT License
def up_scaling(self, x, f, scale_factor, name):
        """
        :param x: image
        :param f: conv2d filter
        :param scale_factor: scale factor
        :param name: scope name
        :return:
        """
        with tf.variable_scope(name):
            if scale_factor == 3:
                x = tfu.conv2d(x, f * 9, k=1, name='conv2d-image_scaling-0')
                x = tfu.pixel_shuffle(x, 3)
            elif scale_factor & (scale_factor - 1) == 0:  # is it 2^n?
                log_scale_factor = int(np.log2(scale_factor))
                for i in range(log_scale_factor):
                    x = tfu.conv2d(x, f * 4, k=1, name='conv2d-image_scaling-%d' % i)
                    x = tfu.pixel_shuffle(x, 2)
            else:
                raise NotImplementedError("[-] Not supported scaling factor (%d)" % scale_factor)
            return x 
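
The scale_factor & (scale_factor - 1) check above is the usual single-set-bit trick for detecting powers of two, paired with np.log2 to count the doubling steps. A standalone illustration:

import numpy as np

for n in [2, 3, 4, 6, 8, 16]:
    is_pow2 = n & (n - 1) == 0                  # powers of two have exactly one set bit
    steps = int(np.log2(n)) if is_pow2 else None
    print(n, is_pow2, steps)
# 2 True 1 | 3 False None | 4 True 2 | 6 False None | 8 True 3 | 16 True 4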
Example 11
Project: astropy-healpix   Author: astropy   File: core.py    License: BSD 3-Clause "New" or "Revised" License
def nside_to_level(nside):
    """
    Find the HEALPix level for a given nside.

    This is given by ``level = log2(nside)``.

    This function is the inverse of `level_to_nside`.

    Parameters
    ----------
    nside : int
        The number of pixels on the side of one of the 12 'top-level' HEALPix tiles.
        Must be a power of two.

    Returns
    -------
    level : int
        The level of the HEALPix cells
    """
    nside = np.asarray(nside, dtype=np.int64)

    _validate_nside(nside)
    return np.log2(nside).astype(np.int64) 
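
Stripped of the package's _validate_nside check, the core conversion is exact, because np.log2 of a power of two is exact in double precision; a minimal sketch:

import numpy as np

nside = np.array([1, 2, 4, 8, 1024], dtype=np.int64)
print(np.log2(nside).astype(np.int64))   # [ 0  1  2  3 10]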
Example 12
Project: recruit   Author: Frank-qlu   File: histograms.py    License: Apache License 2.0
def _hist_bin_sturges(x, range):
    """
    Sturges histogram bin estimator.

    A very simplistic estimator based on the assumption of normality of
    the data. This estimator has poor performance for non-normal data,
    which becomes especially obvious for large data sets. The estimate
    depends only on size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return x.ptp() / (np.log2(x.size) + 1.0) 
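
Worked through on a concrete sample (np.ptp is the peak-to-peak range; the generator seed is arbitrary):

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(1000)

width = np.ptp(x) / (np.log2(x.size) + 1.0)   # Sturges bin width
n_bins = int(np.ceil(np.ptp(x) / width))      # ~ log2(1000) + 1 ~ 11 bins
print(width, n_bins)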
Example 13
Project: recruit   Author: Frank-qlu   File: test_umath.py    License: Apache License 2.0
def test_branch_cuts(self):
        # check branch cuts and continuity on them
        _check_branch_cut(np.log,   -0.5, 1j, 1, -1, True)
        _check_branch_cut(np.log2,  -0.5, 1j, 1, -1, True)
        _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True)
        _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True)
        _check_branch_cut(np.sqrt,  -0.5, 1j, 1, -1, True)

        _check_branch_cut(np.arcsin, [ -2, 2],   [1j, 1j], 1, -1, True)
        _check_branch_cut(np.arccos, [ -2, 2],   [1j, 1j], 1, -1, True)
        _check_branch_cut(np.arctan, [0-2j, 2j],  [1,  1], -1, 1, True)

        _check_branch_cut(np.arcsinh, [0-2j,  2j], [1,   1], -1, 1, True)
        _check_branch_cut(np.arccosh, [ -1, 0.5], [1j,  1j], 1, -1, True)
        _check_branch_cut(np.arctanh, [ -2,   2], [1j, 1j], 1, -1, True)

        # check against bogus branch cuts: assert continuity between quadrants
        _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1,  1], 1, 1)
        _check_branch_cut(np.arccos, [0-2j, 2j], [ 1,  1], 1, 1)
        _check_branch_cut(np.arctan, [ -2,  2], [1j, 1j], 1, 1)

        _check_branch_cut(np.arcsinh, [ -2,  2, 0], [1j, 1j, 1], 1, 1)
        _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1,  1,  1j], 1, 1)
        _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1,  1,  1j], 1, 1) 
Example 14
Project: recruit   Author: Frank-qlu   File: test_umath.py    License: Apache License 2.0
def test_branch_cuts_complex64(self):
        # check branch cuts and continuity on them
        _check_branch_cut(np.log,   -0.5, 1j, 1, -1, True, np.complex64)
        _check_branch_cut(np.log2,  -0.5, 1j, 1, -1, True, np.complex64)
        _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64)
        _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64)
        _check_branch_cut(np.sqrt,  -0.5, 1j, 1, -1, True, np.complex64)

        _check_branch_cut(np.arcsin, [ -2, 2],   [1j, 1j], 1, -1, True, np.complex64)
        _check_branch_cut(np.arccos, [ -2, 2],   [1j, 1j], 1, -1, True, np.complex64)
        _check_branch_cut(np.arctan, [0-2j, 2j],  [1,  1], -1, 1, True, np.complex64)

        _check_branch_cut(np.arcsinh, [0-2j,  2j], [1,   1], -1, 1, True, np.complex64)
        _check_branch_cut(np.arccosh, [ -1, 0.5], [1j,  1j], 1, -1, True, np.complex64)
        _check_branch_cut(np.arctanh, [ -2,   2], [1j, 1j], 1, -1, True, np.complex64)

        # check against bogus branch cuts: assert continuity between quadrants
        _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1,  1], 1, 1, False, np.complex64)
        _check_branch_cut(np.arccos, [0-2j, 2j], [ 1,  1], 1, 1, False, np.complex64)
        _check_branch_cut(np.arctan, [ -2,  2], [1j, 1j], 1, 1, False, np.complex64)

        _check_branch_cut(np.arcsinh, [ -2,  2, 0], [1j, 1j, 1], 1, 1, False, np.complex64)
        _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1,  1,  1j], 1, 1, False, np.complex64)
        _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1,  1,  1j], 1, 1, False, np.complex64) 
Example 15
Project: KAIR   Author: cszn   File: utils_deblur.py    License: MIT License
def p2o(psf, shape):
    '''
    # psf: NxCxhxw
    # shape: [H,W]
    # otf: NxCxHxWx2
    '''
    otf = torch.zeros(psf.shape[:-2] + shape).type_as(psf)
    otf[...,:psf.shape[2],:psf.shape[3]].copy_(psf)
    for axis, axis_size in enumerate(psf.shape[2:]):
        otf = torch.roll(otf, -int(axis_size / 2), dims=axis+2)
    otf = torch.rfft(otf, 2, onesided=False)
    n_ops = torch.sum(torch.tensor(psf.shape).type_as(psf) * torch.log2(torch.tensor(psf.shape).type_as(psf)))
    otf[...,1][torch.abs(otf[...,1])<n_ops*2.22e-16] = torch.tensor(0).type_as(psf)
    return otf
Example 16
Project: KAIR   Author: cszn   File: utils_sisr.py    License: MIT License
def p2o(psf, shape):
    '''
    Args:
        psf: NxCxhxw
        shape: [H,W]

    Returns:
        otf: NxCxHxWx2
    '''
    otf = torch.zeros(psf.shape[:-2] + shape).type_as(psf)
    otf[...,:psf.shape[2],:psf.shape[3]].copy_(psf)
    for axis, axis_size in enumerate(psf.shape[2:]):
        otf = torch.roll(otf, -int(axis_size / 2), dims=axis+2)
    otf = torch.rfft(otf, 2, onesided=False)
    n_ops = torch.sum(torch.tensor(psf.shape).type_as(psf) * torch.log2(torch.tensor(psf.shape).type_as(psf)))
    otf[...,1][torch.abs(otf[...,1])<n_ops*2.22e-16] = torch.tensor(0).type_as(psf)
    return otf 
Example 17
Project: basenji   Author: calico   File: basenji_sad.py    License: Apache License 2.0
def write_snp(ref_preds, alt_preds, sad_out, si, sad_stats, log_pseudo):
  """Write SNP predictions to HDF."""

  # sum across length
  ref_preds_sum = ref_preds.sum(axis=0, dtype='float64')
  alt_preds_sum = alt_preds.sum(axis=0, dtype='float64')

  # compare reference to alternative via mean subtraction
  if 'SAD' in sad_stats:
    sad = alt_preds_sum - ref_preds_sum
    sad_out['SAD'][si,:] = sad.astype('float16')

  # compare reference to alternative via mean log division
  if 'SAR' in sad_stats:
    sar = np.log2(alt_preds_sum + log_pseudo) \
                   - np.log2(ref_preds_sum + log_pseudo)
    sad_out['SAR'][si,:] = sar.astype('float16')

  # compare geometric means
  if 'geoSAD' in sad_stats:
    sar_vec = np.log2(alt_preds.astype('float64') + log_pseudo) \
                - np.log2(ref_preds.astype('float64') + log_pseudo)
    geo_sad = sar_vec.sum(axis=0)
    sad_out['geoSAD'][si,:] = geo_sad.astype('float16') 
Example 18
Project: basenji   Author: calico   File: basenji_motifs_denovo.py    License: Apache License 2.0
def plot_kernel(kernel_weights, out_pdf):
    depth, width = kernel_weights.shape
    fig_width = 2 + 1.5*np.log2(width)

    # normalize
    kernel_weights -= kernel_weights.mean(axis=0)

    # plot
    sns.set(font_scale=1.5)
    plt.figure(figsize=(fig_width, depth))
    sns.heatmap(kernel_weights, cmap='PRGn', linewidths=0.2, center=0)
    ax = plt.gca()
    ax.set_xticklabels(range(1,width+1))

    if depth == 4:
        ax.set_yticklabels('ACGT', rotation='horizontal')
    else:
        ax.set_yticklabels(range(1,depth+1), rotation='horizontal')

    plt.savefig(out_pdf)
    plt.close() 
Example 19
Project: basenji   Author: calico   File: basenji_motifs.py    License: Apache License 2.0
def info_content(pwm, transpose=False, bg_gc=0.415):
  """ Compute PWM information content.

    In the original analysis, I used a bg_gc=0.5. For any
    future analysis, I ought to switch to the true hg19
    value of 0.415.
    """
  pseudoc = 1e-9

  if transpose:
    pwm = np.transpose(pwm)

  bg_pwm = [1 - bg_gc, bg_gc, bg_gc, 1 - bg_gc]

  ic = 0
  for i in range(pwm.shape[0]):
    for j in range(4):
      # ic += 0.5 + pwm[i][j]*np.log2(pseudoc+pwm[i][j])
      ic += -bg_pwm[j] * np.log2(
          bg_pwm[j]) + pwm[i][j] * np.log2(pseudoc + pwm[i][j])

  return ic 
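
For a fully deterministic PWM every position contributes its maximum information, which with the default bg_gc=0.415 background comes to about 1.96 bits per position under this formula. A small check:

import numpy as np

pwm = np.array([[1., 0., 0., 0.],    # A
                [0., 0., 1., 0.],    # G
                [0., 0., 0., 1.]])   # T
print(info_content(pwm))   # ~ 5.87 for three deterministic positions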
Example 20
Project: DDPAE-video-prediction   Author: jthsieh   File: DDPAE.py    License: MIT License
def setup_networks(self):
    '''
    Networks for DDPAE.
    '''
    self.nets = {}
    # These will be registered in model() and guide() with pyro.module().
    self.model_modules = {}
    self.guide_modules = {}

    # Backbone, Pose RNN
    pose_model = PoseRNN(self.n_components, self.n_frames_output, self.n_channels,
                         self.image_size, self.image_latent_size, self.hidden_size,
                         self.ngf, self.pose_latent_size, self.independent_components)
    self.pose_model = nn.DataParallel(pose_model.cuda())

    self.nets['pose_model'] = self.pose_model
    self.guide_modules['pose_model'] = self.pose_model

    # Content LSTM
    content_lstm = SequenceEncoder(self.content_latent_size, self.hidden_size,
                                   self.content_latent_size * 2)
    self.content_lstm = nn.DataParallel(content_lstm.cuda())
    self.nets['content_lstm'] = self.content_lstm
    self.model_modules['content_lstm'] = self.content_lstm

    # Image encoder and decoder
    n_layers = int(np.log2(self.object_size)) - 1
    object_encoder = ImageEncoder(self.n_channels, self.content_latent_size,
                                  self.ngf, n_layers)
    object_decoder = ImageDecoder(self.content_latent_size, self.n_channels,
                                  self.ngf, n_layers, 'sigmoid')
    self.object_encoder = nn.DataParallel(object_encoder.cuda())
    self.object_decoder = nn.DataParallel(object_decoder.cuda())
    self.nets.update({'object_encoder': self.object_encoder,
                      'object_decoder': self.object_decoder})
    self.model_modules['decoder'] = self.object_decoder
    self.guide_modules['encoder'] = self.object_encoder 
Example 21
Project: DDPAE-video-prediction   Author: jthsieh   File: pose_rnn.py    License: MIT License
def __init__(self, n_components, n_frames_output, n_channels, image_size,
               image_latent_size, hidden_size, ngf, output_size, independent_components):
    super(PoseRNN, self).__init__()

    n_layers = int(np.log2(image_size)) - 1
    self.image_encoder = ImageEncoder(n_channels, image_latent_size, ngf, n_layers)
    # Encoder
    self.encode_rnn = nn.LSTM(image_latent_size + hidden_size, hidden_size,
                              num_layers=1, batch_first=True)
    if independent_components:
      predict_input_size = hidden_size
    else:
      predict_input_size = hidden_size * 2
    self.predict_rnn = nn.LSTM(predict_input_size, hidden_size, num_layers=1, batch_first=True)

    # Beta
    self.beta_mu_layer = nn.Linear(hidden_size, output_size)
    self.beta_sigma_layer = nn.Linear(hidden_size, output_size)

    # Initial pose
    self.initial_pose_rnn = nn.LSTM(hidden_size, hidden_size, num_layers=1, batch_first=True)
    self.initial_pose_mu = nn.Linear(hidden_size, output_size)
    self.initial_pose_sigma = nn.Linear(hidden_size, output_size)

    self.n_components = n_components
    self.n_frames_output = n_frames_output
    self.image_latent_size = image_latent_size
    self.hidden_size = hidden_size
    self.output_size = output_size
    self.independent_components = independent_components 
Example 22
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    License: MIT License
def create_from_images(tfrecord_dir, image_dir, label_dir, shuffle):
    print('Loading images from "%s"' % image_dir)
    image_filenames = sorted(glob.glob(os.path.join(image_dir, '*')))
    if len(image_filenames) == 0:
        error('No input images found')
        
    img = np.asarray(PIL.Image.open(image_filenames[0]))
    resolution = img.shape[0]
    channels = img.shape[2] if img.ndim == 3 else 1
    if img.shape[1] != resolution:
        error('Input images must have the same width and height')
    if resolution != 2 ** int(np.floor(np.log2(resolution))):
        error('Input image resolution must be a power-of-two')
    if channels not in [1, 3]:
        error('Input images must be stored as RGB or grayscale')

    try:
        with open(label_dir, 'rb') as file:
            labels = pickle.load(file)
    except:
        error('Label file was not found')
    
    with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
        order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames))
        reordered_names = []
        for idx in range(order.size):
            image_filename = image_filenames[order[idx]]
            img = np.asarray(PIL.Image.open(image_filename))
            if channels == 1:
                img = img[np.newaxis, :, :] # HW => CHW
            else:
                img = img.transpose(2, 0, 1) # HWC => CHW
            tfr.add_image(img)
            reordered_names.append(os.path.basename(image_filename))
        reordered_labels = []
        for key in reordered_names:
            reordered_labels += [labels[key]]
        reordered_labels = np.stack(reordered_labels, 0)
        tfr.add_labels(reordered_labels)

#---------------------------------------------------------------------------- 
Example 23
Project: discomll   Author: romanorac   File: measures.py    License: Apache License 2.0
def h(values):
    """
    Function calculates entropy.

    values: list of integers
    """
    ent = np.true_divide(values, np.sum(values))
    return -np.sum(np.multiply(ent, np.log2(ent))) 
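
For example, a fair 50/50 split carries exactly one bit, while a 75/25 split carries about 0.81 bits; note that a zero count would produce nan here, since 0 * np.log2(0) evaluates to 0 * -inf:

import numpy as np

print(h([50, 50]))   # 1.0
print(h([75, 25]))   # ~ 0.8113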
Example 24
Project: DOTA_models   Author: ringringyi   File: memory.py    License: Apache License 2.0
def __init__(self, key_dim, memory_size, vocab_size,
               choose_k=256, alpha=0.1, correct_in_top=1, age_noise=8.0,
               var_cache_device='', nn_device='',
               num_hashes=None, num_libraries=None):
    super(LSHMemory, self).__init__(
        key_dim, memory_size, vocab_size,
        choose_k=choose_k, alpha=alpha, correct_in_top=1, age_noise=age_noise,
        var_cache_device=var_cache_device, nn_device=nn_device)

    self.num_libraries = num_libraries or int(self.choose_k ** 0.5)
    self.num_per_hash_slot = max(1, self.choose_k // self.num_libraries)
    self.num_hashes = (num_hashes or
                       int(np.log2(self.memory_size / self.num_per_hash_slot)))
    self.num_hashes = min(max(self.num_hashes, 1), 20)
    self.num_hash_slots = 2 ** self.num_hashes

    # hashing vectors
    self.hash_vecs = [
        tf.get_variable(
            'hash_vecs%d' % i, [self.num_hashes, self.key_dim],
            dtype=tf.float32, trainable=False,
            initializer=tf.truncated_normal_initializer(0, 1))
        for i in xrange(self.num_libraries)]

    # map representing which hash slots map to which mem keys
    self.hash_slots = [
        tf.get_variable(
            'hash_slots%d' % i, [self.num_hash_slots, self.num_per_hash_slot],
            dtype=tf.int32, trainable=False,
            initializer=tf.random_uniform_initializer(maxval=self.memory_size,
                                                      dtype=tf.int32))
        for i in xrange(self.num_libraries)] 
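
Tracing the sizing arithmetic above with concrete numbers (values chosen only for illustration):

import numpy as np

memory_size, choose_k = 8192, 256
num_libraries = int(choose_k ** 0.5)                         # 16
num_per_hash_slot = max(1, choose_k // num_libraries)        # 16
num_hashes = int(np.log2(memory_size / num_per_hash_slot))   # log2(512) = 9
num_hash_slots = 2 ** num_hashes                             # 512
print(num_libraries, num_per_hash_slot, num_hashes, num_hash_slots)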
Example 25
Project: Kaggler   Author: jeongyoonlee   File: categorical.py    License: MIT License
def _get_model(X, cat_cols, num_cols, n_uniq, n_emb, output_activation):
        inputs = []
        num_inputs = []
        embeddings = []
        for i, col in enumerate(cat_cols):

            if not n_uniq[i]:
                n_uniq[i] = X[col].nunique()
            if not n_emb[i]:
                n_emb[i] = max(MIN_EMBEDDING, 2 * int(np.log2(n_uniq[i])))

            _input = Input(shape=(1,), name=col)
            _embed = Embedding(input_dim=n_uniq[i], output_dim=n_emb[i], name=col + EMBEDDING_SUFFIX)(_input)
            _embed = Dropout(.2)(_embed)
            _embed = Reshape((n_emb[i],))(_embed)

            inputs.append(_input)
            embeddings.append(_embed)

        if num_cols:
            num_inputs = Input(shape=(len(num_cols),), name='num_inputs')
            merged_input = Concatenate(axis=1)(embeddings + [num_inputs])

            inputs = inputs + [num_inputs]
        else:
            merged_input = Concatenate(axis=1)(embeddings)

        x = BatchNormalization()(merged_input)
        x = Dense(128, activation='relu')(x)
        x = Dropout(.5)(x)
        x = BatchNormalization()(x)
        x = Dense(64, activation='relu')(x)
        x = Dropout(.5)(x)
        x = BatchNormalization()(x)
        output = Dense(1, activation=output_activation)(x)

        model = Model(inputs=inputs, outputs=output)

        return model, n_emb, n_uniq 
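
The embedding width heuristic above grows logarithmically with a column's cardinality, floored at the module-level MIN_EMBEDDING constant (not shown in this snippet). Its raw values for a few cardinalities:

import numpy as np

for n_uniq in [10, 100, 1000, 100000]:
    print(n_uniq, 2 * int(np.log2(n_uniq)))   # 6, 12, 18, 32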
Example 26
Project: progressive_growing_of_GANs   Author: preritj   File: net.py    License: MIT License
def __init__(self, cfg):
        self.alpha = cfg.leakyRelu_alpha
        input_size, _, nc = cfg.input_shape
        self.res = cfg.resolution
        self.min_res = cfg.min_resolution
        # number of times to upsample/downsample for full resolution:
        self.n_scalings = int(np.log2(input_size / self.min_res))
        # number of times to upsample/downsample for current resolution:
        self.n_layers = int(np.log2(self.res / self.min_res))
        self.nf_min = cfg.nf_min  # min feature depth
        self.nf_max = cfg.nf_max  # max feature depth
        self.batch_size = cfg.batch_size
        Model.__init__(self, cfg) 
Example 27
Project: imgcomp-cvpr   Author: fab-jul   File: bit_counter.py    License: GNU General Public License v3.0
def _encode(foutid, syms, ctx_shape, get_freqs, get_pr, printer):
    """
    :param foutid:
    :param syms: CHW, padded
    :param ctx_shape:
    :param get_freqs:
    :param get_pr:
    :return:
    """
    with open(foutid, 'wb') as fout:
        bit_out = ac.CountingBitOutputStream(
            bit_out=ac.BitOutputStream(fout))
        enc = ac.ArithmeticEncoder(bit_out)
        ctx_sym_itr = _new_ctx_sym_itr(syms, ctx_shape=ctx_shape)
        # First sym is stored separately using log2(L) bits or sth
        first_ctx, first_sym = next(ctx_sym_itr)
        first_pr = get_pr(first_ctx)
        first_bc = -np.log2(first_pr[first_sym])
        theoretical_bit_cost = first_bc
        num_ctxs = _get_num_ctxs(syms.shape, ctx_shape)
        # Encode other symbols
        for i, (ctx, sym) in enumerate(ctx_sym_itr):
            freqs = get_freqs(ctx)
            pr = get_pr(ctx)
            theoretical_bit_cost += -np.log2(pr[sym])
            enc.write(freqs, sym)
            if i % 1000 == 0:
                printer('\rFeeding context for symbol #{}/{}...'.format(i, num_ctxs), end='', flush=True)
        printer('\r\033[K', end='')  # clear line
        enc.finish()
        bit_out.close()
        return bit_out.num_bits, first_sym, theoretical_bit_cost 
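
The theoretical_bit_cost accumulator is the information content of each symbol: an ideal entropy coder spends -log2(p) bits on a symbol of probability p. For instance:

import numpy as np

pr = np.array([0.5, 0.25, 0.125, 0.125])
print(-np.log2(pr))                # [1. 2. 3. 3.] bits per symbol
print(np.sum(pr * -np.log2(pr)))   # 1.75 bits expected per symbol (the entropy)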
Example 28
Project: interpret-text   Author: interpretml   File: test_validate_explanations.py    License: MIT License
def dcg(true_order_relevance, validate_order, top_values=10):
    # retrieve relevance score for each value in validation order
    relevance = np.vectorize(lambda x: true_order_relevance.get(x, 0))(
        validate_order[:top_values]
    )
    gain = 2 ** relevance - 1
    discount = np.log2(np.arange(1, len(gain) + 1) + 1)
    sum_dcg = np.sum(gain / discount)
    return sum_dcg


# TODO: remove this and replace with current contrib method once azureml-contrib-explain-model moved to release 
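
A usage sketch with hypothetical relevance judgments (the dict keys and orderings below are made up): the np.log2 discount rewards placing high-relevance items first.

import numpy as np

relevance = {'a': 3, 'b': 2, 'c': 1}
print(dcg(relevance, np.array(['a', 'b', 'c'])))   # ideal order: ~ 9.39
print(dcg(relevance, np.array(['c', 'b', 'a'])))   # reversed:    ~ 6.39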
Example 29
Project: razzy-spinner   Author: rafasashi   File: hmm.py    License: GNU General Public License v3.0
def _backward_probability(self, unlabeled_sequence):
        """
        Return the backward probability matrix, a T by N array of
        log-probabilities, where T is the length of the sequence and N is the
        number of states. Each entry (t, s) gives the probability of being in
        state s at time t after observing the partial symbol sequence from t
        .. T.

        :return: the backward log probability matrix
        :rtype:  array
        :param unlabeled_sequence: the sequence of unlabeled symbols
        :type unlabeled_sequence: list
        """
        T = len(unlabeled_sequence)
        N = len(self._states)
        beta = _ninf_array((T, N))

        transitions_logprob = self._transitions_matrix().T

        # initialise the backward values;
        # "1" is an arbitrarily chosen value from Rabiner tutorial
        beta[T-1, :] = np.log2(1)

        # inductively calculate remaining backward values
        for t in range(T-2, -1, -1):
            symbol = unlabeled_sequence[t+1][_TEXT]
            outputs = self._outputs_vector(symbol)

            for i in range(N):
                summand = transitions_logprob[i] + beta[t+1] + outputs
                beta[t, i] = logsumexp2(summand)

        return beta 
Example 30
Project: razzy-spinner   Author: rafasashi   File: hmm.py    License: GNU General Public License v3.0
def logsumexp2(arr):
    max_ = arr.max()
    return np.log2(np.sum(2**(arr - max_))) + max_
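
Subtracting the maximum keeps 2**(arr - max_) within floating-point range, so the sum stays finite even for very negative log-probabilities; the naive form underflows first. A quick comparison:

import numpy as np

logs = np.array([-50.0, -51.0, -52.0])
print(logsumexp2(logs))               # ~ -49.1926, i.e. -50 + log2(1.75)
print(np.log2(np.sum(2.0 ** logs)))   # matches here, but underflows for much smaller inputs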