Python numpy.log() Examples

The following are 60 code examples showing how to use numpy.log(). They are drawn from open source Python projects.

You may also want to check out all available functions and classes of the numpy module.
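Before diving in, a minimal standalone sketch of numpy.log itself (the natural logarithm, applied elementwise), including the small-offset pattern that recurs throughout the examples below:

import numpy as np

x = np.array([1.0, np.e, 10.0])
print(np.log(x))                 # natural log: [0. 1. 2.3026]
print(np.log(x) / np.log(10.0))  # change of base recovers log10
eps = 1e-12                      # many examples below add a tiny offset
print(np.log(x + eps))           # to keep the argument away from log(0) = -inf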

Example 1
Project: MPContribs   Author: materialsproject   File: pre_submission.py    License: MIT License
def load_RSM(filename):
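    # Presumably xu is xrayutilities, griddata is scipy.interpolate.griddata, and
    # Panel is the (since-removed) pandas.Panel; the imports live at module level
    # in pre_submission.py and are not shown in this excerpt.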
    om, tt, psd = xu.io.getxrdml_map(filename)
    om = np.deg2rad(om)
    tt = np.deg2rad(tt)
    wavelength = 1.54056

    q_y = (1 / wavelength) * (np.cos(tt) - np.cos(2 * om - tt))
    q_x = (1 / wavelength) * (np.sin(tt) - np.sin(2 * om - tt))

    xi = np.linspace(np.min(q_x), np.max(q_x), 100)
    yi = np.linspace(np.min(q_y), np.max(q_y), 100)
    psd[psd < 1] = 1
    data_grid = griddata(
        (q_x, q_y), psd, (xi[None, :], yi[:, None]), fill_value=1, method="cubic"
    )
    nx, ny = data_grid.shape

    range_values = [np.min(q_x), np.max(q_x), np.min(q_y), np.max(q_y)]
    output_data = (
        Panel(np.log(data_grid).reshape(nx, ny, 1), minor_axis=["RSM"])
        .transpose(2, 0, 1)
        .to_frame()
    )

    return range_values, output_data 
Example 2
Project: Deep_VoiceChanger   Author: pstuvwx   File: dataset.py    License: MIT License
def wave2input_image(wave, window, pos=0, pad=0):
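    # NOTE: sride, height, dif, bias, and scale are module-level constants
    # defined elsewhere in dataset.py; they are not shown in this excerpt.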
    wave_image = np.hstack([wave[pos+i*sride:pos+(i+pad*2)*sride+dif].reshape(height+pad*2, sride) for i in range(256//sride)])[:,:254]
    wave_image *= window
    spectrum_image = np.fft.fft(wave_image, axis=1)
    input_image = np.abs(spectrum_image[:,:128].reshape(1, height+pad*2, 128), dtype=np.float32)

    np.clip(input_image, 1000, None, out=input_image)
    np.log(input_image, out=input_image)
    input_image += bias
    input_image /= scale

    if np.max(input_image) > 0.95:
        print('input image max bigger than 0.95', np.max(input_image))
    if np.min(input_image) < 0.05:
        print('input image min smaller than 0.05', np.min(input_image))

    return input_image 
Example 3
Project: deep-learning-note   Author: wdxtub   File: 5_nueral_network.py    License: MIT License
def cost0(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)
    
    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))
    
    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
    
    # compute the cost
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i,:], np.log(h[i,:]))
        second_term = np.multiply((1 - y[i,:]), np.log(1 - h[i,:]))
        J += np.sum(first_term - second_term)
    
    J = J / m
    
    return J 
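The loop computes the standard unregularized cross-entropy, J = -(1/m) * sum over examples of [y*log(h) + (1-y)*log(1-h)]. As a sketch (not part of the project's code), the same cost can be written without the Python loop:

import numpy as np

def cost0_vectorized(y, h):
    # y, h: (m, num_labels) arrays of one-hot targets and feed-forward outputs
    m = y.shape[0]
    return np.sum(np.multiply(-y, np.log(h)) - np.multiply(1 - y, np.log(1 - h))) / m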
Example 4
Project: deep-learning-note   Author: wdxtub   File: 5_nueral_network.py    License: MIT License
def cost(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)
    
    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))
    
    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
    
    # compute the cost
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i,:], np.log(h[i,:]))
        second_term = np.multiply((1 - y[i,:]), np.log(1 - h[i,:]))
        J += np.sum(first_term - second_term)
    
    J = J / m
    
    # add the cost regularization term
    J += (float(learning_rate) / (2 * m)) * (np.sum(np.power(theta1[:,1:], 2)) + np.sum(np.power(theta2[:,1:], 2)))
    
    return J 
Example 5
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def apply_cmap(zs, cmap, vmin=None, vmax=None, unit=None, logrescale=False):
    '''
    apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin and/or vmax are passed,
      they are used to scale z.

    Note that this function can automatically rescale data into log-space if the colormap is a
    neuropythy log-space colormap such as log_eccentricity. To enable this behaviour use the
    optional argument logrescale=True.
    '''
    zs = pimms.mag(zs) if unit is None else pimms.mag(zs, unit)
    zs = np.asarray(zs, dtype='float')
    if pimms.is_str(cmap): cmap = matplotlib.cm.get_cmap(cmap)
    if logrescale:
        if vmin is None: vmin = np.log(np.nanmin(zs))
        if vmax is None: vmax = np.log(np.nanmax(zs))
        mn = np.exp(vmin)
        u = zdivide(nanlog(zs + mn) - vmin, vmax - vmin, null=np.nan)
    else:        
        if vmin is None: vmin = np.nanmin(zs)
        if vmax is None: vmax = np.nanmax(zs)
        u = zdivide(zs - vmin, vmax - vmin, null=np.nan)
    u[np.isnan(u)] = -np.inf
    return cmap(u) 
Example 6
Project: neuropythy   Author: noahbenson   File: retinotopy.py    License: GNU Affero General Public License v3.0
def to_logeccen(ecc, vmin=0, vmax=90, offset=0.75):
    '''
    to_logeccen(ecc) yields a rescaled log-space version of the eccentricity value (or values) ecc,
      which are extracted in degrees.
    to_logeccen(xy_matrix) rescales all the (x,y) points in the given matrix to have log-spaced
      eccentricity values.

    to_logeccen is the inverse of from_logeccen.
    '''
    if pimms.is_matrix(ecc):
        xy = np.asarray(pimms.mag(ecc, 'deg'))
        trq = xy.shape[0] != 2
        xy = np.transpose(xy) if trq else np.asarray(xy)
        ecc = np.sqrt(np.sum(xy**2, axis=0))
        esc = to_logeccen(ecc, vmin=vmin, vmax=vmax, offset=offset)
        ecc = zinv(ecc)
        xy = xy * [ecc,ecc] * [esc,esc]
        return xy.T if trq else xy
    else:
        (ecc,vmin,vmax,offset) = [np.asarray(pimms.mag(u, 'deg')) for u in (ecc,vmin,vmax,offset)]
        log_ecc = np.log(ecc + offset)
        (vmin, vmax) = [np.log(u + offset) for u in (vmin, vmax)]
        return (log_ecc - vmin) / (vmax - vmin) 
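For the scalar branch, this is just a normalized log: (ln(ecc+offset) - ln(vmin+offset)) / (ln(vmax+offset) - ln(vmin+offset)). A quick standalone check of the same arithmetic with the default arguments (a sketch, independent of pimms):

import numpy as np

ecc, vmin, vmax, offset = 3.0, 0.0, 90.0, 0.75
val = (np.log(ecc + offset) - np.log(vmin + offset)) / (np.log(vmax + offset) - np.log(vmin + offset))
print(val)  # ~0.34: 3 degrees of eccentricity sits about a third of the way up the log axis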
Example 7
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: metric.py    License: Apache License 2.0
def update(self, labels, preds):
        """Updates the internal evaluation result.

        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data.

        preds : list of `NDArray`
            Predicted values.
        """
        mx.metric.check_label_shapes(labels, preds)

        for label, pred in zip(labels, preds):
            label = label.asnumpy()
            pred = pred.asnumpy()
            pred = np.column_stack((1 - pred, pred))

            label = label.ravel()
            num_examples = pred.shape[0]
            assert label.shape[0] == num_examples, (label.shape[0], num_examples)
            prob = pred[np.arange(num_examples, dtype=np.int64), np.int64(label)]
            self.sum_metric += (-np.log(prob + self.eps)).sum()
            self.num_inst += num_examples 
Example 8
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: rnn_cell_demo.py    License: Apache License 2.0
def Perplexity(label, pred):
    """ Calculates prediction perplexity

    Args:
        label (mx.nd.array): labels array
        pred (mx.nd.array): prediction array

    Returns:
        float: calculated perplexity

    """

    # collapse the time, batch dimension
    label = label.reshape((-1,))
    pred = pred.reshape((-1, pred.shape[-1]))

    loss = 0.
    for i in range(pred.shape[0]):
        loss += -np.log(max(1e-10, pred[i][int(label[i])]))
    return np.exp(loss / label.size) 
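To see what the loop computes, here is a hypothetical call with tiny NumPy arrays standing in for the NDArrays (the reshapes behave identically):

import numpy as np

label = np.array([0.0, 1.0])                # two time steps
pred = np.array([[0.9, 0.1], [0.2, 0.8]])   # per-step class probabilities
# loss = -(log 0.9 + log 0.8); perplexity = exp(loss / 2) ~ 1.18
print(Perplexity(label, pred))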
Example 9
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: metric.py    License: Apache License 2.0
def update(self, labels, preds):
        """
        Implementation of updating metrics
        """
        # get generated multi label from network
        cls_prob = preds[0].asnumpy()
        loc_loss = preds[1].asnumpy()
        cls_label = preds[2].asnumpy()
        valid_count = np.sum(cls_label >= 0)
        # overall accuracy & object accuracy
        label = cls_label.flatten()
        mask = np.where(label >= 0)[0]
        indices = np.int64(label[mask])
        prob = cls_prob.transpose((0, 2, 1)).reshape((-1, cls_prob.shape[1]))
        prob = prob[mask, indices]
        self.sum_metric[0] += (-np.log(prob + self.eps)).sum()
        self.num_inst[0] += valid_count
        # smoothl1loss
        self.sum_metric[1] += np.sum(loc_loss)
        self.num_inst[1] += valid_count 
Example 10
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: utils.py    License: Apache License 2.0
def logging_config(name=None, level=logging.DEBUG, console_level=logging.DEBUG):
    if name is None:
        name = inspect.stack()[1][1].split('.')[0]
    folder = os.path.join(os.getcwd(), name)
    if not os.path.exists(folder):
        os.makedirs(folder)
    logpath = os.path.join(folder, name + ".log")
    print("All Logs will be saved to %s"  %logpath)
    logging.root.setLevel(level)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logfile = logging.FileHandler(logpath)
    logfile.setLevel(level)
    logfile.setFormatter(formatter)
    logging.root.addHandler(logfile)
    #TODO Update logging patterns in other files
    logconsole = logging.StreamHandler()
    logconsole.setLevel(console_level)
    logconsole.setFormatter(formatter)
    logging.root.addHandler(logconsole)
    return folder 
Example 11
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: metric.py    License: Apache License 2.0
def update(self, labels, preds):
        pred = preds[self.pred.index('rpn_cls_prob')]
        label = labels[self.label.index('rpn_label')]

        # label (b, p)
        label = label.asnumpy().astype('int32').reshape((-1))
        # pred (b, c, p) or (b, c, h, w) --> (b, p, c) --> (b*p, c)
        pred = pred.asnumpy().reshape((pred.shape[0], pred.shape[1], -1)).transpose((0, 2, 1))
        pred = pred.reshape((label.shape[0], -1))

        # filter with keep_inds
        keep_inds = np.where(label != -1)[0]
        label = label[keep_inds]
        cls = pred[keep_inds, label]

        cls += 1e-14
        cls_loss = -1 * np.log(cls)
        cls_loss = np.sum(cls_loss)
        self.sum_metric += cls_loss
        self.num_inst += label.shape[0] 
Example 12
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: metric.py    License: Apache License 2.0
def update(self, labels, preds):
        """Updates the internal evaluation result.

        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data.

        preds : list of `NDArray`
            Predicted values.
        """
        labels, preds = check_label_shapes(labels, preds, True)

        for label, pred in zip(labels, preds):
            label = label.asnumpy()
            pred = pred.asnumpy()

            label = label.ravel()
            assert label.shape[0] == pred.shape[0]

            prob = pred[numpy.arange(label.shape[0]), numpy.int64(label)]
            self.sum_metric += (-numpy.log(prob + self.eps)).sum()
            self.num_inst += label.shape[0] 
Example 13
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_loss.py    License: Apache License 2.0
def test_bce_loss():
    N = 20
    data = mx.random.uniform(-1, 1, shape=(N, 20))
    label = mx.nd.array(np.random.randint(2, size=(N,)), dtype='float32')
    data_iter = mx.io.NDArrayIter(data, label, batch_size=10, label_name='label')
    output = get_net(1)
    l = mx.symbol.Variable('label')
    Loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()
    loss = Loss(output, l)
    loss = mx.sym.make_loss(loss)
    mod = mx.mod.Module(loss, data_names=('data',), label_names=('label',))
    mod.fit(data_iter, num_epoch=200, optimizer_params={'learning_rate': 0.01},
            eval_metric=mx.metric.Loss(), optimizer='adam',
            initializer=mx.init.Xavier(magnitude=2))
    assert mod.score(data_iter, eval_metric=mx.metric.Loss())[0][1] < 0.01
    # Test against npy
    data = mx.random.uniform(-5, 5, shape=(10,))
    label = mx.random.uniform(0, 1, shape=(10,))
    mx_bce_loss = Loss(data, label).asnumpy()
    prob_npy = 1.0 / (1.0 + np.exp(-data.asnumpy()))
    label_npy = label.asnumpy()
    npy_bce_loss = - label_npy * np.log(prob_npy) - (1 - label_npy) * np.log(1 - prob_npy)
    assert_almost_equal(mx_bce_loss, npy_bce_loss, rtol=1e-4, atol=1e-5) 
Example 14
Project: numpynet   Author: uptake   File: common.py    License: BSD 3-Clause "New" or "Revised" License
def __init__(self, choice="sigmoid"):
        """
        :param choice: Which activation function you want, must be in self.available
        """
        if choice not in self.available:
            msg = "Choice of activation (" + choice + ") not available!"
            log.out.error(msg)
            raise ValueError(msg)
        elif choice == "tanh":
            self.function = self._tanh
        elif choice == "tanhpos":
            self.function = self._tanhpos
        elif choice == "sigmoid":
            self.function = self._sigmoid
        elif choice == "softplus":
            self.function = self._softplus
        elif choice == "relu":
            self.function = self._relu
        elif choice == "leakyrelu":
            self.function = self._leakyrelu 
Example 15
Project: DOTA_models   Author: ringringyi   File: gaussian_moments.py    License: Apache License 2.0
def _compute_eps(log_moments, delta):
  """Compute epsilon for given log_moments and delta.

  Args:
    log_moments: the log moments of privacy loss, in the form of pairs
      of (moment_order, log_moment)
    delta: the target delta.
  Returns:
    epsilon
  """
  min_eps = float("inf")
  for moment_order, log_moment in log_moments:
    if moment_order == 0:
      continue
    if math.isinf(log_moment) or math.isnan(log_moment):
      sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
      continue
    min_eps = min(min_eps, (log_moment - math.log(delta)) / moment_order)
  return min_eps 
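A worked call with hypothetical log moments (not from any real accountant run):

log_moments = [(1, 0.05), (2, 0.15), (4, 0.50)]
# per-order candidates: (log_moment - log(delta)) / order with delta = 1e-5
#   order 1: ~11.56, order 2: ~5.83, order 4: ~3.00  -> epsilon ~ 3.00
print(_compute_eps(log_moments, delta=1e-5))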
Example 16
Project: libTLDA   Author: wmkouw   File: tcpr.py    License: MIT License
def risk(self, Z, theta, q):
        """
        Compute target contrastive pessimistic risk.

        Parameters
        ----------
        Z : array
            target samples (M samples by D features)
        theta : array
            classifier parameters (D features by K classes)
        q : array
            soft labels (M samples by K classes)

        Returns
        -------
        float
            Value of risk function.

        """
        # Number of classes
        K = q.shape[1]

        # Compute negative log-likelihood
        L = self.neg_log_likelihood(Z, theta)

        # Weight loss by soft labels
        for k in range(K):
            L[:, k] *= q[:, k]

        # Sum over weighted losses
        L = np.sum(L, axis=1)

        # Risk is average loss
        return np.mean(L, axis=0) 
Example 17
Project: osqf2015   Author: mvaz   File: model.py    License: MIT License
def likelihood_statistic(self, n_outliers, n_obs):
        p_obs = n_outliers * 1.0 / n_obs
        p_expected = 1. - self.level
        stat_expected = p_expected ** n_outliers * (1-p_expected) ** (n_obs-n_outliers)
        stat_obs = p_obs ** n_outliers * (1-p_obs) ** (n_obs - n_outliers)
        return -2 * np.log(stat_expected / stat_obs) 
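This is the Kupiec-style likelihood-ratio backtest statistic. A standalone sketch of the same arithmetic (hypothetical numbers: a 99% VaR level, 6 exceedances in 250 observations):

import numpy as np

level, n_outliers, n_obs = 0.99, 6, 250
p_expected, p_obs = 1.0 - level, n_outliers / n_obs
stat_expected = p_expected ** n_outliers * (1 - p_expected) ** (n_obs - n_outliers)
stat_obs = p_obs ** n_outliers * (1 - p_obs) ** (n_obs - n_outliers)
print(-2 * np.log(stat_expected / stat_obs))  # ~3.55; compare against a chi-square(1) cutoff (3.84 at 5%)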
Example 18
Project: osqf2015   Author: mvaz   File: model.py    License: MIT License
def logreturns(self, n_days=1):
        self.ts['LogReturns'] = np.log( self.ts.Value.pct_change(periods=n_days) + 1) 
Example 19
Project: osqf2015   Author: mvaz   File: model.py    License: MIT License
def devol(self, _lambda=0.06, n_days=1):
        _com = (1 - _lambda) / _lambda
        self.df['LogReturns'] = np.log(self.df.Close.pct_change(periods=n_days) + 1)
        self.df['Vola'] = pd.ewmstd( self.df.LogReturns, com=_com, ignore_na=True)[2:]
        self.df['DevolLogReturns'] = self.df.LogReturns / self.df.Vola
        self.df.set_index('Date', inplace=True) 
Example 20
Project: MPContribs   Author: materialsproject   File: pre_submission.py    License: MIT License
def run(mpfile, **kwargs):

    input_dir = mpfile.hdata["_hdata"]["input_dir"]
    identifier = get_composition_from_string("PbZr20Ti80O3")
    print(identifier)

    # 'SP128_NSO_LPFM0000.ibw' too big to display in notebook
    files = ["BR_60016 (1).ibw", "SP128_NSO_VPFM0000.ibw"]
    for f in files:
        file_name = os.path.join(input_dir, f)
        df = load_data(file_name)
        name = f.split(".")[0]
        mpfile.add_data_table(identifier, df, name)
        print "imported", f

    xrd_file = os.path.join(input_dir, "Program6_JA_6_2th0m Near SRO (002)_2.xrdml.xml")
    data = read_xrdml(xrd_file)
    df = DataFrame(
        np.stack((data["2Theta"], data["data"]), 1), columns=["2Theta", "Intensity"]
    )
    opts = {"yaxis": {"type": "log"}}  # see plotly docs
    mpfile.add_data_table(identifier, df, "NearSRO", plot_options=opts)
    print "imported", os.path.basename(xrd_file)

    rsm_file = os.path.join(input_dir, "JA 42 RSM 103 STO 001.xrdml.xml")
    rvals, df = load_RSM(rsm_file)
    mpfile.add_hierarchical_data(
        {
            "rsm_range": {
                "x": "{} {}".format(rvals[0], rvals[1]),
                "y": "{} {}".format(rvals[2], rvals[3]),
            }
        },
        identifier=identifier,
    )
    mpfile.add_data_table(identifier, df, "RSM")
    print "imported", os.path.basename(rsm_file) 
Example 21
Project: disentangling_conditional_gans   Author: zalandoresearch   File: tfutil.py    License: MIT License
def log2(x):
    with tf.name_scope('Log2'):
        return tf.log(x) * np.float32(1.0 / np.log(2.0)) 
Example 22
Project: disentangling_conditional_gans   Author: zalandoresearch   File: tfutil.py    License: MIT License
def exp2(x):
    with tf.name_scope('Exp2'):
        return tf.exp(x * np.float32(np.log(2.0))) 
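Both helpers lean on the change-of-base identity log2(x) = ln(x)/ln(2) and its inverse 2**x = exp(x * ln 2). A NumPy-only sanity check of the same identities (no TensorFlow required):

import numpy as np

x = np.array([1.0, 2.0, 8.0])
assert np.allclose(np.log(x) / np.log(2.0), np.log2(x))
assert np.allclose(np.exp(x * np.log(2.0)), 2.0 ** x)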
Example 23
Project: mmdetection   Author: open-mmlab   File: grid_head.py    License: Apache License 2.0
def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                # TODO: compare mode = "fan_in" or "fan_out"
                kaiming_init(m)
        for m in self.modules():
            if isinstance(m, nn.ConvTranspose2d):
                normal_init(m, std=0.001)
        nn.init.constant_(self.deconv2.bias, -np.log(0.99 / 0.01)) 
Example 24
Project: mmdetection   Author: open-mmlab   File: regnet.py    License: Apache License 2.0
def generate_regnet(self,
                        initial_width,
                        width_slope,
                        width_parameter,
                        depth,
                        divisor=8):
        """Generates per block width from RegNet parameters.

        Args:
            initial_width ([int]): Initial width of the backbone
            width_slope ([float]): Slope of the quantized linear function
            width_parameter ([int]): Parameter used to quantize the width.
            depth ([int]): Depth of the backbone.
            divisor (int, optional): The divisor of channels. Defaults to 8.

        Returns:
            list, int: return a list of widths of each stage and the number of
                stages
        """
        assert width_slope >= 0
        assert initial_width > 0
        assert width_parameter > 1
        assert initial_width % divisor == 0
        widths_cont = np.arange(depth) * width_slope + initial_width
        ks = np.round(
            np.log(widths_cont / initial_width) / np.log(width_parameter))
        widths = initial_width * np.power(width_parameter, ks)
        widths = np.round(np.divide(widths, divisor)) * divisor
        num_stages = len(np.unique(widths))
        widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist()
        return widths, num_stages 
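The log/round pair quantizes a linear width ramp onto (near-)powers of width_parameter. A standalone sketch of the same computation, using the published RegNetX-200MF parameters as hypothetical inputs:

import numpy as np

initial_width, width_slope, width_parameter, depth, divisor = 24, 36.44, 2.49, 13, 8
widths_cont = np.arange(depth) * width_slope + initial_width
ks = np.round(np.log(widths_cont / initial_width) / np.log(width_parameter))
widths = np.round(initial_width * width_parameter ** ks / divisor) * divisor
print(sorted(set(widths.astype(int).tolist())))  # [24, 56, 152, 368] -> 4 stages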
Example 25
Project: subword-qac   Author: clovaai   File: generate.py    License: MIT License
def log_sum_exp(a, b):
    return max(a, b) + np.log(1 + math.exp(-abs(a - b))) 
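This is the numerically stable two-term log-sum-exp; NumPy ships the same operation as np.logaddexp, which makes a convenient cross-check:

import math
import numpy as np

a, b = -1000.0, -1000.5
print(log_sum_exp(a, b))   # stays finite instead of underflowing
print(np.logaddexp(a, b))  # same value, ~-999.526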
Example 26
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils_tf.py    License: BSD 3-Clause "New" or "Revised" License
def numpy_kl_with_logits(p_logits, q_logits):
    def numpy_softmax(logits):
        logits -= np.max(logits, axis=1, keepdims=True)
        exp_logits = np.exp(logits)
        return exp_logits / np.sum(exp_logits, axis=1, keepdims=True)

    p = numpy_softmax(p_logits)
    log_p = p_logits - np.log(np.sum(np.exp(p_logits), axis=1, keepdims=True))
    log_q = q_logits - np.log(np.sum(np.exp(q_logits), axis=1, keepdims=True))
    return (p * (log_p - log_q)).sum(axis=1).mean() 
Example 27
Project: sklearn-audio-transfer-learning   Author: jordipons   File: mel_features.py    License: ISC License
def hertz_to_mel(frequencies_hertz):
  """Convert frequencies to mel scale using HTK formula.

  Args:
    frequencies_hertz: Scalar or np.array of frequencies in hertz.

  Returns:
    Object of same size as frequencies_hertz containing corresponding values
    on the mel scale.
  """
  return _MEL_HIGH_FREQUENCY_Q * np.log(
      1.0 + (frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ)) 
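The two module constants are not shown in this excerpt; under the HTK convention they are usually _MEL_BREAK_FREQUENCY_HERTZ = 700.0 and _MEL_HIGH_FREQUENCY_Q = 1127.0 (assumed values here; check mel_features.py). With those:

import numpy as np

_MEL_BREAK_FREQUENCY_HERTZ = 700.0  # assumed HTK constants, not shown above
_MEL_HIGH_FREQUENCY_Q = 1127.0
print(hertz_to_mel(np.array([440.0, 1000.0])))  # ~[549.6, 1000.0]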
Example 28
Project: sklearn-audio-transfer-learning   Author: jordipons   File: mel_features.py    License: ISC License
def log_mel_spectrogram(data,
                        audio_sample_rate=8000,
                        log_offset=0.0,
                        window_length_secs=0.025,
                        hop_length_secs=0.010,
                        **kwargs):
  """Convert waveform to a log magnitude mel-frequency spectrogram.

  Args:
    data: 1D np.array of waveform data.
    audio_sample_rate: The sampling rate of data.
    log_offset: Add this to values when taking log to avoid -Infs.
    window_length_secs: Duration of each window to analyze.
    hop_length_secs: Advance between successive analysis windows.
    **kwargs: Additional arguments to pass to spectrogram_to_mel_matrix.

  Returns:
    2D np.array of (num_frames, num_mel_bins) consisting of log mel filterbank
    magnitudes for successive frames.
  """
  window_length_samples = int(round(audio_sample_rate * window_length_secs))
  hop_length_samples = int(round(audio_sample_rate * hop_length_secs))
  fft_length = 2 ** int(np.ceil(np.log(window_length_samples) / np.log(2.0)))
  spectrogram = stft_magnitude(
      data,
      fft_length=fft_length,
      hop_length=hop_length_samples,
      window_length=window_length_samples)
  mel_spectrogram = np.dot(spectrogram, spectrogram_to_mel_matrix(
      num_spectrogram_bins=spectrogram.shape[1],
      audio_sample_rate=audio_sample_rate, **kwargs))
  return np.log(mel_spectrogram + log_offset) 
Example 29
Project: spleeter   Author: deezer   File: convertor.py    License: MIT License
def gain_to_db(tensor, epsilon=10e-10):
    """ Convert from gain to decibel in tensorflow.

    :param tensor: Tensor to convert.
    :param epsilon: Operation constant.
    :returns: Converted tensor.
    """
    return 20. / np.log(10) * tf.math.log(tf.maximum(tensor, epsilon)) 
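Since 20/ln(10) * ln(x) = 20*log10(x), the scaling matches the usual decibel definition; a NumPy-only check of the identity:

import numpy as np

x = np.array([0.1, 1.0, 2.0])
assert np.allclose(20.0 / np.log(10) * np.log(x), 20.0 * np.log10(x))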
Example 30
Project: deep-learning-note   Author: wdxtub   File: 7_gradient.py    License: MIT License
def cross_entropy_error(y, t):
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    batch_size = y.shape[0]
    delta = 1e-7
    return -np.sum(t * np.log(y + delta))


# assume the third position (class index 2) is the correct one 
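Continuing that note, a quick hypothetical check where the third class (index 2) is the correct one:

import numpy as np

t = np.array([0, 0, 1, 0, 0])              # one-hot target: class 2 is correct
y = np.array([0.1, 0.05, 0.6, 0.15, 0.1])  # predicted probabilities
print(cross_entropy_error(y, t))           # -log(0.6 + 1e-7) ~ 0.511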
Example 31
Project: deep-learning-note   Author: wdxtub   File: 1_generate_text.py    License: MIT License
def sample(preds, temperature=1.0):
    # given the model's predictions, sample the index of the next character
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas) 
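A hypothetical usage: low temperatures sharpen the distribution toward the argmax, high temperatures flatten it:

import numpy as np

np.random.seed(0)
preds = [0.1, 0.2, 0.7]
print(sample(preds, temperature=0.5))  # most likely 2: probabilities become ~[0.02, 0.07, 0.91]
print(sample(preds, temperature=2.0))  # flatter: probabilities become ~[0.20, 0.28, 0.52]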
Example 32
Project: deep-learning-note   Author: wdxtub   File: 4_multi_classification.py    License: MIT License
def cost(theta, X, y, learningRate):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    reg = (learningRate / (2 * len(X))) * np.sum(np.power(theta[:,1:theta.shape[1]], 2))
    return np.sum(first - second) / len(X) + reg 
Example 33
Project: deep-learning-note   Author: wdxtub   File: 3_logistic_regression.py    License: MIT License
def cost(theta, X, y):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    return np.sum(first - second) / (len(X)) 
Example 34
Project: deep-learning-note   Author: wdxtub   File: 3_logistic_regression.py    License: MIT License
def costReg(theta, X, y, learningRate):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    reg = (learningRate / (2 * len(X))) * np.sum(np.power(theta[:,1:theta.shape[1]], 2))
    return np.sum(first - second) / len(X) + reg 
Example 35
Project: NiBetaSeries   Author: HBClab   File: test_nilearn.py    License: MIT License
def test_atlas_connectivity(betaseries_file, atlas_file, atlas_lut):
    # read in test files
    bs_data = nib.load(str(betaseries_file)).get_data()
    atlas_lut_df = pd.read_csv(str(atlas_lut), sep='\t')

    # expected output
    pcorr = np.corrcoef(bs_data.squeeze())
    np.fill_diagonal(pcorr, np.NaN)
    regions = atlas_lut_df['regions'].values
    pcorr_df = pd.DataFrame(pcorr, index=regions, columns=regions)
    expected_zcorr_df = pcorr_df.apply(lambda x: (np.log(1 + x) - np.log(1 - x)) * 0.5)

    # run instance of AtlasConnectivity
    ac = AtlasConnectivity(timeseries_file=str(betaseries_file),
                           atlas_file=str(atlas_file),
                           atlas_lut=str(atlas_lut))

    res = ac.run()

    output_zcorr_df = pd.read_csv(res.outputs.correlation_matrix,
                                  na_values='n/a',
                                  delimiter='\t',
                                  index_col=0)

    os.remove(res.outputs.correlation_matrix)
    # test equality of the matrices up to 3 decimals
    pd.testing.assert_frame_equal(output_zcorr_df, expected_zcorr_df,
                                  check_less_precise=3) 
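The lambda above is the Fisher z-transform, 0.5 * (ln(1+r) - ln(1-r)), which NumPy exposes directly as np.arctanh; a quick equivalence check:

import numpy as np

r = np.array([0.0, 0.5, 0.9])
assert np.allclose((np.log(1 + r) - np.log(1 - r)) * 0.5, np.arctanh(r))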
Example 36
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def scale_for_cmap(cmap, x, vmin=Ellipsis, vmax=Ellipsis, unit=Ellipsis):
    '''
    scale_for_cmap(cmap, x) yields the values in x rescaled to be appropriate for the given
      colormap cmap. The cmap must be the name of a colormap or a colormap object.

    For a given cmap argument, if the object is a colormap itself, it is treated as cmap.name.
    If the cmap names a colormap known to neuropythy, neuropythy will rescale the values in x
    according to a heuristic.
    '''
    import matplotlib as mpl
    if isinstance(cmap, mpl.colors.Colormap): cmap = cmap.name
    (name, cm) = (None, None)
    if cmap not in colormaps:
        for (k,v) in six.iteritems(colormaps):
            if cmap in k:
                (name, cm) = (k, v)
                break
    else: (name, cm) = (cmap, colormaps[cmap])
    if cm is not None:
        cm = cm if len(cm) == 3 else (cm + (None,))
        (cm, (mn,mx), uu) = cm
        if vmin is Ellipsis: vmin = mn
        if vmax is Ellipsis: vmax = mx
        if unit is Ellipsis: unit = uu
    if vmin is Ellipsis: vmin = None
    if vmax is Ellipsis: vmax = None
    if unit is Ellipsis: unit = None
    x = pimms.mag(x) if unit is None else pimms.mag(x, unit)
    if name is not None and name.startswith('log_'):
        emn = np.exp(vmin)
        x = np.log(x + emn)
    vmin = np.nanmin(x) if vmin is None else vmin
    vmax = np.nanmax(x) if vmax is None else vmax
    return zdivide(x - vmin, vmax - vmin, null=np.nan) 
Example 37
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def guess_cortex_cmap(pname):
    '''
    guess_cortex_cmap(proptery_name) yields a tuple (cmap, (vmin, vmax)) of a cortical color map
      appropriate to the given property name and the suggested value scaling for the cmap. If the
      given property is not a string or is not recognized then the log_eccentricity axis is used
      and the suggested vmin and vmax are None.
    '''
    import matplotlib as mpl
    if isinstance(pname, mpl.colors.Colormap): pname = pname.name
    if not pimms.is_str(pname): return ('eccenflat', cmap_eccenflat, (None, None), None)
    if pname in colormaps: (cm,cmname) = (colormaps[pname],pname)
    else:
        # check each manually
        cm = None
        for (k,v) in six.iteritems(colormaps):
            if pname.endswith(k):
                (cmname,cm) = (k,v)
                break
        if cm is None:
            for (k,v) in six.iteritems(colormaps):
                if pname.startswith(k):
                    (cmname,cm) = (k,v)
                    break
    # we prefer log-eccentricity when possible
    if cm is None: return ('eccenflat', cmap_eccenflat, (None, None), None)
    if ('log_'+cmname) in colormaps:
        cmname = 'log_'+cmname
        cm = colormaps[cmname]
    return (cmname,) + (cm if len(cm) == 3 else cm + (None,)) 
Example 38
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def log_c(c): return np.log(c) 
Example 39
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def jacobian(self, params, into=None):
        zg  = self.g.value(params)
        dzg = self.g.jacobian(params)
        zh  = self.h.value(params)
        dzh = self.h.jacobian(params)
        z   = zg ** zh
        return safe_into(into, times(plus(times(dzg, zh, inv(zg)), times(dzh, np.log(zg))), z)) 
Example 40
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def jacobian(self, params, into=None):
        z  = self.f.value(params)
        dz = self.f.jacobian(params)
        if self.base is None:
            dz = divide(dz, z)
        else:
            b = self.base.value(params)
            db = self.base.jacobian(params)
            logb = np.log(b)
            dz = dz / logb - times(np.log(z), db) / (b * logb * logb)
        return safe_into(into, dz) 
Example 41
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def log(x, base=None):
    x = to_potential(x)
    xc = is_const_potential(x)
    if base is None:
        if xc: return PotentialConstant(np.log(x.c))
        else:  return PotentialLog(x)
    base = to_potential(base)
    bc = is_const_potential(base)
    if xc and bc: return PotentialConstant(np.log(x.c, bc.c))
    else:         return PotentialLog(x, base) 
Example 42
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def log2(x):  return log(x, 2) 
Example 43
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def log10(x): return log(x, 10) 
Example 44
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def nanlog(x, null=np.nan):
    '''
    nanlog(x) is equivalent to numpy.log(x) except that it avoids calling log on 0 and non-finite
      values; in place of these values, it returns the value null (which is nan by default).
    '''
    x = np.asarray(x)
    ii0 = np.where(np.isfinite(x))
    ii  = np.where(x[ii0] > 0)[0]
    if len(ii) == numel(x): return np.log(x)
    res = np.full(x.shape, null)
    ii = tuple([u[ii] for u in ii0])
    res[ii] = np.log(x[ii])
    return res 
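A hypothetical call showing where the null value lands (nanlog takes the log only where x is finite and strictly positive):

import numpy as np

print(nanlog(np.array([np.e, 0.0, -1.0, np.inf])))  # -> [ 1. nan nan nan]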
Example 45
Project: fuku-ml   Author: fukuball   File: AdaBoostStump.py    License: MIT License
def calculate_alpha_u(self, weak_learner, u):

        alpha = 0.0
        epsilon = 0.0
        data_num = len(weak_learner.train_Y)

        for i in range(data_num):
            predict_string = np.array(list(map(str, weak_learner.train_X[i])))
            predict_string = ' '.join(predict_string[1:]) + ' ' + str(weak_learner.train_Y[i])
            prediction = weak_learner.prediction(predict_string, 'test_data')
            if (float(prediction['prediction']) != float(prediction['input_data_y'])):
                epsilon += (u[i] * 1.0)

        epsilon = epsilon / np.sum(u)
        tune_alpha = np.sqrt((1.0 - epsilon) / epsilon)
        alpha = np.log(tune_alpha)

        new_u = []

        for i in range(data_num):
            predict_string = np.array(list(map(str, weak_learner.train_X[i])))
            predict_string = ' '.join(predict_string[1:]) + ' ' + str(weak_learner.train_Y[i])
            prediction = weak_learner.prediction(predict_string, 'test_data')
            if (float(prediction['prediction']) != float(prediction['input_data_y'])):
                new_u.append(u[i] * tune_alpha)
            else:
                new_u.append(u[i] / tune_alpha)

        return alpha, np.array(new_u) 
Example 46
Project: fuku-ml   Author: fukuball   File: AdaBoostDecisionTree.py    License: MIT License
def calculate_alpha_u(self, weak_learner, u):

        alpha = 0.0
        epsilon = 0.0
        data_num = len(weak_learner.train_Y)

        for i in range(data_num):
            predict_string = np.array(list(map(str, weak_learner.train_X[i])))
            predict_string = ' '.join(predict_string[1:]) + ' ' + str(weak_learner.train_Y[i])
            prediction = weak_learner.prediction(predict_string, 'test_data')
            if (float(prediction['prediction']) != float(prediction['input_data_y'])):
                epsilon += (u[i] * 1.0)

        epsilon = epsilon / np.sum(u)
        tune_alpha = np.sqrt((1.0 - epsilon) / epsilon)
        alpha = np.log(tune_alpha)

        new_u = []

        for i in range(data_num):
            predict_string = np.array(list(map(str, weak_learner.train_X[i])))
            predict_string = ' '.join(predict_string[1:]) + ' ' + str(weak_learner.train_Y[i])
            prediction = weak_learner.prediction(predict_string, 'test_data')
            if (float(prediction['prediction']) != float(prediction['input_data_y'])):
                new_u.append(u[i] * tune_alpha)
            else:
                new_u.append(u[i] / tune_alpha)

        new_u = np.array(new_u) / np.sum(new_u)

        return alpha, new_u 
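Both AdaBoost variants share the same update rule: with weighted error epsilon, misclassified weights are scaled up by sqrt((1-epsilon)/epsilon), correctly classified ones scaled down by the same factor, and the learner's vote weight is alpha = ln(sqrt((1-epsilon)/epsilon)) = 0.5 * ln((1-epsilon)/epsilon). For example:

import numpy as np

epsilon = 0.25
tune_alpha = np.sqrt((1 - epsilon) / epsilon)  # ~1.732
alpha = np.log(tune_alpha)                     # ~0.549
print(tune_alpha, alpha)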
Example 47
Project: fuku-ml   Author: fukuball   File: ProbabilisticSVM.py    License: MIT License
def error_function(self, x, y, W):

        svm_process_x = self.svm_score(x)
        svm_process_x = [1] + [svm_process_x]

        error = np.log(1 + np.exp((-1) * y * np.inner(svm_process_x, self.logistic_processor.W)))

        return error 
Example 48
Project: fuku-ml   Author: fukuball   File: KernelLogisticRegression.py    License: MIT License
def error_function(self, x, y, W):

        x = x[1:]
        original_X = self.train_X[:, 1:]
        score = np.sum(self.beta * utility.Kernel.kernel_matrix_xX(self, x, original_X))
        error = np.log(1 + np.exp((-1) * y * score))

        return error 
Example 49
Project: fuku-ml   Author: fukuball   File: LogisticRegression.py    License: MIT License
def error_function(self, x, y, W):
        # needs refactoring

        '''
        Error function to calculate error: cross entropy error
        '''

        error = np.log(1 + np.exp((-1) * y * np.inner(x, W)))

        return error 
Example 50
Project: fullrmc   Author: bachiraoun   File: Collection.py    License: GNU Affero General Public License v3.0
def gaussian(x, center=0, FWHM=1, normalize=True, check=True):
    """
    Compute the normal distribution or gaussian distribution of a given vector.
    The probability density of the gaussian distribution is:
    :math:`f(x,\\mu,\\sigma) = \\frac{1}{\\sigma\\sqrt{2\\pi}} e^{\\frac{-(x-\\mu)^{2}}{2\\sigma^2}}`

    Where:\n
    * :math:`\\mu` is the center of the gaussian, it is the mean or
      expectation of the distribution it is called the distribution's
      median or mode.
    * :math:`\\sigma` is its standard deviation.
    * :math:`FWHM=2\\sqrt{2 ln 2} \\sigma` is the Full Width at Half
      Maximum of the gaussian.

    :Parameters:
        #. x (numpy.ndarray): The vector to compute the gaussian
        #. center (number): The center of the gaussian.
        #. FWHM (number): The Full Width at Half Maximum of the gaussian.
        #. normalize(boolean): Whether to normalize the generated gaussian
           by :math:`\\frac{1}{\\sigma\\sqrt{2\\pi}}` so the integral
           is equal to 1.
        #. check (boolean): whether to check arguments before generating
           vectors.
    """
    if check:
        assert is_number(center), LOGGER.error("center must be a number")
        center = FLOAT_TYPE(center)
        assert is_number(FWHM), LOGGER.error("FWHM must be a number")
        FWHM = FLOAT_TYPE(FWHM)
        assert FWHM>0, LOGGER.error("FWHM must be bigger than 0")
        assert isinstance(normalize, bool), LOGGER.error("normalize must be boolean")
    sigma       = FWHM/(2.*np.sqrt(2*np.log(2)))
    expKernel   = ((x-center)**2) / (-2*sigma**2)
    exp         = np.exp(expKernel)
    scaleFactor = 1.
    if normalize:
        scaleFactor /= sigma*np.sqrt(2*np.pi)
    return (scaleFactor * exp).astype(FLOAT_TYPE) 
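A hypothetical call (the check path needs fullrmc's is_number/LOGGER helpers, so it is skipped here; FLOAT_TYPE is assumed to be np.float32, as in fullrmc's Globals module):

import numpy as np

FLOAT_TYPE = np.float32  # assumed; fullrmc defines this in its Globals module
x = np.linspace(-3.0, 3.0, 7)
g = gaussian(x, center=0.0, FWHM=2.0 * np.sqrt(2.0 * np.log(2.0)), check=False)
print(g.round(3))  # sigma = 1, so the normalized peak is 1/sqrt(2*pi) ~ 0.399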
Example 51
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: lstm_bucketing.py    License: Apache License 2.0
def Perplexity(label, pred):
    label = label.T.reshape((-1,))
    loss = 0.
    for i in range(pred.shape[0]):
        loss += -np.log(max(1e-10, pred[i][int(label[i])]))
    return np.exp(loss / label.size) 
Example 52
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: rnn_cell_demo.py    License: Apache License 2.0
def Perplexity(label, pred):
    # TODO(tofix): we make a transpose of label here, because when
    # using the RNN cell, we called swap axis to the data.
    label = label.T.reshape((-1,))
    loss = 0.
    for i in range(pred.shape[0]):
        loss += -np.log(max(1e-10, pred[i][int(label[i])]))
    return np.exp(loss / label.size) 
Example 53
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: gru_bucketing.py    License: Apache License 2.0
def Perplexity(label, pred):
    label = label.T.reshape((-1,))
    loss = 0.
    for i in range(pred.shape[0]):
        loss += -np.log(max(1e-10, pred[i][int(label[i])]))
    return np.exp(loss / label.size) 
Example 54
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: metric.py    License: Apache License 2.0
def __init__(self, eps=1e-12, name='log-loss',
                 output_names=None, label_names=None):
        super(LogLossMetric, self).__init__(
            name, eps=eps,
            output_names=output_names, label_names=label_names)
        self.eps = eps 
Example 55
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: stt_metric.py    License: Apache License 2.0
def update(self, labels, preds):
        check_label_shapes(labels, preds)
        if self.is_logging:
            log = LogUtil().getlogger()
            labelUtil = LabelUtil.getInstance()
        self.batch_loss = 0.

        for label, pred in zip(labels, preds):
            label = label.asnumpy()
            pred = pred.asnumpy()

            seq_length = len(pred) / int(int(self.batch_size) / int(self.num_gpu))

            for i in range(int(int(self.batch_size) / int(self.num_gpu))):
                l = remove_blank(label[i])
                p = []
                for k in range(int(seq_length)):
                    p.append(np.argmax(pred[k * int(int(self.batch_size) / int(self.num_gpu)) + i]))
                p = pred_best(p)

                l_distance = levenshtein_distance(l, p)
                self.total_n_label += len(l)
                self.total_l_dist += l_distance
                this_cer = float(l_distance) / float(len(l))
                if self.is_logging:
                    log.info("label: %s " % (labelUtil.convert_num_to_word(l)))
                    log.info("pred : %s , cer: %f (distance: %d/ label length: %d)" % (
                        labelUtil.convert_num_to_word(p), this_cer, l_distance, len(l)))
                self.num_inst += 1
                self.sum_metric += this_cer
                if self.is_epoch_end:
                    loss = ctc_loss(l, pred, i, int(seq_length), int(self.batch_size), int(self.num_gpu))
                    self.batch_loss += loss
                    if self.is_logging:
                        log.info("loss: %f " % loss)
        self.total_ctc_loss += self.batch_loss 
Example 56
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: stt_metric.py    License: Apache License 2.0
def ctc_loss(label, prob, remainder, seq_length, batch_size, num_gpu=1, big_num=1e10):
    label_ = [0, 0]
    prob[prob < 1 / big_num] = 1 / big_num
    log_prob = np.log(prob)

    l = len(label)
    for i in range(l):
        label_.append(int(label[i]))
        label_.append(0)

    l_ = 2 * l + 1
    a = np.full((seq_length, l_ + 1), -big_num)
    a[0][1] = log_prob[remainder][0]
    a[0][2] = log_prob[remainder][label_[2]]
    for i in range(1, seq_length):
        row = i * int(batch_size / num_gpu) + remainder
        a[i][1] = a[i - 1][1] + log_prob[row][0]
        a[i][2] = np.logaddexp(a[i - 1][2], a[i - 1][1]) + log_prob[row][label_[2]]
        for j in range(3, l_ + 1):
            a[i][j] = np.logaddexp(a[i - 1][j], a[i - 1][j - 1])
            if label_[j] != 0 and label_[j] != label_[j - 2]:
                a[i][j] = np.logaddexp(a[i][j], a[i - 1][j - 2])
            a[i][j] += log_prob[row][label_[j]]

    return -np.logaddexp(a[seq_length - 1][l_], a[seq_length - 1][l_ - 1])


# label is done with remove_blank
# pred is got from pred_best 
Example 57
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: dec.py    License: Apache License 2.0
def setup(self, X, num_centers, alpha, save_to='dec_model'):
        sep = X.shape[0]*9//10
        X_train = X[:sep]
        X_val = X[sep:]
        ae_model = AutoEncoderModel(self.xpu, [X.shape[1],500,500,2000,10], pt_dropout=0.2)
        if not os.path.exists(save_to+'_pt.arg'):
            ae_model.layerwise_pretrain(X_train, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,
                                        lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
            ae_model.finetune(X_train, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
                              lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
            ae_model.save(save_to+'_pt.arg')
            logging.log(logging.INFO, "Autoencoder Training error: %f"%ae_model.eval(X_train))
            logging.log(logging.INFO, "Autoencoder Validation error: %f"%ae_model.eval(X_val))
        else:
            ae_model.load(save_to+'_pt.arg')
        self.ae_model = ae_model

        self.dec_op = DECModel.DECLoss(num_centers, alpha)
        label = mx.sym.Variable('label')
        self.feature = self.ae_model.encoder
        self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
        self.args.update({k:v for k,v in self.ae_model.args.items() if k in self.ae_model.encoder.list_arguments()})
        self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]), ctx=self.xpu)
        self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu) for k,v in self.args.items()})
        self.args_mult.update({k: k.endswith('bias') and 2.0 or 1.0 for k in self.args})
        self.num_centers = num_centers 
Example 58
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: lstm_sort.py    License: Apache License 2.0
def Perplexity(label, pred):
    label = label.T.reshape((-1,))
    loss = 0.
    for i in range(pred.shape[0]):
        loss += -np.log(max(1e-10, pred[i][int(label[i])]))
    return np.exp(loss / label.size) 
Example 59
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: vaegan_mxnet.py    License: Apache License 2.0
def GaussianLogDensity(x, mu, log_var, name='GaussianLogDensity', EPSILON = 1e-6):
    '''GaussianLogDensity loss calculation for layer wise loss
    '''
    c = mx.sym.ones_like(log_var)*2.0 * 3.1416
    c = mx.symbol.log(c)
    var = mx.sym.exp(log_var)
    x_mu2 = mx.symbol.square(x - mu)   # [Issue] not sure whether the dimensions work here
    x_mu2_over_var = mx.symbol.broadcast_div(x_mu2, var + EPSILON)
    log_prob = -0.5 * (c + log_var + x_mu2_over_var)
    log_prob = mx.symbol.sum(log_prob, axis=1, name=name)   # keep_dims=True,
    return log_prob 
Example 60
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: utils.py    License: Apache License 2.0
def npy_binary_entropy(prediction, target):
    assert prediction.shape == target.shape
    return - (numpy.log(prediction + 1E-9) * target +
              numpy.log(1 - prediction + 1E-9) * (1 - target)).sum()
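A hypothetical call: the 1e-9 offsets keep both logs finite when a prediction is exactly 0 or 1:

import numpy

prediction = numpy.array([0.9, 0.2])
target = numpy.array([1.0, 0.0])
print(npy_binary_entropy(prediction, target))  # -(log 0.9 + log 0.8) ~ 0.328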