Python numpy.tanh() Examples

The following are 30 code examples of numpy.tanh(), taken from open-source projects. The source file and originating project for each example are listed above it. You may also want to check out all available functions and classes of the numpy module, or try the search function.
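Before the project examples, here is a minimal, self-contained sketch of what numpy.tanh computes (printed values are approximate):

import numpy as np

# np.tanh applies the hyperbolic tangent element-wise; outputs lie in (-1, 1).
x = np.array([-2.0, 0.0, 2.0])
print(np.tanh(x))                # [-0.964  0.     0.964]
print(np.arctanh(np.tanh(x)))    # [-2.  0.  2.] -- arctanh is its inverse on (-1, 1)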
Example #1
Source File: faster_gru.py    From knmt with GNU General Public License v3.0
def faster_call2(self, h, x):
        r_z_h_x = self.W_r_z_h(x)

        r_z_h = self.U_r_z(h)

        r_x, z_x, h_x = split_axis(r_z_h_x, (self.n_units, self.n_units * 2), axis=1)
        assert r_x.data.shape[1] == self.n_units
        assert z_x.data.shape[1] == self.n_units
        assert h_x.data.shape[1] == self.n_units

        r_h, z_h = split_axis(r_z_h, (self.n_units,), axis=1)
#         r = sigmoid.sigmoid(r_x + r_h)
#         z = sigmoid.sigmoid(z_x + z_h)
#         h_bar = tanh.tanh(h_x + self.U(sigm_a_plus_b_by_h(r_x, r_h, h)))
#         h_new = (1 - z) * h + z * h_bar
#         return h_new

        return compute_output_GRU(z_x, z_h, h_x, h, self.U(sigm_a_plus_b_by_h_fast(r_x, r_h, h))) 
Example #2
Source File: iGAN_predict.py    From iGAN with MIT License
def invert_bfgs(gen_model, invert_model, ftr_model, im, z_predict=None, npx=64):
    _f, z = invert_model
    nz = gen_model.nz
    if z_predict is None:
        z_predict = np_rng.uniform(-1., 1., size=(1, nz))
    else:
        z_predict = floatX(z_predict)
    z_predict = np.arctanh(z_predict)
    im_t = gen_model.transform(im)
    ftr = ftr_model(im_t)

    prob = optimize.minimize(f_bfgs, z_predict, args=(_f, im_t, ftr),
                             tol=1e-6, jac=True, method='L-BFGS-B', options={'maxiter': 200})
    print('n_iters = %3d, f = %.3f' % (prob.nit, prob.fun))
    z_opt = prob.x
    z_opt_n = floatX(z_opt[np.newaxis, :])
    [f_opt, g, gx] = _f(z_opt_n, im_t, ftr)
    gx = gen_model.inverse_transform(gx, npx=npx)
    z_opt = np.tanh(z_opt)
    return gx, z_opt, f_opt 
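A note on the arctanh/tanh pair above: invert_bfgs runs L-BFGS-B on arctanh(z), apparently so the optimizer can work in an unbounded space, and then maps the result back into (-1, 1) with np.tanh. A minimal, hypothetical sketch of that reparameterization trick (the target value and loss are made up, not from iGAN):

import numpy as np
from scipy import optimize

target = 0.7

def loss(u):
    z = np.tanh(u)                          # squash the free variable back into (-1, 1)
    return float(np.sum((z - target) ** 2))

u0 = np.arctanh(np.array([0.0]))            # optimize in the unbounded space
res = optimize.minimize(loss, u0, method='L-BFGS-B')
print(np.tanh(res.x))                       # ~[0.7], guaranteed to stay inside (-1, 1)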
Example #3
Source File: transformers.py    From deepchem with MIT License
def expand(self, X):
    """Binarize features.

    Parameters:
    ----------
    X: np.ndarray
      Features

    Returns:
    -------
    X: np.ndarray
      Binarized features

    """
    Xexp = []
    for i in range(X.shape[1]):
      for k in np.arange(0, self.max[i] + self.step, self.step):
        Xexp += [np.tanh((X[:, i] - k) / self.step)]
    return np.array(Xexp).T 
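The np.tanh((X[:, i] - k) / self.step) term acts as a smooth step ("soft binarization") around each threshold k: roughly -1 well below k and +1 well above it. A small standalone illustration with arbitrary numbers:

import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0])
k, step = 1.5, 0.5
print(np.tanh((x - k) / step))    # approx [-0.995, -0.762, 0.762, 0.995]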
Example #4
Source File: function_approximation.py    From PRML with MIT License
def sum_of_squares_error(xlist, tlist, w1, w2):
    """二乗誤差和を計算する"""
    error = 0.0
    for n in range(N):
        z = np.zeros(NUM_HIDDEN)
        y = np.zeros(NUM_OUTPUT)
        # Insert the bias value 1 at the beginning
        x = np.insert(xlist[n], 0, 1)
        # Compute the output by forward propagation
        for j in range(NUM_HIDDEN):
            a = np.zeros(NUM_HIDDEN)
            for i in range(NUM_INPUT):
                a[j] += w1[j, i] * x[i]
            z[j] = np.tanh(a[j])

        for k in range(NUM_OUTPUT):
            for j in range(NUM_HIDDEN):
                y[k] += w2[k, j] * z[j]
        # Compute the squared error
        for k in range(NUM_OUTPUT):
            error += 0.5 * (y[k] - tlist[n, k]) * (y[k] - tlist[n, k])
    return error 
Example #5
Source File: function_approximation.py    From PRML with MIT License
def output(x, w1, w2):
    """xを入力したときのニューラルネットワークの出力を計算
    隠れユニットの出力も一緒に返す"""
    # Convert to an array and insert the bias value 1 at the beginning
    x = np.insert(x, 0, 1)
    z = np.zeros(NUM_HIDDEN)
    y = np.zeros(NUM_OUTPUT)
    # Compute the output by forward propagation
    for j in range(NUM_HIDDEN):
        a = np.zeros(NUM_HIDDEN)
        for i in range(NUM_INPUT):
            a[j] += w1[j, i] * x[i]
        z[j] = np.tanh(a[j])
    for k in range(NUM_OUTPUT):
        for j in range(NUM_HIDDEN):
            y[k] += w2[k, j] * z[j]
    return y, z 
Example #6
Source File: animation.py    From PRML with MIT License
def sum_of_squares_error(xlist, tlist, w1, w2):
    """二乗誤差和を計算する"""
    error = 0.0
    for n in range(N):
        z = np.zeros(NUM_HIDDEN)
        y = np.zeros(NUM_OUTPUT)
        # Insert the bias value 1 at the beginning
        x = np.insert(xlist[n], 0, 1)
        # Compute the output by forward propagation
        for j in range(NUM_HIDDEN):
            a = np.zeros(NUM_HIDDEN)
            a[j] = np.dot(w1[j, :], x)
            z[j] = np.tanh(a[j])
        for k in range(NUM_OUTPUT):
            y[k] = np.dot(w2[k, :], z)
        # Compute the squared error
        for k in range(NUM_OUTPUT):
            error += 0.5 * (y[k] - tlist[n, k]) * (y[k] - tlist[n, k])
    return error 
Example #7
Source File: sawyer_nut_assembly.py    From robosuite with MIT License
def _check_success(self):
        """
        Returns True if task has been completed.
        """

        # remember objects that are on the correct pegs
        gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
        for i in range(len(self.ob_inits)):
            obj_str = str(self.item_names[i]) + "0"
            obj_pos = self.sim.data.body_xpos[self.obj_body_id[obj_str]]
            dist = np.linalg.norm(gripper_site_pos - obj_pos)
            r_reach = 1 - np.tanh(10.0 * dist)
            self.objects_on_pegs[i] = int(self.on_peg(obj_pos, i) and r_reach < 0.6)

        if self.single_object_mode > 0:
            return np.sum(self.objects_on_pegs) > 0  # need one object on peg

        # returns True if all objects are on correct pegs
        return np.sum(self.objects_on_pegs) == len(self.ob_inits) 
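The 1 - np.tanh(10.0 * dist) term above (also used in Examples #8-#10) is a shaped reach reward: it equals 1 when the gripper is at the object and decays smoothly toward 0 with distance. A quick standalone illustration with made-up distances:

import numpy as np

dist = np.array([0.0, 0.05, 0.1, 0.3])    # illustrative distances
print(1 - np.tanh(10.0 * dist))           # approx [1.0, 0.538, 0.238, 0.005]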
Example #8
Source File: panda_pick_place.py    From robosuite with MIT License
def _check_success(self):
        """
        Returns True if task has been completed.
        """

        # remember objects that are in the correct bins
        gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
        for i in range(len(self.ob_inits)):
            obj_str = str(self.item_names[i]) + "0"
            obj_pos = self.sim.data.body_xpos[self.obj_body_id[obj_str]]
            dist = np.linalg.norm(gripper_site_pos - obj_pos)
            r_reach = 1 - np.tanh(10.0 * dist)
            self.objects_in_bins[i] = int(
                (not self.not_in_bin(obj_pos, i)) and r_reach < 0.6
            )

        # returns True if a single object is in the correct bin
        if self.single_object_mode == 1 or self.single_object_mode == 2:
            return np.sum(self.objects_in_bins) > 0

        # returns True if all objects are in correct bins
        return np.sum(self.objects_in_bins) == len(self.ob_inits) 
Example #9
Source File: sawyer_pick_place.py    From robosuite with MIT License
def _check_success(self):
        """
        Returns True if task has been completed.
        """

        # remember objects that are in the correct bins
        gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
        for i in range(len(self.ob_inits)):
            obj_str = str(self.item_names[i]) + "0"
            obj_pos = self.sim.data.body_xpos[self.obj_body_id[obj_str]]
            dist = np.linalg.norm(gripper_site_pos - obj_pos)
            r_reach = 1 - np.tanh(10.0 * dist)
            self.objects_in_bins[i] = int(
                (not self.not_in_bin(obj_pos, i)) and r_reach < 0.6
            )

        # returns True if a single object is in the correct bin
        if self.single_object_mode == 1 or self.single_object_mode == 2:
            return np.sum(self.objects_in_bins) > 0

        # returns True if all objects are in correct bins
        return np.sum(self.objects_in_bins) == len(self.ob_inits) 
Example #10
Source File: panda_nut_assembly.py    From robosuite with MIT License
def _check_success(self):
        """
        Returns True if task has been completed.
        """

        # remember objects that are on the correct pegs
        gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
        for i in range(len(self.ob_inits)):
            obj_str = str(self.item_names[i]) + "0"
            obj_pos = self.sim.data.body_xpos[self.obj_body_id[obj_str]]
            dist = np.linalg.norm(gripper_site_pos - obj_pos)
            r_reach = 1 - np.tanh(10.0 * dist)
            self.objects_on_pegs[i] = int(self.on_peg(obj_pos, i) and r_reach < 0.6)

        if self.single_object_mode > 0:
            return np.sum(self.objects_on_pegs) > 0  # need one object on peg

        # returns True if all objects are on correct pegs
        return np.sum(self.objects_on_pegs) == len(self.ob_inits) 
Example #11
Source File: common.py    From numpynet with BSD 3-Clause "New" or "Revised" License
def __init__(self, choice="sigmoid"):
        """
        :param choice: Which activation function you want, must be in self.available
        """
        if choice not in self.available:
            msg = "Choice of activation (" + choice + ") not available!"
            log.out.error(msg)
            raise ValueError(msg)
        elif choice == "tanh":
            self.function = self._tanh
        elif choice == "tanhpos":
            self.function = self._tanhpos
        elif choice == "sigmoid":
            self.function = self._sigmoid
        elif choice == "softplus":
            self.function = self._softplus
        elif choice == "relu":
            self.function = self._relu
        elif choice == "leakyrelu":
            self.function = self._leakyrelu 
Example #12
Source File: nonlinearities.py    From nsf with MIT License
def forward(self, inputs, context=None):
        mask_right = (inputs > self.cut_point)
        mask_left = (inputs < -self.cut_point)
        mask_middle = ~(mask_right | mask_left)

        outputs = torch.zeros_like(inputs)
        outputs[mask_middle] = torch.tanh(inputs[mask_middle])
        outputs[mask_right] = self.alpha * torch.log(self.beta * inputs[mask_right])
        outputs[mask_left] = self.alpha * -torch.log(-self.beta * inputs[mask_left])

        logabsdet = torch.zeros_like(inputs)
        logabsdet[mask_middle] = torch.log(1 - outputs[mask_middle] ** 2)
        logabsdet[mask_right] = torch.log(self.alpha / inputs[mask_right])
        logabsdet[mask_left] = torch.log(-self.alpha / inputs[mask_left])
        logabsdet = utils.sum_except_batch(logabsdet, num_batch_dims=1)

        return outputs, logabsdet 
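In the middle region the log-absolute-Jacobian is torch.log(1 - outputs ** 2), which is simply the log-derivative of tanh, since d/dx tanh(x) = 1 - tanh(x)^2. A quick NumPy check of that identity:

import numpy as np

x = np.linspace(-2.0, 2.0, 9)
analytic = 1 - np.tanh(x) ** 2                              # derivative of tanh
numeric = (np.tanh(x + 1e-6) - np.tanh(x - 1e-6)) / 2e-6    # central difference
print(np.allclose(analytic, numeric, atol=1e-6))            # True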
Example #13
Source File: net.py    From exposure with MIT License
def draw_value_reward_score(self, img, value, reward, score):
    img = img.copy()
    # Average with 0.5 for semi-transparent background
    img[:14] = img[:14] * 0.5 + 0.25
    img[50:] = img[50:] * 0.5 + 0.25
    if self.cfg.gan == 'ls':
      red = -np.tanh(float(score) / 1) * 0.5 + 0.5
    else:
      red = -np.tanh(float(score) / 10.0) * 0.5 + 0.5
    top = '%+.2f %+.2f' % (value, reward)
    cv2.putText(img, top, (3, 7), cv2.FONT_HERSHEY_SIMPLEX, 0.25,
                (1.0, 1.0 - red, 1.0 - red))
    score = '%+.3f' % score
    cv2.putText(img, score, (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                (1.0, 1.0 - red, 1.0 - red))
    return img 
Example #14
Source File: test_core.py    From recruit with Apache License 2.0
def test_testUfuncRegression(self):
        # Tests new ufuncs on MaskedArrays.
        for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
                  'sin', 'cos', 'tan',
                  'arcsin', 'arccos', 'arctan',
                  'sinh', 'cosh', 'tanh',
                  'arcsinh',
                  'arccosh',
                  'arctanh',
                  'absolute', 'fabs', 'negative',
                  'floor', 'ceil',
                  'logical_not',
                  'add', 'subtract', 'multiply',
                  'divide', 'true_divide', 'floor_divide',
                  'remainder', 'fmod', 'hypot', 'arctan2',
                  'equal', 'not_equal', 'less_equal', 'greater_equal',
                  'less', 'greater',
                  'logical_and', 'logical_or', 'logical_xor',
                  ]:
            try:
                uf = getattr(umath, f)
            except AttributeError:
                uf = getattr(fromnumeric, f)
            mf = getattr(numpy.ma.core, f)
            args = self.d[:uf.nin]
            ur = uf(*args)
            mr = mf(*args)
            assert_equal(ur.filled(0), mr.filled(0), f)
            assert_mask_equal(ur.mask, mr.mask, err_msg=f) 
Example #15
Source File: faster_gru.py    From knmt with GNU General Public License v3.0
def forward_gpu(self, x):
        z_x, z_h, h_x, h, hh = x
        self.z, self.h_bar, h_new = cuda.elementwise(
            'T z_x, T z_h, T h_x, T h, T hh',
            'T z, T h_bar, T h_new',
            '''
                z = tanh((z_x + z_h) * 0.5) * 0.5 + 0.5;
                //z = 1.0/ ( 1 + exp(- (z_x + z_h)));
                h_bar = tanh(h_x + hh);
                h_new = (1 - z) * h + z * h_bar;
                ''',
            'compute_output_gru_fwd')(z_x, z_h, h_x, h, hh)
        return h_new, 
Example #16
Source File: ln_lstm.py    From knmt with GNU General Public License v3.0
def _sigmoid(x):
    half = x.dtype.type(0.5)
    return numpy.tanh(x * half) * half + half 
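This helper relies on the identity sigmoid(x) = 0.5 * tanh(x / 2) + 0.5, a form that avoids computing exp(-x) directly (the exponential can overflow for very negative x). A quick check that the two forms agree:

import numpy as np

x = np.linspace(-5.0, 5.0, 11)
tanh_form = np.tanh(x * 0.5) * 0.5 + 0.5
logistic = 1.0 / (1.0 + np.exp(-x))
print(np.allclose(tanh_form, logistic))    # True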
Example #17
Source File: faster_gru.py    From knmt with GNU General Public License v3.0
def forward_cpu(self, x):
        self.sigma_a_plus_b = (numpy.tanh((x[0] + x[1]) * 0.5) * 0.5 + 0.5)
        return x[2] * self.sigma_a_plus_b, 
Example #18
Source File: faster_gru.py    From knmt with GNU General Public License v3.0
def forward_gpu(self, x):
        z, h, hh = x
        self.h_bar, h_new = cuda.elementwise(
            'T z, T h, T hh',
            'T h_bar, T h_new',
            '''
                h_bar = tanh(hh);
                h_new = (1 - z) * h + z * h_bar;
                ''',
            'compute_output_gru_fwd')(z, h, hh)
        return h_new, 
Example #19
Source File: ln_lstm.py    From knmt with GNU General Public License v3.0
def forward(self, inputs):
        c_prev, x = inputs
        a, i, f, o = _extract_gates(x)
        batch = len(x)

        if isinstance(x, numpy.ndarray):
            self.a = numpy.tanh(a)
            self.i = _sigmoid(i)
            self.f = _sigmoid(f)
            self.o = _sigmoid(o)

            c_next = numpy.empty_like(c_prev)
            c_next[:batch] = self.a * self.i + self.f * c_prev[:batch]
            ungated_h = numpy.tanh(c_next[:batch])
            o_gate = self.o
        else:
            c_next = cuda.cupy.empty_like(c_prev)
            ungated_h = cuda.cupy.empty_like(c_next[:batch])
            o_gate = cuda.cupy.empty_like(c_next[:batch])
            cuda.elementwise(
                'T c_prev, T a, T i_, T f, T o', 'T c, T ungated_h, T o_gate',
                '''
                    COMMON_ROUTINE;
                    c = aa * ai + af * c_prev;
                    ungated_h = tanh(c);
                    o_gate = ao;
                ''',
                'lstm_fwd', preamble=_preamble)(
                    c_prev[:batch], a, i, f, o, c_next[:batch], ungated_h, o_gate)

        c_next[batch:] = c_prev[batch:]
        self.c = c_next[:batch]
        return c_next, ungated_h, o_gate 
Example #20
Source File: faster_gru.py    From knmt with GNU General Public License v3.0
def forward_gpu(self, x):
        self.sigma_a_plus_b, y = cuda.elementwise(
            'T x1, T x2, T x3', 'T sigma_a_plus_b, T y',
            '''
                sigma_a_plus_b = tanh((x1 + x2) * 0.5) * 0.5 + 0.5;// 1 / (1 + exp(-(x1 + x2)));
                y = x3 * sigma_a_plus_b;
                ''',
            'sigmoid_a_plus_b_by_h_fwd')(x[0], x[1], x[2])
        return y, 
Example #21
Source File: ortho_plane_visualization.py    From ffn with Apache License 2.0
def normalize_image(img2d, act=None):
  """Map unbounded grey image to [0,1]-RGB, r:negative, b:positive, g:nan.

  Args:
    img2d: (x,y) image array, channels are not supported.
    act: ([None]|'tanh'|'sig') optional activation function to scale grey
      values. None means normalized between min and 0 for negative values and
      between 0 and max for positive values.

  Returns:
    img_rgb: (x,y,3) image array
  """
  nan_mask = np.isnan(img2d)
  img2d[nan_mask] = 0
  m, mm = img2d.min(), img2d.max()
  img_rgb = np.zeros(img2d.shape + (3,), dtype=np.float32)
  if act == 'tanh':
    img_rgb[~nan_mask, 0] = np.tanh(np.clip(img2d, m, 0))[~nan_mask]
    img_rgb[~nan_mask, 2] = np.tanh(np.clip(img2d, 0, mm))[~nan_mask]
  elif act == 'sig':
    img_rgb[~nan_mask, 0] = sigmoid(img2d[~nan_mask])
    img_rgb[~nan_mask, 2] = img_rgb[~nan_mask, 0]
  else:
    img_rgb[~nan_mask, 0] = (np.clip(img2d, m, 0) / m)[~nan_mask]
    img_rgb[~nan_mask, 2] = (np.clip(img2d, 0, mm) / mm)[~nan_mask]

  img_rgb[nan_mask, 1] = 1.0
  return img_rgb 
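A hypothetical usage sketch for normalize_image, assuming the function above is in scope; it only illustrates the output shape and the NaN-to-green mapping described in the docstring:

import numpy as np

img = np.array([[-2.0, 0.5],
                [np.nan, 3.0]])
rgb = normalize_image(img, act='tanh')    # assumes normalize_image is importable
print(rgb.shape)    # (2, 2, 3)
print(rgb[1, 0])    # NaN pixel is rendered green: [0. 1. 0.]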
Example #22
Source File: faster_gru.py    From knmt with GNU General Public License v3.0
def compute_GRU_out_2(z, h, hh):
    h_bar = F.tanh(hh)
    h_new = (1 - z) * h + z * h_bar
    return h_new 
Example #23
Source File: faster_gru.py    From knmt with GNU General Public License v3.0
def faster_call(self, h, x):
        r_z_h_x = self.W_r_z_h(x)
        r_x, z_x, h_x = split_axis(r_z_h_x, (self.n_units, self.n_units * 2), axis=1)
        assert r_x.data.shape[1] == self.n_units
        assert z_x.data.shape[1] == self.n_units
        assert h_x.data.shape[1] == self.n_units

        r_z_h = self.U_r_z(h)
        r_h, z_h = split_axis(r_z_h, (self.n_units,), axis=1)

        r = sigmoid.sigmoid(r_x + r_h)
        z = sigmoid.sigmoid(z_x + z_h)
        h_bar = tanh.tanh(h_x + self.U(r * h))
        h_new = (1 - z) * h + z * h_bar
        return h_new 
Example #24
Source File: test_core.py    From recruit with Apache License 2.0
def test_basic_ufuncs(self):
        # Test various functions such as sin, cos.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(np.cos(x), cos(xm))
        assert_equal(np.cosh(x), cosh(xm))
        assert_equal(np.sin(x), sin(xm))
        assert_equal(np.sinh(x), sinh(xm))
        assert_equal(np.tan(x), tan(xm))
        assert_equal(np.tanh(x), tanh(xm))
        assert_equal(np.sqrt(abs(x)), sqrt(xm))
        assert_equal(np.log(abs(x)), log(xm))
        assert_equal(np.log10(abs(x)), log10(xm))
        assert_equal(np.exp(x), exp(xm))
        assert_equal(np.arcsin(z), arcsin(zm))
        assert_equal(np.arccos(z), arccos(zm))
        assert_equal(np.arctan(z), arctan(zm))
        assert_equal(np.arctan2(x, y), arctan2(xm, ym))
        assert_equal(np.absolute(x), absolute(xm))
        assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))
        assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))
        assert_equal(np.equal(x, y), equal(xm, ym))
        assert_equal(np.not_equal(x, y), not_equal(xm, ym))
        assert_equal(np.less(x, y), less(xm, ym))
        assert_equal(np.greater(x, y), greater(xm, ym))
        assert_equal(np.less_equal(x, y), less_equal(xm, ym))
        assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
        assert_equal(np.conjugate(x), conjugate(xm)) 
Example #25
Source File: test_old_ma.py    From recruit with Apache License 2.0
def test_testUfuncRegression(self):
        f_invalid_ignore = [
            'sqrt', 'arctanh', 'arcsin', 'arccos',
            'arccosh', 'arctanh', 'log', 'log10', 'divide',
            'true_divide', 'floor_divide', 'remainder', 'fmod']
        for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
                  'sin', 'cos', 'tan',
                  'arcsin', 'arccos', 'arctan',
                  'sinh', 'cosh', 'tanh',
                  'arcsinh',
                  'arccosh',
                  'arctanh',
                  'absolute', 'fabs', 'negative',
                  'floor', 'ceil',
                  'logical_not',
                  'add', 'subtract', 'multiply',
                  'divide', 'true_divide', 'floor_divide',
                  'remainder', 'fmod', 'hypot', 'arctan2',
                  'equal', 'not_equal', 'less_equal', 'greater_equal',
                  'less', 'greater',
                  'logical_and', 'logical_or', 'logical_xor']:
            try:
                uf = getattr(umath, f)
            except AttributeError:
                uf = getattr(fromnumeric, f)
            mf = getattr(np.ma, f)
            args = self.d[:uf.nin]
            with np.errstate():
                if f in f_invalid_ignore:
                    np.seterr(invalid='ignore')
                if f in ['arctanh', 'log', 'log10']:
                    np.seterr(divide='ignore')
                ur = uf(*args)
                mr = mf(*args)
            assert_(eq(ur.filled(0), mr.filled(0), f))
            assert_(eqmask(ur.mask, mr.mask)) 
Example #26
Source File: faster_gru.py    From knmt with GNU General Public License v3.0
def classic_call(self, h, x):
        r = sigmoid.sigmoid(self.W_r(x) + self.U_r(h))
        z = sigmoid.sigmoid(self.W_z(x) + self.U_z(h))
        h_bar = tanh.tanh(self.W(x) + self.U(r * h))
        h_new = (1 - z) * h + z * h_bar
        return h_new 
Example #27
Source File: nonlinearities.py    From nsf with MIT License
def forward(self, inputs, context=None):
        outputs = torch.tanh(inputs)
        logabsdet = torch.log(1 - outputs ** 2)
        logabsdet = utils.sum_except_batch(logabsdet, num_batch_dims=1)
        return outputs, logabsdet 
Example #28
Source File: nonlinearities.py    From nsf with MIT License
def __init__(self, cut_point=1):
        if cut_point <= 0:
            raise ValueError('Cut point must be positive.')
        super().__init__()

        self.cut_point = cut_point
        self.inv_cut_point = np.tanh(cut_point)

        self.alpha = (1 - np.tanh(np.tanh(cut_point))) / cut_point
        self.beta = np.exp((np.tanh(cut_point) - self.alpha * np.log(cut_point))
                           / self.alpha) 
Example #29
Source File: adversarial_autoencoder.py    From linguistic-style-transfer with Apache License 2.0
def get_annealed_weight(self, iteration, lambda_weight):
        return (np.tanh(
            (iteration - mconf.kl_anneal_iterations * 1.5) /
            (mconf.kl_anneal_iterations / 3))
                + 1) * lambda_weight 
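get_annealed_weight implements a tanh-shaped annealing schedule: the weight ramps smoothly from about 0 up to about 2 * lambda_weight, crossing lambda_weight at iteration = 1.5 * kl_anneal_iterations. A standalone sketch with made-up settings (mconf is not available here):

import numpy as np

kl_anneal_iterations = 1000    # assumed value; the real code reads mconf.kl_anneal_iterations
lambda_weight = 1.0

def annealed_weight(iteration):
    return (np.tanh((iteration - kl_anneal_iterations * 1.5)
                    / (kl_anneal_iterations / 3)) + 1) * lambda_weight

for it in (0, 1000, 1500, 2000, 3000):
    print(it, round(annealed_weight(it), 3))    # ramps ~0.0 -> 1.0 -> ~2.0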
Example #30
Source File: test_old_ma.py    From recruit with Apache License 2.0
def test_testUfuncs1(self):
        # Test various functions such as sin, cos.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        assert_(eq(np.cos(x), cos(xm)))
        assert_(eq(np.cosh(x), cosh(xm)))
        assert_(eq(np.sin(x), sin(xm)))
        assert_(eq(np.sinh(x), sinh(xm)))
        assert_(eq(np.tan(x), tan(xm)))
        assert_(eq(np.tanh(x), tanh(xm)))
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_(eq(np.sqrt(abs(x)), sqrt(xm)))
            assert_(eq(np.log(abs(x)), log(xm)))
            assert_(eq(np.log10(abs(x)), log10(xm)))
        assert_(eq(np.exp(x), exp(xm)))
        assert_(eq(np.arcsin(z), arcsin(zm)))
        assert_(eq(np.arccos(z), arccos(zm)))
        assert_(eq(np.arctan(z), arctan(zm)))
        assert_(eq(np.arctan2(x, y), arctan2(xm, ym)))
        assert_(eq(np.absolute(x), absolute(xm)))
        assert_(eq(np.equal(x, y), equal(xm, ym)))
        assert_(eq(np.not_equal(x, y), not_equal(xm, ym)))
        assert_(eq(np.less(x, y), less(xm, ym)))
        assert_(eq(np.greater(x, y), greater(xm, ym)))
        assert_(eq(np.less_equal(x, y), less_equal(xm, ym)))
        assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
        assert_(eq(np.conjugate(x), conjugate(xm)))
        assert_(eq(np.concatenate((x, y)), concatenate((xm, ym))))
        assert_(eq(np.concatenate((x, y)), concatenate((x, y))))
        assert_(eq(np.concatenate((x, y)), concatenate((xm, y))))
        assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))