Python numpy.fabs() Examples

The following are 30 code examples showing how to use numpy.fabs(). They are extracted from open source projects; the project, author, and file information shown above each example identifies where the code comes from.

You may also want to check out all available functions and classes of the numpy module.
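
Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of what numpy.fabs() does and how it differs from numpy.abs():

import numpy as np

x = np.array([-2, -0.5, 0, 3])
print(np.fabs(x))       # [2.  0.5 0.  3. ] -- always returns floats, even for integer input
print(np.abs(3 - 4j))   # 5.0 -- np.abs also handles complex input
# np.fabs(3 - 4j) would raise a TypeError: fabs is defined for real input only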

Example 1
Project: pyGSTi   Author: pyGSTio   File: reportableqty.py    License: Apache License 2.0
def absdiff(self, constant_value, separate_re_im=False):
        """
        Returns a ReportableQty that is the (element-wise, in the vector case)
        absolute difference between this quantity and `constant_value`:

        `abs(self - constant_value)`.
        """
        if separate_re_im:
            re_v = _np.fabs(_np.real(self.value) - _np.real(constant_value))
            im_v = _np.fabs(_np.imag(self.value) - _np.imag(constant_value))
            if self.has_eb():
                return (ReportableQty(re_v, _np.fabs(_np.real(self.errbar)), self.nonMarkovianEBs),
                        ReportableQty(im_v, _np.fabs(_np.imag(self.errbar)), self.nonMarkovianEBs))
            else:
                return ReportableQty(re_v), ReportableQty(im_v)

        else:
            v = _np.absolute(self.value - constant_value)
            if self.has_eb():
                return ReportableQty(v, _np.absolute(self.errbar), self.nonMarkovianEBs)
            else:
                return ReportableQty(v) 
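
Stripped of the ReportableQty bookkeeping, the core of absdiff is an element-wise absolute difference, optionally split into real and imaginary parts. A minimal standalone sketch (the array values are illustrative, not from pyGSTi):

import numpy as np

value = np.array([1 + 2j, -3 - 1j])
constant_value = 1 - 1j

combined = np.absolute(value - constant_value)               # [3. 4.]  (separate_re_im=False branch)
re_diff = np.fabs(np.real(value) - np.real(constant_value))  # [0. 4.]  (separate_re_im=True branch)
im_diff = np.fabs(np.imag(value) - np.imag(constant_value))  # [3. 0.]
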
Example 2
Project: recruit   Author: Frank-qlu   File: test_datetimelike.py    License: Apache License 2.0
def test_irreg_hf(self):
        idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
        df = DataFrame(np.random.randn(len(idx), 2), idx)

        irreg = df.iloc[[0, 1, 3, 4]]
        _, ax = self.plt.subplots()
        irreg.plot(ax=ax)
        diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()

        sec = 1. / 24 / 60 / 60
        assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all()

        _, ax = self.plt.subplots()
        df2 = df.copy()
        df2.index = df.index.astype(object)
        df2.plot(ax=ax)
        diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
        assert (np.fabs(diffs[1:] - sec) < 1e-8).all() 
Example 3
Project: gmpe-smtk   Author: GEMScienceTools   File: intensity_measures.py    License: GNU Affero General Public License v3.0
def rotipp(acceleration_x, time_step_x, acceleration_y, time_step_y, periods,
        percentile, damping=0.05, units="cm/s/s", method="Nigam-Jennings"):
    """
    Returns the rotationally independent spectrum RotIpp as defined by
    Boore (2010)
    """
    if np.fabs(time_step_x - time_step_y) > 1E-10:
        raise ValueError("Record pair must have the same time-step!")
    acceleration_x, acceleration_y = equalise_series(acceleration_x,
                                                     acceleration_y)
    target, rota, rotv, rotd, angles = rotdpp(acceleration_x, time_step_x,
                                              acceleration_y, time_step_y,
                                              periods, percentile, damping,
                                              units, method)
    locn, penalty = _get_gmrotd_penalty(
        np.hstack([target["PGA"],target["Pseudo-Acceleration"]]),
        rota)
    target_theta = np.radians(angles[locn])
    arotpp = acceleration_x * np.cos(target_theta) +\
        acceleration_y * np.sin(target_theta)
    spec = get_response_spectrum(arotpp, time_step_x, periods, damping, units,
        method)[0]
    spec["GMRot{:2.0f}".format(percentile)] = target
    return spec 
Example 4
Project: NiaPy   Author: NiaOrg   File: sca.py    License: MIT License
def nextPos(self, x, x_b, r1, r2, r3, r4, task):
		r"""Move individual to new position in search space.

		Args:
			x (numpy.ndarray): Individual represented with components.
			x_b (numpy.ndarray): Best individual represented with components.
			r1 (float): Number dependent on algorithm iteration/generations.
			r2 (float): Random number in range [0, 2 * PI].
			r3 (float): Random number in range [Rmin, Rmax].
			r4 (float): Random number in range [0, 1].
			task (Task): Optimization task.

		Returns:
			numpy.ndarray: New individual that is moved based on individual ``x``.
		"""
		return task.repair(x + r1 * (sin(r2) if r4 < 0.5 else cos(r2)) * fabs(r3 * x_b - x), self.Rand) 
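
The update above is the standard sine-cosine step: move x towards (or away from) the best individual x_b by r1 * sin(r2) or r1 * cos(r2) times the absolute gap fabs(r3 * x_b - x). A standalone sketch in which np.clip stands in for task.repair (bounds and parameter values are illustrative):

import numpy as np

rng = np.random.default_rng(0)
lower, upper = -5.0, 5.0
x = rng.uniform(lower, upper, size=10)    # current individual
x_b = rng.uniform(lower, upper, size=10)  # best individual found so far

r1 = 1.5                                  # shrinks over the iterations in SCA
r2 = rng.uniform(0, 2 * np.pi)
r3 = rng.uniform(0, 2)
r4 = rng.uniform(0, 1)

step = (np.sin(r2) if r4 < 0.5 else np.cos(r2)) * np.fabs(r3 * x_b - x)
x_new = np.clip(x + r1 * step, lower, upper)  # np.clip stands in for task.repair
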
Example 5
Project: vnpy_crypto   Author: birforce   File: norms.py    License: MIT License
def rho(self, z):
        r"""
        The robust criterion function for Huber's t.

        Parameters
        ----------
        z : array-like
            1d array

        Returns
        -------
        rho : array
            rho(z) = .5*z**2            for \|z\| <= t

            rho(z) = \|z\|*t - .5*t**2    for \|z\| > t
        """
        z = np.asarray(z)
        test = self._subset(z)
        return (test * 0.5 * z**2 +
                (1 - test) * (np.fabs(z) * self.t - 0.5 * self.t**2)) 
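
A standalone version of the same piecewise function, without the statsmodels class machinery (t = 1.345 is the usual Huber tuning constant; the helper name is illustrative):

import numpy as np

def huber_rho(z, t=1.345):
    """rho(z) = 0.5*z**2 for |z| <= t, and |z|*t - 0.5*t**2 otherwise."""
    z = np.asarray(z, dtype=float)
    small = np.fabs(z) <= t
    return np.where(small, 0.5 * z**2, np.fabs(z) * t - 0.5 * t**2)

print(huber_rho([-3.0, -1.0, 0.0, 1.0, 3.0]))
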
Example 6
Project: vnpy_crypto   Author: birforce   File: norms.py    License: MIT License
def weights(self, z):
        """
        Huber's t weighting function for the IRLS algorithm

        The psi function scaled by z

        Parameters
        ----------
        z : array-like
            1d array

        Returns
        -------
        weights : array
            weights(z) = 1          for \|z\| <= t

            weights(z) = t/\|z\|      for \|z\| > t
        """
        z = np.asarray(z)
        test = self._subset(z)
        absz = np.fabs(z)
        absz[test] = 1.0
        return test + (1 - test) * self.t / absz 
Example 7
Project: vnpy_crypto   Author: birforce   File: norms.py    License: MIT License
def rho(self, z):
        r"""
        The robust criterion function for Ramsay's Ea.

        Parameters
        ----------
        z : array-like
            1d array

        Returns
        -------
        rho : array
            rho(z) = a**-2 * (1 - exp(-a*\|z\|)*(1 + a*\|z\|))
        """
        z = np.asarray(z)
        return (1 - np.exp(-self.a * np.fabs(z)) *
                (1 + self.a * np.fabs(z))) / self.a**2 
Example 8
Project: vnpy_crypto   Author: birforce   File: norms.py    License: MIT License
def psi(self, z):
        """
        The psi function for Ramsay's Ea estimator

        The analytic derivative of rho

        Parameters
        ----------
        z : array-like
            1d array

        Returns
        -------
        psi : array
            psi(z) = z*exp(-a*\|z\|)
        """
        z = np.asarray(z)
        return z * np.exp(-self.a * np.fabs(z)) 
Example 9
Project: vnpy_crypto   Author: birforce   File: norms.py    License: MIT License
def weights(self, z):
        """
        Ramsay's Ea weighting function for the IRLS algorithm

        The psi function scaled by z

        Parameters
        ----------
        z : array-like
            1d array

        Returns
        -------
        weights : array
            weights(z) = exp(-a*\|z\|)
        """

        z = np.asarray(z)
        return np.exp(-self.a * np.fabs(z)) 
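
Examples 7 through 9 are related by differentiation: psi is the derivative of rho, and the IRLS weight is psi(z) / z. A standalone sketch with a fixed tuning constant a (the value is illustrative):

import numpy as np

a = 0.3
z = np.linspace(-5, 5, 11)

rho = (1 - np.exp(-a * np.fabs(z)) * (1 + a * np.fabs(z))) / a**2
psi = z * np.exp(-a * np.fabs(z))      # d(rho)/dz
weights = np.exp(-a * np.fabs(z))      # psi(z) / z

nonzero = z != 0
assert np.allclose(psi[nonzero] / z[nonzero], weights[nonzero])
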
Example 10
Project: vnpy_crypto   Author: birforce   File: scale.py    License: MIT License
def __call__(self, df_resid, nobs, resid):
        h = (df_resid)/nobs*(self.d**2 + (1-self.d**2)*\
                    Gaussian.cdf(self.d)-.5 - self.d/(np.sqrt(2*np.pi))*\
                    np.exp(-.5*self.d**2))
        s = mad(resid)
        subset = lambda x: np.less(np.fabs(resid/x),self.d)
        chi = lambda s: subset(s)*(resid/s)**2/2+(1-subset(s))*(self.d**2/2)
        scalehist = [np.inf,s]
        niter = 1
        while (np.abs(scalehist[niter-1] - scalehist[niter])>self.tol \
                and niter < self.maxiter):
            nscale = np.sqrt(1/(nobs*h)*np.sum(chi(scalehist[-1]))*\
                    scalehist[-1]**2)
            scalehist.append(nscale)
            niter += 1
            #if niter == self.maxiter:
            #    raise ValueError("Huber's scale failed to converge")
        return scalehist[-1] 
Example 11
Project: vnpy_crypto   Author: birforce   File: mixed.py    License: MIT License
def cont(self, ML=False, rtol=1.0e-05, params_rtol=1e-5, params_atol=1e-4):
        '''convergence check for iterative estimation

        '''

        self.dev, old = self.deviance(ML=ML), self.dev

        #self.history.append(np.hstack((self.dev, self.a)))
        self.history['llf'].append(self.dev)
        self.history['params'].append(self.a.copy())
        self.history['D'].append(self.D.copy())

        if np.fabs((self.dev - old) / self.dev) < rtol:   #why is there times `*`?
            #print np.fabs((self.dev - old)), self.dev, old
            self.termination = 'llf'
            return False

        #break if parameters converged
        #TODO: check termination conditions, OR or AND
        if np.all(np.abs(self.a - self._a_old) < (params_rtol * self.a + params_atol)):
            self.termination = 'params'
            return False

        self._a_old =  self.a.copy()
        return True 
Example 12
Project: vnpy_crypto   Author: birforce   File: test_datetimelike.py    License: MIT License
def test_irreg_hf(self):
        idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
        df = DataFrame(np.random.randn(len(idx), 2), idx)

        irreg = df.iloc[[0, 1, 3, 4]]
        _, ax = self.plt.subplots()
        irreg.plot(ax=ax)
        diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()

        sec = 1. / 24 / 60 / 60
        assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all()

        _, ax = self.plt.subplots()
        df2 = df.copy()
        df2.index = df.index.astype(object)
        df2.plot(ax=ax)
        diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
        assert (np.fabs(diffs[1:] - sec) < 1e-8).all() 
Example 13
Project: vnpy_crypto   Author: birforce   File: nanops.py    License: MIT License
def _wrap_results(result, dtype):
    """ wrap our results if needed """

    if is_datetime64_dtype(dtype):
        if not isinstance(result, np.ndarray):
            result = tslib.Timestamp(result)
        else:
            result = result.view(dtype)
    elif is_timedelta64_dtype(dtype):
        if not isinstance(result, np.ndarray):

            # raise if we have a timedelta64[ns] which is too large
            if np.fabs(result) > _int64_max:
                raise ValueError("overflow in timedelta operation")

            result = tslib.Timedelta(result, unit='ns')
        else:
            result = result.astype('i8').view(dtype)

    return result 
Example 14
Project: Attentional-PointNet   Author: anshulpaigwar   File: kitti_evaluation.py    License: GNU General Public License v3.0
def lidar_to_img(points, img_size):
    # pdb.set_trace()
    lidar_data = np.array(points[:, :2])
    lidar_data *= 9.9999
    lidar_data -= (0.5 * img_size, 0.5 * img_size)
    lidar_data = np.fabs(lidar_data)
    lidar_data = lidar_data.astype(np.int32)
    lidar_data = np.reshape(lidar_data, (-1, 2))
    lidar_img = np.zeros((img_size, img_size))
    lidar_img[tuple(lidar_data.T)] = 255
    return torch.tensor(lidar_img).cuda()


# def lidar_to_img(points, img_size):
#     # pdb.set_trace()
#     lidar_data = points[:, :2]
#     lidar_data *= 9.9999
#     lidar_data -= torch.tensor((0.5 * img_size, 0.5 * img_size)).cuda()
#     lidar_data = torch.abs(lidar_data)
#     lidar_data = torch.floor(lidar_data).long()
#     lidar_data = lidar_data.view(-1, 2)
#     lidar_img = torch.zeros((img_size, img_size)).cuda()
#     lidar_img[lidar_data.permute(1,0)] = 255
#     return lidar_img 
Example 15
def lidar_to_heightmap(points, img_size):
    # pdb.set_trace()
    lidar_data = np.array(points[:, :2])
    height_data = np.array(points[:,2])
    height_data *= 255/2
    height_data[height_data < 0] = 0
    height_data[height_data > 255] = 255
    height_data = np.fabs(height_data)
    height_data = height_data.astype(np.int32)


    lidar_data *= 9.9999
    lidar_data -= (0.5 * img_size, 0.5 * img_size)
    lidar_data = np.fabs(lidar_data)
    lidar_data = lidar_data.astype(np.int32)
    lidar_data = np.reshape(lidar_data, (-1, 2))
    lidar_img = np.zeros((img_size, img_size))
    lidar_img[tuple(lidar_data.T)] = height_data  # TODO: sort the points by height first (e.g. np.lexsort)
    return lidar_img 
Example 16
Project: graphics   Author: tensorflow   File: test_case.py    License: Apache License 2.0
def _max_error(arrays1, arrays2):
  """Computes maximum elementwise gap between two lists of ndarrays.

  Computes the maximum elementwise gap between two lists of the same length,
  containing arrays of the same shape.

  Args:
    arrays1: a list of np.ndarrays.
    arrays2: a list of np.ndarrays with the same shapes as arrays1.

  Returns:
    The maximum elementwise absolute difference between the two lists of arrays.
  """
  error = 0
  for array1, array2 in zip(arrays1, arrays2):
    if array1.size or array2.size:  # Handle zero size ndarrays correctly
      error = np.maximum(error, np.fabs(array1 - array2).max())
  return error 
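
A short usage sketch for _max_error (the input arrays here are made up):

import numpy as np

arrays1 = [np.array([1.0, 2.0]), np.zeros((2, 2))]
arrays2 = [np.array([1.0, 2.5]), np.full((2, 2), 1e-3)]
print(_max_error(arrays1, arrays2))   # 0.5 -- the largest elementwise absolute gap
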
Example 17
Project: Computable   Author: ktraunmueller   File: test_plotting.py    License: MIT License
def test_irreg_hf(self):
        import matplotlib.pyplot as plt
        fig = plt.gcf()
        plt.clf()
        fig.add_subplot(111)

        idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
        df = DataFrame(np.random.randn(len(idx), 2), idx)

        irreg = df.ix[[0, 1, 3, 4]]
        ax = irreg.plot()
        diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()

        sec = 1. / 24 / 60 / 60
        self.assert_((np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all())

        plt.clf()
        fig.add_subplot(111)
        df2 = df.copy()
        df2.index = df.index.asobject
        ax = df2.plot()
        diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
        self.assert_((np.fabs(diffs[1:] - sec) < 1e-8).all()) 
Example 18
Project: ms_deisotope   Author: mobiusklein   File: shape_fitter.py    License: Apache License 2.0
def find_right_intersect(vec, target_val, start_index=0):
        nearest_index = start_index
        next_index = start_index

        size = len(vec) - 1
        if next_index == size:
            return size

        next_val = vec[next_index]
        best_distance = np.abs(next_val - target_val)
        while (next_index < size):
            next_index += 1
            next_val = vec[next_index]
            dist = np.fabs(next_val - target_val)  # pylint: disable=assignment-from-no-return
            if dist < best_distance:
                best_distance = dist
                nearest_index = next_index
            if next_index == size or next_val < target_val:
                break
        return nearest_index 
Example 19
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: solver.py    License: Apache License 2.0
def __init__(self, interval, level=logging.DEBUG, stat=None):
        self.interval = interval
        self.level = level
        if stat is None:
            def mean_abs(x):
                return np.fabs(x).mean()
            self.stat = mean_abs
        else:
            self.stat = stat 
Example 20
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: solver.py    License: Apache License 2.0
def __init__(self, interval, level=logging.DEBUG, stat=None):
        self.interval = interval
        self.level = level
        if stat is None:
            def mean_abs(x):
                return np.fabs(x).mean()
            self.stat = mean_abs
        else:
            self.stat = stat 
Example 21
Project: soccer-matlab   Author: utra-robosoccer   File: minitaur_evaluate.py    License: BSD 2-Clause "Simplified" License
def evaluate_params(evaluateFunc, params, objectiveParams, urdfRoot='', timeStep=0.01, maxNumSteps=10000, sleepTime=0):
  print('start evaluation')
  beforeTime = time.time()
  p.resetSimulation()

  p.setTimeStep(timeStep)
  p.loadURDF("%s/plane.urdf" % urdfRoot)
  p.setGravity(0,0,-10)

  global minitaur
  minitaur = Minitaur(urdfRoot)
  start_position = current_position()
  last_position = None  # for tracing line
  total_energy = 0

  for i in range(maxNumSteps):
    torques = minitaur.getMotorTorques()
    velocities = minitaur.getMotorVelocities()
    total_energy += np.dot(np.fabs(torques), np.fabs(velocities)) * timeStep

    joint_values = evaluate_func_map[evaluateFunc](i, params)
    minitaur.applyAction(joint_values)
    p.stepSimulation()
    if (is_fallen()):
      break

    if i % 100 == 0:
      sys.stdout.write('.')
      sys.stdout.flush()
    time.sleep(sleepTime)

  print(' ')

  alpha = objectiveParams[0]
  final_distance = np.linalg.norm(start_position - current_position())
  finalReturn = final_distance - alpha * total_energy
  elapsedTime = time.time() - beforeTime
  print ("trial for ", params, " final_distance", final_distance, "total_energy", total_energy, "finalReturn", finalReturn, "elapsed_time", elapsedTime)
  return finalReturn 
Example 22
Project: pymoo   Author: msu-coinlab   File: wfg.py    License: Apache License 2.0
def _transformation_shift_linear(value, shift=0.35):
    return correct_to_01(np.fabs(value - shift) / np.fabs(np.floor(shift - value) + shift)) 
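
A quick standalone check of what this transformation does: it maps the shift point to 0 and scales linearly to 1 at both ends of [0, 1]. Here np.clip stands in for pymoo's correct_to_01:

import numpy as np

def shift_linear(y, shift=0.35):
    out = np.fabs(y - shift) / np.fabs(np.floor(shift - y) + shift)
    return np.clip(out, 0.0, 1.0)   # stands in for correct_to_01

print(shift_linear(np.array([0.0, 0.35, 0.675, 1.0])))   # [1.  0.  0.5 1. ]
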
Example 23
Project: pymoo   Author: msu-coinlab   File: wfg.py    License: Apache License 2.0
def _transformation_shift_deceptive(y, A=0.35, B=0.005, C=0.05):
    tmp1 = np.floor(y - A + B) * (1.0 - C + (A - B) / B) / (A - B)
    tmp2 = np.floor(A + B - y) * (1.0 - C + (1.0 - A - B) / B) / (1.0 - A - B)
    ret = 1.0 + (np.fabs(y - A) - B) * (tmp1 + tmp2 + 1.0 / B)
    return correct_to_01(ret) 
Example 24
Project: pymoo   Author: msu-coinlab   File: wfg.py    License: Apache License 2.0
def _transformation_shift_multi_modal(y, A, B, C):
    tmp1 = np.fabs(y - C) / (2.0 * (np.floor(C - y) + C))
    tmp2 = (4.0 * A + 2.0) * np.pi * (0.5 - tmp1)
    ret = (1.0 + np.cos(tmp2) + 4.0 * B * np.power(tmp1, 2.0)) / (B + 2.0)
    return correct_to_01(ret) 
Example 25
Project: pymoo   Author: msu-coinlab   File: wfg.py    License: Apache License 2.0
def _transformation_param_dependent(y, y_deg, A=0.98 / 49.98, B=0.02, C=50.0):
    aux = A - (1.0 - 2.0 * y_deg) * np.fabs(np.floor(0.5 - y_deg) + A)
    ret = np.power(y, B + (C - B) * aux)
    return correct_to_01(ret) 
Example 26
Project: pymoo   Author: msu-coinlab   File: wfg.py    License: Apache License 2.0
def _transformation_param_deceptive(y, A=0.35, B=0.001, C=0.05):
    tmp1 = np.floor(y - A + B) * (1.0 - C + (A - B) / B) / (A - B)
    tmp2 = np.floor(A + B - y) * (1.0 - C + (1.0 - A - B) / B) / (1.0 - A - B)
    ret = 1.0 + (np.fabs(y - A) - B) * (tmp1 + tmp2 + 1.0 / B)
    return correct_to_01(ret)


# ---------------------------------------------------------------------------------------------------------
# REDUCTION
# --------------------------------------------------------------------------------------------------------- 
Example 27
Project: recruit   Author: Frank-qlu   File: test_replace.py    License: Apache License 2.0
def test_replace2(self):
        N = 100
        ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
                        dtype=object)
        ser[:5] = np.nan
        ser[6:10] = 'foo'
        ser[20:30] = 'bar'

        # replace list with a single value
        rs = ser.replace([np.nan, 'foo', 'bar'], -1)

        assert (rs[:5] == -1).all()
        assert (rs[6:10] == -1).all()
        assert (rs[20:30] == -1).all()
        assert (pd.isna(ser[:5])).all()

        # replace with different values
        rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})

        assert (rs[:5] == -1).all()
        assert (rs[6:10] == -2).all()
        assert (rs[20:30] == -3).all()
        assert (pd.isna(ser[:5])).all()

        # replace with different values with 2 lists
        rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
        tm.assert_series_equal(rs, rs2)

        # replace inplace
        ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
        assert (ser[:5] == -1).all()
        assert (ser[6:10] == -1).all()
        assert (ser[20:30] == -1).all() 
Example 28
Project: recruit   Author: Frank-qlu   File: nanops.py    License: Apache License 2.0
def _wrap_results(result, dtype, fill_value=None):
    """ wrap our results if needed """

    if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
        if fill_value is None:
            # GH#24293
            fill_value = iNaT
        if not isinstance(result, np.ndarray):
            tz = getattr(dtype, 'tz', None)
            assert not isna(fill_value), "Expected non-null fill_value"
            if result == fill_value:
                result = np.nan
            result = tslibs.Timestamp(result, tz=tz)
        else:
            result = result.view(dtype)
    elif is_timedelta64_dtype(dtype):
        if not isinstance(result, np.ndarray):
            if result == fill_value:
                result = np.nan

            # raise if we have a timedelta64[ns] which is too large
            if np.fabs(result) > _int64_max:
                raise ValueError("overflow in timedelta operation")

            result = tslibs.Timedelta(result, unit='ns')
        else:
            result = result.astype('i8').view(dtype)

    return result 
Example 29
Project: PostProcessingPlugin   Author: nallath   File: Stretch.py    License: GNU Affero General Public License v3.0
def wideTurn(self, orig_seq, modif_seq):
        '''
        We have to select three points in order to form a triangle
        These three points should be far enough from each other to have
        a reliable estimation of the orientation of the current turn
        '''
        dmin_tri = self.line_width / 2.0
        ibeg = 0
        iend = 2
        for i in range(1, len(orig_seq) - 1):
            dist_from_point = ((orig_seq[i] - orig_seq[i+1:]) ** 2).sum(1)
            if np.amax(dist_from_point) < dmin_tri * dmin_tri:
                continue
            iend = i + 1 + np.argmax(dist_from_point >= dmin_tri * dmin_tri)
            dist_from_point = ((orig_seq[i] - orig_seq[i-1::-1]) ** 2).sum(1)
            if np.amax(dist_from_point) < dmin_tri * dmin_tri:
                continue
            ibeg = i - 1 - np.argmax(dist_from_point >= dmin_tri * dmin_tri)
            length_base = ((orig_seq[iend] - orig_seq[ibeg]) ** 2).sum(0)
            relpos = ((orig_seq[i] - orig_seq[ibeg]) * (orig_seq[iend] - orig_seq[ibeg])).sum(0)
            if np.fabs(relpos) < 1000.0 * np.fabs(length_base):
                relpos /= length_base
            else:
                relpos = 0.5
            projection = orig_seq[ibeg] + relpos * (orig_seq[iend] - orig_seq[ibeg])
            dist_from_proj = np.sqrt(((projection - orig_seq[i]) ** 2).sum(0))
            if dist_from_proj > 0.001:
                modif_seq[i] = (orig_seq[i] - (self.wc_stretch / dist_from_proj)
                                * (projection - orig_seq[i]))
        return 
Example 30
Project: gmpe-smtk   Author: GEMScienceTools   File: configure.py    License: GNU Affero General Public License v3.0
def __init__(self, magnitude, dip, aspect,
                 tectonic_region='Active Shallow Crust', rake=0., ztor=0.,
                 strike=0., msr=WC1994(), initial_point=DEFAULT_POINT,
                 hypocentre_location=None):
        """
        Instantiate the rupture - requires a minimum of a magnitude, dip
        and aspect ratio
        """
        self.magnitude = magnitude
        self.dip = dip
        self.aspect = aspect
        self.rake = rake
        self.strike = strike
        self.location = initial_point
        self.ztor = ztor
        self.trt = tectonic_region
        self.hypo_loc = hypocentre_location
        # If the top-of-rupture depth differs from the initial point's depth, reset it to ztor
        if fabs(self.location.depth - self.ztor) > 1E-9:
            self.location.depth = ztor
        self.msr = msr
        self.area = self.msr.get_median_area(self.magnitude, self.rake)
        self.surface = create_planar_surface(self.location,
                                             self.strike,
                                             self.dip,
                                             self.area,
                                             self.aspect)
        self.hypocentre = get_hypocentre_on_planar_surface(self.surface,
                                                           self.hypo_loc)
        self.rupture = self.get_rupture()
        self.target_sites_config = None
        self.target_sites = None