Python numpy.inf Examples
The following are 30 code examples of numpy.inf. Note that numpy.inf is a floating-point constant representing positive infinity, not a callable function. Each example comes from an open-source project; the source file and license are noted above each snippet. You may also want to check out the other available functions and classes of the numpy module.
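Before the project examples, a minimal standalone sketch of how numpy.inf itself behaves (assuming only NumPy is installed):

import numpy as np

# np.inf is IEEE-754 positive infinity, an ordinary Python float
print(np.inf > 1e308)                         # True: larger than any finite float
print(np.isinf(np.inf), np.isfinite(np.inf))  # True False
print(np.inf - np.inf)                        # nan: inf arithmetic can yield NaN
print(np.clip([5.0, -7.0], -np.inf, np.inf))  # [ 5. -7.]: infinite bounds clip nothing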
Example #1
Source File: gaussian_moments.py From DOTA_models with Apache License 2.0
def _compute_delta(log_moments, eps):
  """Compute delta for given log_moments and eps.

  Args:
    log_moments: the log moments of privacy loss, in the form of pairs
      of (moment_order, log_moment)
    eps: the target epsilon.

  Returns:
    delta
  """
  min_delta = 1.0
  for moment_order, log_moment in log_moments:
    if moment_order == 0:
      continue
    if math.isinf(log_moment) or math.isnan(log_moment):
      sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
      continue
    if log_moment < moment_order * eps:
      min_delta = min(min_delta, math.exp(log_moment - moment_order * eps))
  return min_delta
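A quick sketch of how _compute_delta might be called; the (moment_order, log_moment) pairs below are made-up illustration values, not numbers from DOTA_models:

log_moments = [(2, 0.10), (4, 0.45), (8, 2.0)]  # hypothetical values
print(_compute_delta(log_moments, eps=1.0))
# prints the smallest exp(log_moment - moment_order * eps) over the valid orders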
Example #2
Source File: gaussian_moments.py From DOTA_models with Apache License 2.0
def _compute_eps(log_moments, delta):
  """Compute epsilon for given log_moments and delta.

  Args:
    log_moments: the log moments of privacy loss, in the form of pairs
      of (moment_order, log_moment)
    delta: the target delta.

  Returns:
    epsilon
  """
  min_eps = float("inf")
  for moment_order, log_moment in log_moments:
    if moment_order == 0:
      continue
    if math.isinf(log_moment) or math.isnan(log_moment):
      sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
      continue
    min_eps = min(min_eps, (log_moment - math.log(delta)) / moment_order)
  return min_eps
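And the matching sketch for _compute_eps, with the same hypothetical log moments:

log_moments = [(2, 0.10), (4, 0.45), (8, 2.0)]  # hypothetical values
print(_compute_eps(log_moments, delta=1e-5))
# prints the min over orders of (log_moment - math.log(delta)) / moment_order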
Example #3
Source File: _op_translations.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def convert_clip(node, **kwargs):
    """Map MXNet's Clip operator attributes to onnx's Clip operator
    and return the created node.
    """
    onnx = import_onnx_modules()
    name = node["name"]
    input_idx = kwargs["index_lookup"][node["inputs"][0][0]]
    proc_nodes = kwargs["proc_nodes"]
    input_node = proc_nodes[input_idx].name
    attrs = node["attrs"]
    # note: the original used np.float(...), an alias removed in NumPy >= 1.24
    a_min = float(attrs.get('a_min', -np.inf))
    a_max = float(attrs.get('a_max', np.inf))

    clip_node = onnx.helper.make_node(
        "Clip",
        [input_node],
        [name],
        name=name,
        min=a_min,
        max=a_max
    )
    return [clip_node]
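The pattern worth noting here is the use of -np.inf and np.inf as defaults for missing clip bounds, so an absent bound clips nothing. A self-contained sketch of the same idea, without the MXNet/ONNX machinery (clip_with_defaults is a hypothetical helper):

import numpy as np

def clip_with_defaults(x, attrs):
    # a missing bound defaults to an infinite one, which np.clip treats as "no limit"
    a_min = float(attrs.get('a_min', -np.inf))
    a_max = float(attrs.get('a_max', np.inf))
    return np.clip(x, a_min, a_max)

x = np.array([-5.0, 0.0, 5.0])
print(clip_with_defaults(x, {'a_max': '1.0'}))  # [-5.  0.  1.]
print(clip_with_defaults(x, {}))                # [-5.  0.  5.] -- unchanged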
Example #4
Source File: objective.py From OpenFermion-Cirq with Apache License 2.0
def noise_bounds(self,
                 cost: float,
                 confidence: Optional[float]=None
                 ) -> Tuple[float, float]:
    """Exact or approximate bounds on noise.

    Returns a tuple (a, b) such that when `noise` is called with the
    given cost, the returned value lies between a and b. It should be
    the case that a <= 0 <= b.

    This function takes an optional `confidence` parameter which is a
    real number strictly between 0 and 1 that gives the probability of
    the bounds being correct. This is used for situations in which
    exact bounds on the noise cannot be guaranteed.
    """
    return -numpy.inf, numpy.inf
Example #5
Source File: black_box.py From OpenFermion-Cirq with Apache License 2.0
def noise_bounds(self,
                 cost: float,
                 confidence: Optional[float]=None
                 ) -> Tuple[float, float]:
    """Exact or approximate bounds on noise in the objective function.

    Returns a tuple (a, b) such that when `evaluate_with_cost` is called
    with the given cost and returns an approximate function value y,
    the true function value lies in the interval [y + a, y + b]. Thus,
    it should be the case that a <= 0 <= b.

    This function takes an optional `confidence` parameter which is a
    real number strictly between 0 and 1 that gives the confidence
    level in the bound. This is used for situations in which exact
    bounds on the noise cannot be guaranteed. The value can be
    interpreted as the probability that a repeated call to
    `evaluate_with_cost` with the same cost will return a value within
    the bounds.
    """
    return -numpy.inf, numpy.inf
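Both implementations fall back to (-numpy.inf, numpy.inf), i.e. "no guarantee at all": any real value lies within those bounds. A small sketch of how a caller might use such bounds (the noise_bounds below is a stand-in function, not the OpenFermion-Cirq class):

import numpy

def noise_bounds(cost, confidence=None):
    # stand-in for the methods above: no bound is known
    return -numpy.inf, numpy.inf

a, b = noise_bounds(100)
y = 3.7  # some approximate objective value
# the true value is only known to lie in [y + a, y + b]
print(y + a <= y <= y + b)  # True -- trivially satisfied for infinite bounds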
Example #6
Source File: minitaur_gym_env_example.py From soccer-matlab with BSD 2-Clause "Simplified" License
def ResetPoseExample():
  """An example that the minitaur stands still using the reset pose."""
  steps = 1000
  randomizer = (minitaur_env_randomizer.MinitaurEnvRandomizer())
  environment = minitaur_gym_env.MinitaurBulletEnv(
      render=True,
      leg_model_enabled=False,
      motor_velocity_limit=np.inf,
      pd_control_enabled=True,
      accurate_motor_model_enabled=True,
      motor_overheat_protection=True,
      env_randomizer=randomizer,
      hard_reset=False)
  action = [math.pi / 2] * 8
  for _ in range(steps):
    _, _, done, _ = environment.step(action)
    if done:
      break
  environment.reset()
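Passing motor_velocity_limit=np.inf effectively disables velocity clamping, since clipping to an infinite bound is a no-op. A sketch of the idea (the limit-handling function below is illustrative, not the actual minitaur implementation):

import numpy as np

def apply_velocity_limit(velocities, limit=np.inf):
    # with limit=np.inf this returns the velocities unchanged
    return np.clip(velocities, -limit, limit)

v = np.array([-12.0, 3.0, 40.0])
print(apply_velocity_limit(v))        # [-12.   3.  40.]
print(apply_velocity_limit(v, 10.0))  # [-10.   3.  10.]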
Example #7
Source File: experiment.py From Neural-LP with MIT License
def __init__(self, sess, saver, option, learner, data):
    self.sess = sess
    self.saver = saver
    self.option = option
    self.learner = learner
    self.data = data

    # helpers
    self.msg_with_time = lambda msg: \
        "%s Time elapsed %0.2f hrs (%0.1f mins)" \
        % (msg, (time.time() - self.start) / 3600.,
           (time.time() - self.start) / 60.)

    self.start = time.time()
    self.epoch = 0
    self.best_valid_loss = np.inf
    self.best_valid_in_top = 0.
    self.train_stats = []
    self.valid_stats = []
    self.test_stats = []
    self.early_stopped = False
    self.log_file = open(os.path.join(self.option.this_expsdir, "log.txt"), "w")
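Initializing best_valid_loss to np.inf is the usual "best so far" sentinel: any real loss beats it on the first comparison. A minimal sketch of the pattern (the per-epoch losses are made up for illustration):

import numpy as np

best_valid_loss = np.inf
for valid_loss in [0.9, 0.7, 0.8, 0.65]:  # hypothetical per-epoch losses
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss  # the first epoch always improves on np.inf
print(best_valid_loss)  # 0.65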
Example #8
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def test_attack_strength(self):
    """
    If clipping is not done at each iteration (not passing clip_min and
    clip_max to fgm), this attack fails by
    np.mean(orig_labels == new_labels) == .39.
    """
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)
    x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
                                    clip_min=0.5, clip_max=0.7,
                                    nb_iter=5)
    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
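Here ord=np.inf selects the L-infinity norm for the perturbation, mirroring NumPy's own convention. A standalone sketch of that convention:

import numpy as np

x = np.array([3.0, -4.0])
print(np.linalg.norm(x, ord=1))       # 7.0  (sum of absolute values)
print(np.linalg.norm(x, ord=2))       # 5.0  (Euclidean)
print(np.linalg.norm(x, ord=np.inf))  # 4.0  (max absolute value)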
Example #9
Source File: minitaur_gym_env_example.py From soccer-matlab with BSD 2-Clause "Simplified" License
def ResetPoseExample(log_path=None):
  """An example that the minitaur stands still using the reset pose."""
  steps = 10000
  environment = minitaur_gym_env.MinitaurGymEnv(
      urdf_version=minitaur_gym_env.DERPY_V0_URDF_VERSION,
      render=True,
      leg_model_enabled=False,
      motor_velocity_limit=np.inf,
      pd_control_enabled=True,
      accurate_motor_model_enabled=True,
      motor_overheat_protection=True,
      hard_reset=False,
      log_path=log_path)
  action = [math.pi / 2] * 8
  for _ in range(steps):
    _, _, done, _ = environment.step(action)
    time.sleep(1. / 100.)
    if done:
      break
Example #10
Source File: attacks_tfe.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def generate(self, x, **kwargs):
    """
    Generates the adversarial sample for the given input.

    :param x: The model's inputs.
    :param eps: (optional float) attack step size (input variation)
    :param ord: (optional) Order of the norm (mimics NumPy).
                Possible values: np.inf, 1 or 2.
    :param y: (optional) A tf variable with the model labels. Only provide
              this parameter if you'd like to use true labels when crafting
              adversarial samples. Otherwise, model predictions are used as
              labels to avoid the "label leaking" effect (explained in this
              paper: https://arxiv.org/abs/1611.01236). Default is None.
              Labels should be one-hot-encoded.
    :param y_target: (optional) A tf variable with the labels to target.
                     Leave y_target=None if y is also set.
                     Labels should be one-hot-encoded.
    :param clip_min: (optional float) Minimum input component value
    :param clip_max: (optional float) Maximum input component value
    """
    # Parse and save attack-specific parameters
    assert self.parse_params(**kwargs)

    labels, nb_classes = self.get_or_guess_labels(x, kwargs)

    return self.fgm(x, labels=labels, targeted=(self.y_target is not None))
Example #11
Source File: test_utils_tf.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def test_clip_eta_goldilocks(self):
    # Test that the clipping handles perturbations that are
    # too small, just right, and too big correctly
    eta = tf.constant([[2.], [3.], [4.]])
    assert eta.dtype == tf.float32, eta.dtype
    eps = 3.
    for ord_arg in [np.inf, 1, 2]:
        for sign in [-1., 1.]:
            clipped = clip_eta(eta * sign, ord_arg, eps)
            clipped_value = self.sess.run(clipped)
            gold = sign * np.array([[2.], [3.], [3.]])
            self.assertClose(clipped_value, gold)
            grad, = tf.gradients(clipped, eta)
            grad_value = self.sess.run(grad)
            # Note: the second 1. is debatable (the left-sided derivative
            # and the right-sided derivative do not match, so formally
            # the derivative is not defined). This test makes sure that
            # we at least handle this oddity consistently across all the
            # argument values we test
            gold = sign * np.array([[1.], [1.], [0.]])
            assert np.allclose(grad_value, gold)
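For the np.inf case, clip_eta amounts to an element-wise clamp of the perturbation into [-eps, eps]. A NumPy-only sketch of that one case (not the cleverhans implementation, which also handles ord 1 and 2 in TensorFlow):

import numpy as np

def clip_eta_inf(eta, eps):
    # element-wise clamp: each component of the perturbation is limited to [-eps, eps]
    return np.clip(eta, -eps, eps)

eta = np.array([[2.], [3.], [4.]])
print(clip_eta_inf(eta, 3.))  # [[2.] [3.] [3.]] -- matches the "gold" values above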
Example #12
Source File: minitaur_randomize_terrain_gym_env_example.py From soccer-matlab with BSD 2-Clause "Simplified" License
def ResetTerrainExample():
  """An example showing resetting random terrain env."""
  num_reset = 10
  steps = 100
  env = minitaur_randomize_terrain_gym_env.MinitaurRandomizeTerrainGymEnv(
      render=True,
      leg_model_enabled=False,
      motor_velocity_limit=np.inf,
      pd_control_enabled=True)
  action = [math.pi / 2] * 8
  for _ in xrange(num_reset):
    env.reset()
    for _ in xrange(steps):
      _, _, done, _ = env.step(action)
      if done:
        break
Example #13
Source File: core.py From neuropythy with GNU Affero General Public License v3.0
def apply_cmap(zs, cmap, vmin=None, vmax=None, unit=None, logrescale=False):
    '''
    apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin
    and/or vmax are passed, they are used to scale z.

    Note that this function can automatically rescale data into log-space
    if the colormap is a neuropythy log-space colormap such as
    log_eccentricity. To enable this behaviour use the optional argument
    logrescale=True.
    '''
    zs = pimms.mag(zs) if unit is None else pimms.mag(zs, unit)
    zs = np.asarray(zs, dtype='float')
    if pimms.is_str(cmap): cmap = matplotlib.cm.get_cmap(cmap)
    if logrescale:
        if vmin is None: vmin = np.log(np.nanmin(zs))
        if vmax is None: vmax = np.log(np.nanmax(zs))
        mn = np.exp(vmin)
        u = zdivide(nanlog(zs + mn) - vmin, vmax - vmin, null=np.nan)
    else:
        if vmin is None: vmin = np.nanmin(zs)
        if vmax is None: vmax = np.nanmax(zs)
        u = zdivide(zs - vmin, vmax - vmin, null=np.nan)
    u[np.isnan(u)] = -np.inf
    return cmap(u)
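The final u[np.isnan(u)] = -np.inf step replaces unscalable values with negative infinity so that, after the colormap clamps its input to [0, 1], they land on the lowest color rather than hitting the colormap's "bad" (NaN) color. A small sketch of the substitution, using matplotlib.cm.get_cmap as the source does (newer matplotlib exposes matplotlib.colormaps instead) and viridis as an arbitrary choice:

import numpy as np
import matplotlib.cm as cm

cmap = cm.get_cmap('viridis')
u = np.array([0.5, np.nan])
u[np.isnan(u)] = -np.inf  # NaN would map to the "bad" color (transparent by default)
print(cmap(u))            # the -inf entry maps to the colormap's lowest color instead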
Example #14
Source File: accountant.py From DOTA_models with Apache License 2.0
def _compute_delta(self, log_moments, eps):
  """Compute delta for given log_moments and eps.

  Args:
    log_moments: the log moments of privacy loss, in the form of pairs
      of (moment_order, log_moment)
    eps: the target epsilon.

  Returns:
    delta
  """
  min_delta = 1.0
  for moment_order, log_moment in log_moments:
    if math.isinf(log_moment) or math.isnan(log_moment):
      sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
      continue
    if log_moment < moment_order * eps:
      min_delta = min(min_delta, math.exp(log_moment - moment_order * eps))
  return min_delta
Example #15
Source File: core.py From neuropythy with GNU Affero General Public License v3.0
def cos_edge(f=Ellipsis, width=np.pi, offset=0, scale=1):
    '''
    cos_edge() yields a potential function g(x) that calculates 0 for
    x < -pi/2, 1 for x > pi/2, and 0.5*(1 + cos(pi/2*(1 - x))) for x
    between -pi/2 and pi/2.

    The full formula of the cosine well, including the optional
    arguments, is:
      scale/2 * (1 + cos(pi*(0.5 - (x - offset)/width)))

    The following optional arguments may be given:
      * width (default: pi) specifies that the frequency of the cos-curve
        should be pi/width; the width is the distance between the points
        on the cos-curve with the value of 1.
      * offset (default: 0) specifies the offset of the minimum value of
        the cosine curve on the x-axis.
      * scale (default: 1) specifies the height of the cosine well.
    '''
    f = to_potential(f)
    freq = np.pi/2
    (xmn, xmx) = (offset - width/2, offset + width/2)
    F = piecewise(scale,
                  ((-np.inf, xmn), 0),
                  ((xmn, xmx),
                   scale/2 * (1 + cos(np.pi*(0.5 - (identity - offset)/width)))))
    if is_const_potential(f):
        return const_potential(F.value(f.c))
    elif is_identity_potential(f):
        return F
    else:
        return compose(F, f)
Example #16
Source File: lfads.py From DOTA_models with Apache License 2.0
def __init__(self, num_units, forget_bias=1.0, weight_scale=1.0,
             clip_value=np.inf, collections=None):
  """Create a GRU object.

  Args:
    num_units: Number of units in the GRU.
    forget_bias (optional): Hack to help learning.
    weight_scale (optional): Weights are scaled by ws/sqrt(#inputs),
      with ws being the weight scale.
    clip_value (optional): If the recurrent values grow above this value,
      clip them.
    collections (optional): List of additional collections variables
      should belong to.
  """
  self._num_units = num_units
  self._forget_bias = forget_bias
  self._weight_scale = weight_scale
  self._clip_value = clip_value
  self._collections = collections
Example #17
Source File: core.py From neuropythy with GNU Affero General Public License v3.0
def sigmoid(f=Ellipsis, mu=0, sigma=1, scale=1, invert=False, normalize=False):
    '''
    sigmoid() yields a potential function that is equivalent to the
    integral of gaussian(), i.e., the error function, but scaled to match
    gaussian(). sigmoid(f) is equivalent to compose(sigmoid(), f).

    All options that are accepted by the gaussian() function are accepted
    by sigmoid() with the same default values and are handled in an
    equivalent manner, with the exception of the invert option: when a
    sigmoid is inverted, the function approaches its maximum value at -inf
    and approaches 0 at inf.

    Note that because sigmoid() explicitly matches gaussian(), the base
    formula used is as follows (in Mathematica notation,
    k*sig*Sqrt[Pi/2] Erf[(x - mu)/sig/Sqrt[2]]):
      f(x) = scale * sigma * sqrt(pi/2) * erf((x - mu) / (sqrt(2) * sigma))
    '''
    f = to_potential(f)
    F = erf((f - mu) / (sigma * np.sqrt(2.0)))
    if invert: F = 1 - F
    F = np.sqrt(np.pi / 2) * scale * F
    if normalize: F = F / (np.sqrt(2.0*np.pi) * sigma)
    return F
Example #18
Source File: core.py From neuropythy with GNU Affero General Public License v3.0
def arcsine(x, null=(-np.inf, np.inf)):
    '''
    arcsine(x) is equivalent to asin(x) except that it also works on
    sparse arrays.

    The optional argument null (default: (-numpy.inf, numpy.inf)) may be
    specified to indicate what value(s) should be assigned when x < -1 or
    x > 1. If only one number is given, then it is used for both values;
    otherwise the first value corresponds to <-1 and the second to >1. If
    null is None, then an error is raised when invalid values are
    encountered.
    '''
    if sps.issparse(x):
        x = x.copy()
        # note: the original also passed rtol/atol here, which are
        # undefined in this function
        x.data = arcsine(x.data, null=null)
        return x
    else:
        x = np.asarray(x)
    try:
        (nln, nlp) = null
    except Exception:
        (nln, nlp) = (null, null)
    ii = None if nln is None else np.where(x < -1)
    jj = None if nlp is None else np.where(x > 1)
    if ii: x[ii] = 0
    if jj: x[jj] = 0
    x = np.arcsin(x)
    if ii: x[ii] = nln
    if jj: x[jj] = nlp
    return x
Example #19
Source File: core.py From neuropythy with GNU Affero General Public License v3.0
def arccosine(x, null=(-np.inf, np.inf)):
    '''
    arccosine(x) is equivalent to acos(x) except that it also works on
    sparse arrays.

    The optional argument null (default: (-numpy.inf, numpy.inf)) may be
    specified to indicate what value(s) should be assigned when x < -1 or
    x > 1. If only one number is given, then it is used for both values;
    otherwise the first value corresponds to <-1 and the second to >1. If
    null is None, then an error is raised when invalid values are
    encountered.
    '''
    if sps.issparse(x):
        x = x.toarray()
    else:
        x = np.asarray(x)
    try:
        (nln, nlp) = null
    except Exception:
        (nln, nlp) = (null, null)
    ii = None if nln is None else np.where(x < -1)
    jj = None if nlp is None else np.where(x > 1)
    if ii: x[ii] = 0
    if jj: x[jj] = 0
    x = np.arccos(x)
    if ii: x[ii] = nln
    if jj: x[jj] = nlp
    return x
Example #20
Source File: utils.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def rollout(env, agent, max_path_length=np.inf):
    reward = []
    o = env.reset()
    # agent.reset()
    path_length = 0
    while path_length < max_path_length:
        o = o.reshape((1, -1))
        a = agent.get_action(o)
        next_o, r, d, _ = env.step(a)
        reward.append(r)
        path_length += 1
        if d:
            break
        o = next_o
    return reward
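With max_path_length=np.inf, the while condition never terminates the rollout on length alone, because every finite integer compares below np.inf; only the environment's done flag ends the episode. A tiny sketch of that comparison (the break condition stands in for the done signal):

import numpy as np

path_length, max_path_length = 0, np.inf
while path_length < max_path_length:  # any finite length keeps the loop running
    path_length += 1
    if path_length >= 3:  # stand-in for the environment's done signal
        break
print(path_length)  # 3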
Example #21
Source File: suba.py From libTLDA with MIT License
def is_pos_def(self, A):
    """
    Check for positive definiteness.

    Parameters
    ----------
    A : array
        square symmetric matrix.

    Returns
    -------
    bool
        whether matrix is positive-definite.
        Warning! Returns false for arrays containing inf or NaN.
    """
    # Check for valid numbers
    if np.any(np.isnan(A)) or np.any(np.isinf(A)):
        return False
    else:
        return np.all(np.real(np.linalg.eigvals(A)) > 0)
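A quick sketch exercising the check, rewritten as a plain function rather than the libTLDA method:

import numpy as np

def is_pos_def(A):
    # returns False for any matrix containing NaN or inf
    if np.any(np.isnan(A)) or np.any(np.isinf(A)):
        return False
    return np.all(np.real(np.linalg.eigvals(A)) > 0)

print(is_pos_def(np.eye(2)))                           # True
print(is_pos_def(np.array([[1., 0.], [0., -1.]])))     # False: negative eigenvalue
print(is_pos_def(np.array([[1., np.inf], [0., 1.]])))  # False: contains inf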
Example #22
Source File: _op_translations.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def clip(attrs, inputs, proto_obj):
    """Clips (limits) the values in an array."""
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'min': 'a_min',
                                                        'max': 'a_max'})
    if 'a_max' not in new_attrs:
        new_attrs = translation_utils._add_extra_attributes(new_attrs,
                                                            {'a_max': np.inf})
    if 'a_min' not in new_attrs:
        new_attrs = translation_utils._add_extra_attributes(new_attrs,
                                                            {'a_min': -np.inf})
    return 'clip', new_attrs, inputs
Example #23
Source File: k_medoids.py From discomll with Apache License 2.0
def fit(sim_mat, D_len, cidx):
    """
    The algorithm maximizes energy between clusters, which is what
    distinguishes it. The distance matrix contains mostly zeros, which
    are overlooked due to the search for maximal distances. The
    algorithm does not try to retain k clusters.

    D: numpy array - symmetric distance matrix
    k: int - number of clusters
    """
    min_energy = np.inf
    for j in range(3):
        # select indices in each sample that maximize its dimension
        inds = [np.argmin([sim_mat[idy].get(idx, 0) for idx in cidx])
                for idy in range(D_len) if idy in sim_mat]

        cidx = []
        energy = 0  # current energy
        for i in np.unique(inds):
            indsi = np.where(inds == i)[0]  # find indices for every cluster

            minind, min_value = 0, 0
            for index, idy in enumerate(indsi):
                if idy in sim_mat:
                    # value = sum([sim_mat[idy].get(idx, 0) for idx in indsi])
                    value = 0
                    for idx in indsi:
                        value += sim_mat[idy].get(idx, 0)
                    if value < min_value:
                        minind, min_value = index, value
            energy += min_value
            cidx.append(indsi[minind])  # new centers

        if energy < min_energy:
            min_energy, inds_min, cidx_min = energy, inds, cidx

    return inds_min, cidx_min  # cluster for every instance, medoids indices
Example #24
Source File: panda_nut_assembly.py From robosuite with MIT License
def _gripper_visualization(self):
    """
    Do any needed visualization here. Overrides superclass implementations.
    """
    # color the gripper site appropriately based on distance to nearest object
    if self.gripper_visualization:
        # find closest object
        square_dist = lambda x: np.sum(
            np.square(x - self.sim.data.get_site_xpos("grip_site"))
        )
        dists = np.array(list(map(square_dist, self.sim.data.site_xpos)))
        dists[self.eef_site_id] = np.inf  # make sure we don't pick the same site
        dists[self.eef_cylinder_id] = np.inf
        ob_dists = dists[
            self.object_site_ids
        ]  # filter out object sites we care about
        min_dist = np.min(ob_dists)
        ob_id = np.argmin(ob_dists)
        ob_name = self.object_names[ob_id]

        # set RGBA for the EEF site here
        max_dist = 0.1
        scaled = (1.0 - min(min_dist / max_dist, 1.)) ** 15
        rgba = np.zeros(4)
        rgba[0] = 1 - scaled
        rgba[1] = scaled
        rgba[3] = 0.5

        self.sim.model.site_rgba[self.eef_site_id] = rgba
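Setting selected entries of dists to np.inf is a common way to exclude them from a subsequent np.argmin without changing the array's shape. A standalone sketch (the distances and excluded index are made up):

import numpy as np

dists = np.array([0.0, 2.5, 1.2, 0.4])
exclude = [0]             # e.g. the end-effector's own site
dists[exclude] = np.inf   # excluded entries can never be the minimum
print(np.argmin(dists))   # 3 -- the nearest *other* site
print(np.min(dists))      # 0.4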
Example #25
Source File: sawyer_pick_place.py From robosuite with MIT License
def _gripper_visualization(self):
    """
    Do any needed visualization here. Overrides superclass implementations.
    """
    # color the gripper site appropriately based on distance to nearest object
    if self.gripper_visualization:
        # find closest object
        square_dist = lambda x: np.sum(
            np.square(x - self.sim.data.get_site_xpos("grip_site"))
        )
        dists = np.array(list(map(square_dist, self.sim.data.site_xpos)))
        dists[self.eef_site_id] = np.inf  # make sure we don't pick the same site
        dists[self.eef_cylinder_id] = np.inf
        ob_dists = dists[
            self.object_site_ids
        ]  # filter out object sites we care about
        min_dist = np.min(ob_dists)
        ob_id = np.argmin(ob_dists)
        ob_name = self.object_names[ob_id]

        # set RGBA for the EEF site here
        max_dist = 0.1
        scaled = (1.0 - min(min_dist / max_dist, 1.)) ** 15
        rgba = np.zeros(4)
        rgba[0] = 1 - scaled
        rgba[1] = scaled
        rgba[3] = 0.5

        self.sim.model.site_rgba[self.eef_site_id] = rgba
Example #26
Source File: objective_test.py From OpenFermion-Cirq with Apache License 2.0
def test_variational_objective_noise_bounds():
    assert test_objective.noise_bounds(100) == (-numpy.inf, numpy.inf)
Example #27
Source File: black_box_test.py From OpenFermion-Cirq with Apache License 2.0
def test_black_box_noise_bounds():
    black_box = ExampleBlackBox()
    assert black_box.noise_bounds(100) == (-numpy.inf, numpy.inf)
Example #28
Source File: accountant.py From DOTA_models with Apache License 2.0
def _compute_eps(self, log_moments, delta):
  min_eps = float("inf")
  for moment_order, log_moment in log_moments:
    if math.isinf(log_moment) or math.isnan(log_moment):
      sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
      continue
    min_eps = min(min_eps, (log_moment - math.log(delta)) / moment_order)
  return min_eps
Example #29
Source File: gym_manipulator_envs.py From soccer-matlab with BSD 2-Clause "Simplified" License
def __init__(self):
    self.robot = Striker()
    MJCFBaseBulletEnv.__init__(self, self.robot)
    self._striked = False
    self._min_strike_dist = np.inf
    self.strike_threshold = 0.1
Example #30
Source File: test_random.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_negative_binomial_generator():
    ctx = mx.context.current_context()
    for dtype in ['float16', 'float32', 'float64']:
        success_num = 2
        success_prob = 0.2
        print("ctx=%s, dtype=%s, Success Num=%d:, Success Prob=%g" %
              (ctx, dtype, success_num, success_prob))
        buckets = [(-1.0, 2.5), (2.5, 5.5), (5.5, 8.5), (8.5, np.inf)]
        probs = [ss.nbinom.cdf(bucket[1], success_num, success_prob) -
                 ss.nbinom.cdf(bucket[0], success_num, success_prob)
                 for bucket in buckets]
        generator_mx = lambda x: mx.nd.random.negative_binomial(
            success_num, success_prob, shape=x, ctx=ctx, dtype=dtype).asnumpy()
        verify_generator(generator=generator_mx, buckets=buckets, probs=probs)
        generator_mx_same_seed = \
            lambda x: np.concatenate(
                [mx.nd.random.negative_binomial(success_num, success_prob,
                                                shape=x // 10, ctx=ctx,
                                                dtype=dtype).asnumpy()
                 for _ in range(10)])
        verify_generator(generator=generator_mx_same_seed, buckets=buckets,
                         probs=probs)

        # Also test the Gamma-Poisson mixture
        print('Gamma-Poisson Mixture Test:')
        alpha = 1.0 / success_num
        mu = (1.0 - success_prob) / success_prob / alpha
        generator_mx = lambda x: mx.nd.random.generalized_negative_binomial(
            mu, alpha, shape=x, ctx=ctx, dtype=dtype).asnumpy()
        verify_generator(generator=generator_mx, buckets=buckets, probs=probs)
        generator_mx_same_seed = \
            lambda x: np.concatenate(
                [mx.nd.random.generalized_negative_binomial(
                    mu, alpha, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                 for _ in range(10)])
        verify_generator(generator=generator_mx_same_seed, buckets=buckets,
                         probs=probs)
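Here np.inf serves as the open upper edge of the last histogram bucket, so the bucket probabilities sum to 1: scipy's cdf evaluates to 1.0 at infinity. A minimal check of that property (assuming scipy is available):

import numpy as np
import scipy.stats as ss

print(ss.nbinom.cdf(np.inf, 2, 0.2))   # 1.0: the (8.5, inf) bucket absorbs the whole upper tail
print(ss.nbinom.cdf(8.5, 2, 0.2) < 1)  # True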