Python numpy.any() Examples

The following are 30 code examples of numpy.any(), drawn from real open-source projects. The source file, project, and license for each example are listed above it. You may also want to check out all available functions/classes of the module numpy, or try the search function.
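As a quick refresher before the examples: np.any() returns True if at least one element of its input is truthy, and its axis argument restricts the test to rows or columns. A minimal sketch (the array values here are purely illustrative):

import numpy as np

a = np.array([[0, 0, 1],
              [0, 0, 0]])

print(np.any(a))          # True: at least one element is non-zero
print(np.any(a, axis=0))  # [False False  True]: per-column test
print(np.any(a, axis=1))  # [ True False]: per-row test
print(np.any(a > 5))      # False: no element exceeds 5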
Example #1
Source File: tcpr.py    From libTLDA with MIT License
def add_intercept(self, X):
        """Add 1's to data as last features."""
        # Data shape
        N, D = X.shape

        # Check whether an intercept column (a column of 1's sums to N) already exists
        if np.any(np.sum(X, axis=0) == N):

            # Report
            print('Intercept is not the last feature. Swapping..')

            # Find which column contains the intercept
            intercept_index = np.argwhere(np.sum(X, axis=0) == N)

            # Remove the existing intercept column; it is re-added as the last column below
            X = X[:, np.setdiff1d(np.arange(D), intercept_index)]

        # Add intercept as last column
        X = np.hstack((X, np.ones((N, 1))))

        # Return data with the intercept appended, and the new dimensionality
        return X, X.shape[1]
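The np.any(np.sum(X, axis=0) == N) test above works because a column consisting entirely of 1's sums to exactly N, the number of rows. A hypothetical standalone check of the same pattern:

import numpy as np

X = np.hstack((np.ones((5, 1)), np.random.rand(5, 2)))  # intercept as the FIRST column
N = X.shape[0]
print(np.any(np.sum(X, axis=0) == N))  # True: an intercept column exists somewhere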
Example #2
Source File: leike_ensslin_2019.py    From dustmaps with GNU General Public License v2.0
def fetch(clobber=False):
    """
    Downloads the 3D dust map of Leike & Ensslin (2019).

    Args:
        clobber (Optional[bool]): If ``True``, any existing file will be
            overwritten, even if it appears to match. If ``False`` (the
            default), ``fetch()`` will attempt to determine if the dataset
            already exists. This determination is not 100% robust against data
            corruption.
    """
    dest_dir = os.path.join(data_dir(), 'leike_ensslin_2019')
    fname = os.path.join(dest_dir, 'simple_cube.h5')
    
    # Check if the HDF5 file already exists
    md5sum = 'f54e01c253453117e3770575bed35078'

    if (not clobber) and fetch_utils.check_md5sum(fname, md5sum):
        print('File appears to exist already. Call `fetch(clobber=True)` '
              'to force overwriting of existing file.')
        return

    # Download from the server
    url = 'https://zenodo.org/record/2577337/files/simple_cube.h5?download=1'
    fetch_utils.download_and_verify(url, md5sum, fname) 
Example #3
Source File: model.py    From aospy with Apache License 2.0
def set_grid_data(self):
        """Populate the attrs that hold grid data."""
        if self._grid_data_is_set:
            return
        self._set_mult_grid_attr()
        if not np.any(getattr(self, 'sfc_area', None)):
            try:
                sfc_area = _grid_sfc_area(self.lon, self.lat, self.lon_bounds,
                                          self.lat_bounds)
            except AttributeError:
                sfc_area = _grid_sfc_area(self.lon, self.lat)
            self.sfc_area = sfc_area
        try:
            self.levs_thick = utils.vertcoord.level_thickness(self.level)
        except AttributeError:
            self.level = None
            self.levs_thick = None
        self._grid_data_is_set = True 
Example #4
Source File: suba.py    From libTLDA with MIT License
def is_pos_def(self, A):
        """
        Check for positive definiteness.

        Parameters
        ----------
        A : array
            Square symmetric matrix.

        Returns
        -------
        bool
            Whether the matrix is positive-definite.
            Warning: returns False for arrays containing inf or NaN.
        """
        # Check for valid numbers
        if np.any(np.isnan(A)) or np.any(np.isinf(A)):
            return False

        else:
            return np.all(np.real(np.linalg.eigvals(A)) > 0) 
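For illustration, here is a small self-contained run of the same two checks this method performs, on an obviously positive-definite matrix and on one containing NaN (the matrices are made up for the demo):

import numpy as np

A = np.array([[2.0, 0.5],
              [0.5, 1.0]])     # symmetric, both eigenvalues positive
B = np.array([[1.0, np.nan],
              [np.nan, 1.0]])  # contains NaN

print(np.any(np.isnan(A)) or np.any(np.isinf(A)))  # False: A passes the validity check
print(np.all(np.real(np.linalg.eigvals(A)) > 0))   # True: A is positive-definite
print(np.any(np.isnan(B)) or np.any(np.isinf(B)))  # True: B would be rejected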
Example #5
Source File: mrcnn.py    From medicaldetectiontoolkit with Apache License 2.0
def test_forward(self, batch, return_masks=True):
        """
        test method. wrapper around forward pass of network without usage of any ground truth information.
        prepares input data for processing and stores outputs in a dictionary.
        :param batch: dictionary containing 'data'
        :param return_masks: boolean. If True, full resolution masks are returned for all proposals (speed trade-off).
        :return: results_dict: dictionary with keys:
               'boxes': list over batch elements. each batch element is a list of boxes. each box is a dictionary:
                       [[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
               'seg_preds': pixel-wise class predictions (b, 1, y, x, (z)) with values [0, n_classes]
        """
        img = batch['data']
        img = torch.from_numpy(img).float().cuda()
        _, _, _, detections, detection_masks = self.forward(img)
        results_dict = get_results(self.cf, img.shape, detections, detection_masks, return_masks=return_masks)
        return results_dict 
Example #6
Source File: ufrcnn.py    From medicaldetectiontoolkit with Apache License 2.0
def test_forward(self, batch, return_masks=True):
        """
        test method. wrapper around forward pass of network without usage of any ground truth information.
        prepares input data for processing and stores outputs in a dictionary.
        :param batch: dictionary containing 'data'
        :param return_masks: boolean. If True, full resolution masks are returned for all proposals (speed trade-off).
        :return: results_dict: dictionary with keys:
               'boxes': list over batch elements. each batch element is a list of boxes. each box is a dictionary:
                       [[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
               'seg_preds': pixel-wise class predictions (b, 1, y, x, (z)) with values [0, n_classes]
        """
        img = batch['data']
        img = torch.from_numpy(img).float().cuda()
        _, _, _, detections, seg_logits = self.forward(img)
        results_dict = get_results(self.cf, img.shape, detections, seg_logits)
        return results_dict 
Example #7
Source File: sfd.py    From dustmaps with GNU General Public License v2.0
def query(self, coords, order=1):
        """
        Returns the map value at the specified location(s) on the sky.

        Args:
            coords (`astropy.coordinates.SkyCoord`): The coordinates to query.
            order (Optional[int]): Interpolation order to use. Defaults to `1`,
                for linear interpolation.

        Returns:
            A float array containing the map value at every input coordinate.
            The shape of the output will be the same as the shape of the
            coordinates stored by `coords`.
        """
        out = np.full(len(coords.l.deg), np.nan, dtype='f4')

        for pole in self.poles:
            m = (coords.b.deg >= 0) if pole == 'ngp' else (coords.b.deg < 0)

            if np.any(m):
                data, w = self._data[pole]
                x, y = w.wcs_world2pix(coords.l.deg[m], coords.b.deg[m], 0)
                out[m] = map_coordinates(data, [y, x], order=order, mode='nearest')

        return out 
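The if np.any(m) guard is a common idiom in this kind of query: when no coordinates fall in a given hemisphere, the interpolation for that hemisphere is skipped entirely. A generic sketch of the same idea, with made-up values:

import numpy as np

values = np.array([-3.0, -1.0, 2.0, 5.0])
out = np.full(values.shape, np.nan)

for m in (values < 0, values >= 0):
    if np.any(m):  # skip empty selections entirely
        out[m] = np.abs(values[m])

print(out)  # [3. 1. 2. 5.]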
Example #8
Source File: test_OpticalSystem.py    From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def test_init_occulter(self):
        r"""Test of initialization and __init__ -- occulter.

        Method: If any starlight suppression system has an occulter, the
        attribute OpticalSystem.haveOcculter is set.
        We instantiate OpticalSystem objects and verify that this is done.
        """
        our_specs = deepcopy(specs_default)
        optsys = self.fixture(**deepcopy(our_specs))
        self.assertFalse(optsys.haveOcculter,'Expect to NOT haveOcculter')

        our_specs['starlightSuppressionSystems'][0]['occulter'] = True
        optsys = self.fixture(**deepcopy(our_specs))
        self.assertTrue(optsys.haveOcculter, 'Expect to haveOcculter')

        optsys = self.fixture(**deepcopy(specs_multi))
        self.assertTrue(optsys.haveOcculter, 'Expect to haveOcculter') 
Example #9
Source File: from_arrays.py    From QCElemental with BSD 3-Clause "New" or "Revised" License
def validate_and_fill_geometry(geom=None, tooclose=0.1, copy=True):
    """Check `geom` for overlapping atoms. Return flattened"""

    npgeom = np.array(geom, copy=copy, dtype=float).reshape((-1, 3))

    # Upper triangular
    metric = tooclose ** 2
    tooclose_inds = []
    for x in range(npgeom.shape[0]):
        diffs = npgeom[x] - npgeom[x + 1 :]
        dists = np.einsum("ij,ij->i", diffs, diffs)

        # Record issues
        if np.any(dists < metric):
            indices = np.where(dists < metric)[0]
            tooclose_inds.extend([(x, y, dist) for y, dist in zip(indices + x + 1, dists[indices] ** 0.5)])

    if tooclose_inds:
        raise ValidationError(
            """Following atoms are too close: {}""".format([(i, j, dist) for i, j, dist in tooclose_inds])
        )

    return {"geom": npgeom.reshape((-1))} 
Example #10
Source File: models_test.py    From DOTA_models with Apache License 2.0
def _testDecoder(self,
                   height=64,
                   width=64,
                   channels=4,
                   batch_norm_params=None,
                   decoder=models.small_decoder):
    codes = tf.to_float(np.random.rand(32, 100))

    with self.test_session() as sess:
      output = decoder(
          codes,
          height=height,
          width=width,
          channels=channels,
          batch_norm_params=batch_norm_params)
      sess.run(tf.global_variables_initializer())
      output_np = sess.run(output)
    self.assertEqual(output_np.shape, (32, height, width, channels))
    self.assertTrue(np.any(output_np))
    self.assertTrue(np.all(np.isfinite(output_np))) 
Example #11
Source File: attacks_tf.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def _compute_gradients(self, loss_fn, x, unused_optim_state):
        """Compute a new value of `x` to minimize `loss_fn`.

        Args:
            loss_fn: a callable that takes `x`, a batch of images, and returns
                a batch of loss values. `x` will be optimized to minimize
                `loss_fn(x)`.
            x: A list of Tensors, the values to be updated. This is analogous
                to the `var_list` argument in standard TF Optimizer.
            unused_optim_state: A (possibly nested) dict, containing any state
                info needed for the optimizer.

        Returns:
            A list of Tensors, the same length as `x`, containing the
            gradients of the loss with respect to each tensor in `x`.
        """

        # Assumes `x` is a list,
        # and contains a tensor representing a batch of images
        assert len(x) == 1 and isinstance(x, list), \
            'x should be a list and contain only one image tensor'
        x = x[0]
        loss = reduce_mean(loss_fn(x), axis=0)
        return tf.gradients(loss, x) 
Example #12
Source File: dataloader_m.py    From models with MIT License
def _prepro_cpg(self, states, dists):
        """Preprocess the state and distance of neighboring CpG sites."""
        prepro_states = []
        prepro_dists = []
        for state, dist in zip(states, dists):
            nan = state == dat.CPG_NAN
            if np.any(nan):
                state[nan] = np.random.binomial(1, state[~nan].mean(),
                                                nan.sum())
                dist[nan] = self.cpg_max_dist
            dist = np.minimum(dist, self.cpg_max_dist) / self.cpg_max_dist
            prepro_states.append(np.expand_dims(state, 1))
            prepro_dists.append(np.expand_dims(dist, 1))
        prepro_states = np.concatenate(prepro_states, axis=1)
        prepro_dists = np.concatenate(prepro_dists, axis=1)
        if self.cpg_wlen:
            center = prepro_states.shape[2] // 2
            delta = self.cpg_wlen // 2
            tmp = slice(center - delta, center + delta)
            prepro_states = prepro_states[:, :, tmp]
            prepro_dists = prepro_dists[:, :, tmp]
        return (prepro_states, prepro_dists) 
Example #13
Source File: test_data.py    From mlearn with BSD 3-Clause "New" or "Revised" License
def test_convert_docs(self):
        _, df = convert_docs(self.test_pool, include_stress=False)
        test_energies = df[df['dtype'] == 'energy']['y_orig']
        self.assertFalse(np.any(test_energies - self.test_energies))
        test_forces = df[df['dtype'] == 'force']['y_orig']
        for force1, force2 in zip(test_forces, np.array(self.test_forces).ravel()):
            self.assertEqual(force1, force2)

        _, df = convert_docs(self.test_pool, include_stress=True)
        test_energies = df[df['dtype'] == 'energy']['y_orig']
        self.assertFalse(np.any(test_energies - self.test_energies))
        test_forces = df[df['dtype'] == 'force']['y_orig']
        for force1, force2 in zip(test_forces, np.array(self.test_forces).ravel()):
            self.assertEqual(force1, force2)
        test_stresses = df[df['dtype'] == 'stress']['y_orig']
        for stress1, stress2 in zip(test_stresses, np.array(self.test_stresses).ravel()):
            self.assertEqual(stress1, stress2) 
Example #14
Source File: optimizers.py    From DOTA_models with Apache License 2.0
def optimize(self, sess, feed_dict):
    reg_input, reg_weight, old_values, targets = sess.run(
        [self.inputs, self.regression_weight, self.values, self.targets],
        feed_dict=feed_dict)

    intended_values = targets * self.mix_frac + old_values * (1 - self.mix_frac)

    # taken from rllab
    reg_coeff = 1e-5
    for _ in range(5):
      best_fit_weight = np.linalg.lstsq(
          reg_input.T.dot(reg_input) +
          reg_coeff * np.identity(reg_input.shape[1]),
          reg_input.T.dot(intended_values))[0]
      if not np.any(np.isnan(best_fit_weight)):
        break
      reg_coeff *= 10

    if len(best_fit_weight.shape) == 1:
      best_fit_weight = np.expand_dims(best_fit_weight, -1)

    sess.run(self.update_regression_weight,
             feed_dict={self.new_regression_weight: best_fit_weight}) 
Example #15
Source File: util.py    From neuropythy with GNU Affero General Public License v3.0
def point_on_segment(ac, b, atol=1e-8):
    '''
    point_on_segment((a,c), b) yields True if point b is on segment (a,c) and False otherwise. Note
    that this differs from point_in_segment in that if b is equal to a or c, the point is still
    considered 'on' but not 'in' the segment.
    The option atol can be given and is used only to test for difference from 0; by default it is
    1e-8.
    '''
    (a,c) = ac
    abc = [np.asarray(u) for u in (a,b,c)]
    if any(len(u.shape) > 1 for u in abc): (a,b,c) = [np.reshape(u,(len(u),-1)) for u in abc]
    else:                                  (a,b,c) = abc
    vab = b - a
    vbc = c - b
    vac = c - a
    dab = np.sqrt(np.sum(vab**2, axis=0))
    dbc = np.sqrt(np.sum(vbc**2, axis=0))
    dac = np.sqrt(np.sum(vac**2, axis=0))
    return np.isclose(dab + dbc - dac, 0, atol=atol) 
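The underlying test is the triangle degeneracy condition: dab + dbc equals dac exactly when b lies on the segment from a to c. A quick, hypothetical check with concrete points:

import numpy as np

a, c = np.array([0.0, 0.0]), np.array([2.0, 0.0])
for b in (np.array([1.0, 0.0]),    # midpoint: on the segment
          np.array([1.0, 0.5])):   # off the segment
    dab = np.sqrt(np.sum((b - a) ** 2))
    dbc = np.sqrt(np.sum((c - b) ** 2))
    dac = np.sqrt(np.sum((c - a) ** 2))
    print(np.isclose(dab + dbc - dac, 0, atol=1e-8))  # True, then False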
Example #16
Source File: util.py    From neuropythy with GNU Affero General Public License v3.0
def point_in_segment(ac, b, atol=1e-8):
    '''
    point_in_segment((a,c), b) yields True if point b is strictly inside segment (a,c) and False
    otherwise. Note that this differs from point_on_segment in that if b is equal to a or c, the
    point is considered 'on' but not 'in' the segment.
    The option atol can be given and is used only to test for difference from 0; by default it is
    1e-8.
    '''
    (a,c) = ac
    abc = [np.asarray(u) for u in (a,b,c)]
    if any(len(u.shape) > 1 for u in abc): (a,b,c) = [np.reshape(u,(len(u),-1)) for u in abc]
    else:                                  (a,b,c) = abc
    vab = b - a
    vbc = c - b
    vac = c - a
    dab = np.sqrt(np.sum(vab**2, axis=0))
    dbc = np.sqrt(np.sum(vbc**2, axis=0))
    dac = np.sqrt(np.sum(vac**2, axis=0))
    return (np.isclose(dab + dbc - dac, 0, atol=atol) &
            ~np.isclose(dac - dab, 0, atol=atol) &
            ~np.isclose(dac - dbc, 0, atol=atol)) 
Example #17
Source File: DistanceConstraints.py    From fullrmc with GNU Affero General Public License v3.0
def should_step_get_rejected(self, standardError):
        """
        Given a standardError, return whether to keep or reject new
        standardError according to the constraint rejectProbability.
        In addition, if flexible flag is set to True, total number of atoms
        not satisfying constraints definition must be decreasing or at least
        remain the same.

        :Parameters:
            #. standardError (number): Standard error to compare with
               Constraint's standard error.

        :Returns:
            #. result (boolean): True to reject step, False to accept.
        """
        if self.__flexible:
            # compute if step should get rejected as a RigidConstraint
            return super(_DistanceConstraint, self).should_step_get_rejected(standardError)
        else:
            cond = self.activeAtomsDataAfterMove["number"] > self.activeAtomsDataBeforeMove["number"]
            if np.any(cond):
                return True
            return False 
Example #18
Source File: online.py    From contextualbandits with BSD 2-Clause "Simplified" License
def _choose_active(self, X, pred, choose=True):
        if self.prob_active_choice <= 0.:
            return None

        pick_active = self.random_state.random(size=X.shape[0]) <= self.prob_active_choice
        if not np.any(pick_active):
            return None
        by_crit = self._crit_active(
                        X[pick_active],
                        self._oracles.decision_function(X[pick_active]),
                        self.active_choice)
        if choose:
            pred[pick_active] = np.argmax(by_crit, axis = 1)
        else:
            pred[pick_active] = by_crit 
Example #19
Source File: models_test.py    From DOTA_models with Apache License 2.0
def testBuildGRLMnistModel(self):
    logits = self._testSharedEncoder(model=getattr(models,
                                                   'dann_mnist'))
    self.assertEqual(logits.shape, (5, 10))
    self.assertTrue(np.any(logits)) 
Example #20
Source File: object_detection_evaluation.py    From DOTA_models with Apache License 2.0
def evaluate(self):
    """Compute evaluation result.

    Returns:
      average_precision_per_class: float numpy array of average precision for
          each class.
      mean_ap: mean average precision of all classes, float scalar
      precisions_per_class: List of precisions, each precision is a float numpy
          array
      recalls_per_class: List of recalls, each recall is a float numpy array
      corloc_per_class: numpy float array
      mean_corloc: Mean CorLoc score for each class, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
      logging.warn(
          'The following classes have no ground truth examples: %s',
          np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)))
    for class_index in range(self.num_class):
      if self.num_gt_instances_per_class[class_index] == 0:
        continue
      scores = np.concatenate(self.scores_per_class[class_index])
      tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
      precision, recall = metrics.compute_precision_recall(
          scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
      self.precisions_per_class.append(precision)
      self.recalls_per_class.append(recall)
      average_precision = metrics.compute_average_precision(precision, recall)
      self.average_precision_per_class[class_index] = average_precision

    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)

    mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return (self.average_precision_per_class, mean_ap,
            self.precisions_per_class, self.recalls_per_class,
            self.corloc_per_class, mean_corloc) 
Example #21
Source File: visualization_utils.py    From DOTA_models with Apache License 2.0
def draw_mask_on_image_array(image, mask, color='red', alpha=0.7):
  """Draws mask on an image.

  Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a float numpy array of shape (img_height, img_width) with
      values between 0 and 1
    color: color to draw the keypoints with. Default is red.
    alpha: transparency value between 0 and 1. (default: 0.7)

  Raises:
    ValueError: On incorrect data type for image or masks.
  """
  if image.dtype != np.uint8:
    raise ValueError('`image` not of type np.uint8')
  if mask.dtype != np.float32:
    raise ValueError('`mask` not of type np.float32')
  if np.any(np.logical_or(mask > 1.0, mask < 0.0)):
    raise ValueError('`mask` elements should be in [0, 1]')
  rgb = ImageColor.getrgb(color)
  pil_image = Image.fromarray(image)

  solid_color = np.expand_dims(
      np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
  pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
  pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')
  pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
  np.copyto(image, np.array(pil_image.convert('RGB'))) 
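The np.any(np.logical_or(mask > 1.0, mask < 0.0)) guard is a compact idiom for validating a value range before use. A minimal sketch with made-up arrays:

import numpy as np

good = np.array([0.0, 0.5, 1.0], dtype=np.float32)
bad = np.array([0.0, 1.2, -0.1], dtype=np.float32)

for mask in (good, bad):
    print(np.any(np.logical_or(mask > 1.0, mask < 0.0)))  # False, then True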
Example #22
Source File: data_loader.py    From medicaldetectiontoolkit with Apache License 2.0
def get_train_generators(cf, logger):
    """
    wrapper function for creating the training batch generator pipeline. returns the train/val generators.
    selects patients according to cv folds (generated by first run/fold of experiment):
    splits the data into n folds, where one split is used for validation, one for testing (the inner-loop test set), and the rest for training.
    If cf.hold_out_test_set is True, adds the test split to the training data.
    """
    all_data = load_dataset(cf, logger)
    all_pids_list = np.unique([v['pid'] for (k, v) in all_data.items()])

    if not cf.created_fold_id_pickle:
        fg = dutils.fold_generator(seed=cf.seed, n_splits=cf.n_cv_splits, len_data=len(all_pids_list)).get_fold_names()
        with open(os.path.join(cf.exp_dir, 'fold_ids.pickle'), 'wb') as handle:
            pickle.dump(fg, handle)
        cf.created_fold_id_pickle = True
    else:
        with open(os.path.join(cf.exp_dir, 'fold_ids.pickle'), 'rb') as handle:
            fg = pickle.load(handle)

    train_ix, val_ix, test_ix, _ = fg[cf.fold]

    train_pids = [all_pids_list[ix] for ix in train_ix]
    val_pids = [all_pids_list[ix] for ix in val_ix]

    if cf.hold_out_test_set:
        train_pids += [all_pids_list[ix] for ix in test_ix]

    train_data = {k: v for (k, v) in all_data.items() if any(p == v['pid'] for p in train_pids)}
    val_data = {k: v for (k, v) in all_data.items() if any(p == v['pid'] for p in val_pids)}

    logger.info("data set loaded with: {} train / {} val / {} test patients".format(len(train_ix), len(val_ix), len(test_ix)))
    batch_gen = {}
    batch_gen['train'] = create_data_gen_pipeline(train_data, cf=cf, is_training=True)
    batch_gen['val_sampling'] = create_data_gen_pipeline(val_data, cf=cf, is_training=False)
    if cf.val_mode == 'val_patient':
        batch_gen['val_patient'] = PatientBatchIterator(val_data, cf=cf)
        batch_gen['n_val'] = len(val_ix) if cf.max_val_patients is None else min(len(val_ix), cf.max_val_patients)
    else:
        batch_gen['n_val'] = cf.num_val_batches

    return batch_gen 
Example #23
Source File: test_TargetList.py    From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def test_life_expectancy_filter(self):
        #test default removal of BV < 0.3 (hard-coded)
        self.targetlist.life_expectancy_filter()
        self.assertEqual( np.any(self.targetlist.BV<0.3) , False) 
Example #24
Source File: test_TargetList.py    From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def test_main_sequence_filter(self):
        n0 = self.targetlist.nStars
        self.targetlist.main_sequence_filter()
        #print self.targetlist.nStars
        #Check that no stars fall outside main sequence strip
        self.assertEqual( np.any((self.targetlist.BV < 0.74) & (self.targetlist.MV > 6*self.targetlist.BV+1.8)) , False)
        self.assertEqual( np.any((self.targetlist.BV >= 0.74) & (self.targetlist.BV < 1.37) & (self.targetlist.MV > 4.3*self.targetlist.BV+3.05)) , False)
        self.assertEqual( np.any((self.targetlist.BV >= 1.37) & (self.targetlist.MV > 18*self.targetlist.BV-15.7)) , False)
        self.assertEqual( np.any((self.targetlist.BV < 0.87) & (self.targetlist.MV < -8*(self.targetlist.BV-1.35)**2+7.01)) , False)
        self.assertEqual( np.any((self.targetlist.BV >= 0.87) & (self.targetlist.BV < 1.45) & (self.targetlist.MV > 5*self.targetlist.BV+0.81)) , False)
        self.assertEqual( np.any((self.targetlist.BV >= 1.45) & (self.targetlist.MV < 18*self.targetlist.BV-18.04)) , False)
        #check that filtered target list does not have repeating elements
        import collections
        compare = lambda x, y: collections.Counter(x) == collections.Counter(y)
        self.assertEqual( compare(list(set(self.targetlist.Name)), list(self.targetlist.Name)) , True) 
Example #25
Source File: tf_utils.py    From DOTA_models with Apache License 2.0
def simple_summaries(summarize_ops, summarize_names, mode, to_aggregate=False,
                     scope_name='summary'):

  if type(to_aggregate) != list:
    to_aggregate = [to_aggregate for _ in summarize_ops]
  
  summary_key = '{:s}_summaries'.format(mode)
  print_summary_key = '{:s}_print_summaries'.format(mode)
  prefix=' [{:s}]: '.format(mode)
  
  # Default ops for things that don't need to be aggregated.
  if not np.all(to_aggregate):
    for op, name, to_agg in zip(summarize_ops, summarize_names, to_aggregate):
      if not to_agg:
        add_scalar_summary_op(op, name, summary_key, print_summary_key, prefix)
    summary_ops = tf.summary.merge_all(summary_key)
    print_summary_ops = tf.summary.merge_all(print_summary_key)
  else:
    summary_ops = tf.no_op()
    print_summary_ops = tf.no_op()
 
  # Ops for things that do need to be aggregated.
  if np.any(to_aggregate):
    additional_return_ops = [[summarize_ops[i]
                              for i, x in enumerate(to_aggregate) if x]]
    arop_summary_iters = [-1]
    s_names = ['{:s}/{:s}'.format(scope_name, summarize_names[i]) 
               for i, x in enumerate(to_aggregate) if x]
    fn = lambda outputs, global_step, output_dir, metric_summary, N: \
      accum_val_ops(outputs, s_names, global_step, output_dir, metric_summary,
                    N)
    arop_eval_fns = [fn]
  else:
    additional_return_ops = []
    arop_summary_iters = []
    arop_eval_fns = []
  return summary_ops, print_summary_ops, additional_return_ops, \
    arop_summary_iters, arop_eval_fns 
Example #26
Source File: util.py    From neuropythy with GNU Affero General Public License v3.0
def tetrahedral_barycentric_coordinates(tetra, pt):
    '''
    tetrahedral_barycentric_coordinates(tetrahedron, point) yields a list of weights for each vertex
      in the given tetrahedron in the same order as the vertices given. If all weights are 0, then
      the point is not inside the tetrahedron.
    '''
    # I found a description of this algorithm here (Nov. 2017):
    # http://steve.hollasch.net/cgindex/geometry/ptintet.html
    tetra = np.asarray(tetra)
    pt = np.asarray(pt)
    if tetra.shape[0] != 4:
        if tetra.shape[1] == 4:
            if tetra.shape[0] == 3:
                tetra = np.transpose(tetra, (1,0) if len(tetra.shape) == 2 else (1,0,2))
            else:
                tetra = np.transpose(tetra, (1,2,0))
        elif tetra.shape[1] == 3:
            tetra = np.transpose(tetra, (2,1,0))
        else:
            tetra = np.transpose(tetra, (2,0,1))
    elif tetra.shape[1] != 3:
        tetra = np.transpose(tetra, (0,2,1))
    if pt.shape[0] != 3: pt = pt.T
    # Okay, calculate the determinants...
    d_ = det_4x3(tetra[0], tetra[1], tetra[2], tetra[3])
    d0 = det_4x3(pt,       tetra[1], tetra[2], tetra[3])
    d1 = det_4x3(tetra[0], pt,       tetra[2], tetra[3])
    d2 = det_4x3(tetra[0], tetra[1], pt,       tetra[3])
    d3 = det_4x3(tetra[0], tetra[1], tetra[2], pt)
    s_ = np.sign(d_)
    z_ = np.logical_or(np.any([s_ * si == -1 for si in np.sign([d0,d1,d2,d3])], axis=0),
                       np.isclose(d_,0))
    x_ = np.logical_not(z_)
    d_inv = x_ / (x_ * d_ + z_)
    return np.asarray([d_inv * dq for dq in (d0,d1,d2,d3)]) 
Example #27
Source File: tf_utils.py    From DOTA_models with Apache License 2.0
def add_summary_ops(m, summarize_ops, summarize_names, to_aggregate=None,
                    summary_key='summaries',
                    print_summary_key='print_summaries', prefix=''):
  if type(to_aggregate) != list:
    to_aggregate = [to_aggregate for _ in summarize_ops]
  
  # set up aggregating metrics
  if np.any(to_aggregate):
    agg_ops = []
    for op, name, to_agg in zip(summarize_ops, summarize_names, to_aggregate):
      if to_agg:
        # agg_ops.append(slim.metrics.streaming_mean(op, return_reset_op=True))
        agg_ops.append(tf.contrib.metrics.streaming_mean(op))
        # agg_ops.append(tf.contrib.metrics.streaming_mean(op, return_reset_op=True))
      else:
        agg_ops.append([None, None, None])

    # agg_values_op, agg_update_op, agg_reset_op = zip(*agg_ops)
    # agg_update_op = [x for x in agg_update_op if x is not None]
    # agg_reset_op = [x for x in agg_reset_op if x is not None]
    agg_values_op, agg_update_op = zip(*agg_ops)
    agg_update_op = [x for x in agg_update_op if x is not None]
    agg_reset_op  = [tf.no_op()]
  else:
    agg_values_op = [None for _ in to_aggregate]
    agg_update_op = [tf.no_op()]
    agg_reset_op  = [tf.no_op()]

  for op, name, to_agg, agg_op in zip(summarize_ops, summarize_names, to_aggregate, agg_values_op):
    if to_agg:
      add_scalar_summary_op(agg_op, name, summary_key, print_summary_key, prefix)
    else:
      add_scalar_summary_op(op, name, summary_key, print_summary_key, prefix)

  summary_op       = tf.summary.merge_all(summary_key)
  print_summary_op = tf.summary.merge_all(print_summary_key)
  return summary_op, print_summary_op, agg_update_op, agg_reset_op 
Example #28
Source File: replay_memory.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def sample(self, batch_size):
        assert self.size >= batch_size and self.replay_start_size >= self.history_length
        assert(0 <= self.size <= self.memory_size)
        assert(0 <= self.top <= self.memory_size)
        if self.size <= self.replay_start_size:
            raise ValueError("Size of the effective samples of the ReplayMemory must be bigger than "
                             "start_size! Currently, size=%d, start_size=%d"
                             %(self.size, self.replay_start_size))
        #TODO Possibly states + inds for less memory access
        states = numpy.zeros((batch_size, self.history_length) + self.state_dim,
                             dtype=self.states.dtype)
        actions = numpy.zeros((batch_size,) + self.action_dim, dtype=self.actions.dtype)
        rewards = numpy.zeros(batch_size, dtype='float32')
        terminate_flags = numpy.zeros(batch_size, dtype='bool')
        next_states = numpy.zeros((batch_size, self.history_length) + self.state_dim,
                                  dtype=self.states.dtype)
        counter = 0
        while counter < batch_size:
            index = self.rng.randint(low=self.top - self.size + 1, high=self.top - self.history_length)
            transition_indices = numpy.arange(index, index + self.history_length)
            initial_indices = transition_indices - 1
            end_index = index + self.history_length - 1
            while numpy.any(self.terminate_flags.take(initial_indices, mode='wrap')):
                # Check if terminates in the middle of the sample!
                index -= 1
                transition_indices = numpy.arange(index, index + self.history_length)
                initial_indices = transition_indices - 1
                end_index = index + self.history_length - 1
            states[counter] = self.states.take(initial_indices, axis=0, mode='wrap')
            actions[counter] = self.actions.take(end_index, axis=0, mode='wrap')
            rewards[counter] = self.rewards.take(end_index, mode='wrap')
            terminate_flags[counter] = self.terminate_flags.take(end_index, mode='wrap')
            next_states[counter] = self.states.take(transition_indices, axis=0, mode='wrap')
            counter += 1
        return states, actions, rewards, next_states, terminate_flags 
Example #29
Source File: online.py    From contextualbandits with BSD 2-Clause "Simplified" License
def _calc_preds(self, X, choose = True):
        pred_proba = self._oracles.decision_function(X)
        pred_max = pred_proba.max(axis = 1)
        if choose:
            pred = np.argmax(pred_proba, axis = 1)
        else:
            pred = pred_proba
        set_greedy = pred_max <= self.thr
        if np.any(set_greedy):
            self._choose_greedy(set_greedy, X, pred, pred_proba, choose)
        return pred, pred_max 
Example #30
Source File: replay_memory.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def sample_inplace(self, batch_size, states, offset):
        assert self.size >= batch_size and self.replay_start_size >= self.history_length
        assert(0 <= self.size <= self.memory_size)
        assert(0 <= self.top <= self.memory_size)
        if self.size <= self.replay_start_size:
            raise ValueError("Size of the effective samples of the ReplayMemory must be "
                             "bigger than start_size! Currently, size=%d, start_size=%d"
                             %(self.size, self.replay_start_size))
        actions = numpy.zeros((batch_size,) + self.action_dim, dtype=self.actions.dtype)
        rewards = numpy.zeros(batch_size, dtype='float32')
        terminate_flags = numpy.zeros(batch_size, dtype='bool')

        counter = 0
        while counter < batch_size:
            index = self.rng.randint(low=self.top - self.size + 1, high=self.top - self.history_length )
            transition_indices = numpy.arange(index, index + self.history_length+1)
            initial_indices = transition_indices - 1
            end_index = index + self.history_length - 1
            if numpy.any(self.terminate_flags.take(initial_indices[0:self.history_length], mode='wrap')):
                # Check if terminates in the middle of the sample!
                continue
            states[counter + offset] = self.states.take(initial_indices, axis=0, mode='wrap')
            actions[counter] = self.actions.take(end_index, axis=0, mode='wrap')
            rewards[counter] = self.rewards.take(end_index, mode='wrap')
            terminate_flags[counter] = self.terminate_flags.take(end_index, mode='wrap')
            # next_states[counter] = self.states.take(transition_indices, axis=0, mode='wrap')
            counter += 1
        return actions, rewards, terminate_flags