Python numpy.asscalar() Examples

The following are 30 code examples showing how to use numpy.asscalar(). They are extracted from open source projects; the project, author, source file, and license are noted above each example. Note that np.asscalar(a) was deprecated in NumPy 1.16 and removed in NumPy 1.23; in new code, use a.item() instead.
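
A minimal sketch of the conversion asscalar performs, next to its supported replacement (running the asscalar line itself requires NumPy < 1.23):

import numpy as np

a = np.array([3.14])
x = np.asscalar(a)  # 3.14 as a plain Python float (removed in NumPy 1.23)
y = a.item()        # equivalent, and the supported replacement
assert type(x) is float and x == y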

Example 1
Project: vergeml   Author: mme   File: imagenet.py    License: MIT License
def predict(self, f, k=5, resize_mode='fill'):
        from keras.preprocessing import image
        from vergeml.img import resize_image

        filename = os.path.basename(f)

        if not os.path.exists(f):
            return dict(filename=filename, prediction=[])

        img = image.load_img(f)
        img = resize_image(img, self.image_size, self.image_size, 'antialias', resize_mode)

        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = self.preprocess_input(x)
        preds = self.model.predict(x)
        pred = self._decode(preds, top=k)[0]
        prediction = [dict(probability=np.asscalar(perc), label=klass)
                      for _, klass, perc in pred]

        return dict(filename=filename, prediction=prediction) 
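
For reference, np.asscalar converts each probability to a plain Python float (useful e.g. for JSON serialization, which rejects np.float32), so the result looks like this (hypothetical filename and labels):

# predict('cat.jpg', k=2) returns something shaped like:
# {'filename': 'cat.jpg',
#  'prediction': [{'probability': 0.91, 'label': 'tabby'},
#                 {'probability': 0.05, 'label': 'tiger_cat'}]}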
Example 2
Project: connecting_the_dots   Author: autonomousvision   File: metric.py    License: MIT License
def add(self, es, ta, ma=None):
    if ma is not None:
      raise Exception('mask is not implemented')
    es = es.ravel()
    ta = ta.ravel()
    if es.shape[0] != ta.shape[0]:
      raise Exception('invalid shape of es, or ta')
    if es.min() < 0 or es.max() > 1:
      raise Exception('estimate has wrong value range')
    ta_p = (ta == 1)
    ta_n = (ta == 0)
    es_p = es[ta_p]
    es_n = es[ta_n]
    for idx, wp in enumerate(self.thresholds):
      wp = np.asscalar(wp)
      self.tps[idx] += (es_p > wp).sum()
      self.fps[idx] += (es_n > wp).sum()
      self.fns[idx] += (es_p <= wp).sum()
      self.tns[idx] += (es_n <= wp).sum()
    self.n_pos += ta_p.sum()
    self.n_neg += ta_n.sum() 
Example 3
Project: tutorials   Author: facebookarchive   File: Multi-GPU_Training.py    License: Apache License 2.0
def accuracy(model):
    accuracy = []
    prefix = model.net.Proto().name
    for device in model._devices:
        accuracy.append(
            np.asscalar(workspace.FetchBlob("gpu_{}/{}_accuracy".format(device, prefix))))
    return np.average(accuracy)


# ## Part 11: Run Multi-GPU Training and Get Test Results
# You've come a long way. Now is the time to see it all pay off. Since you already ran ResNet once, you can glance at the code below and run it. The big difference this time is that your model is parallelized!
# 
# The additional components at the end deal with accuracy so you may want to dig into those specifics as a bonus task. You can try it again: just adjust the `num_epochs` value below, run the block, and see the results. You can also go back to Part 10 to reinitialize the model, and run this step again. (You may want to add `workspace.ResetWorkspace()` before you run the new models again.)
# 
# Go back and check the images/sec from when you ran on a single GPU. Note how you can scale up with only a small amount of overhead.
# 
# ### Task: How many GPUs would it take to train ImageNet in under a minute? 

# In[ ]:


# Start looping through epochs where we run the batches of images to cover the entire dataset
# Usually you would want to run a lot more epochs to increase your model's accuracy 
Example 4
Project: tutorials   Author: facebookarchive   File: Multi-GPU_Training.py    License: Apache License 2.0
def accuracy(model):
    accuracy = []
    prefix = model.net.Proto().name
    for device in model._devices:
        accuracy.append(
            np.asscalar(workspace.FetchBlob("gpu_{}/{}_accuracy".format(device, prefix))))
    return np.average(accuracy)


# In[ ]:


# SOLUTION for Part 11

# Start looping through epochs where we run the batches of images to cover the entire dataset
# Usually you would want to run a lot more epochs to increase your model's accuracy 
Example 5
Project: pase   Author: santi-pdp   File: transforms.py    License: MIT License
def __call__(self, wav, srate=16000, nbits=16):
        """ Add noise to clean wav """
        if isinstance(wav, torch.Tensor):
            wav = wav.numpy()
        noise_idx = np.random.choice(list(range(len(self.noises))), 1)
        sel_noise = self.noises[np.asscalar(noise_idx)]
        noise = sel_noise['data']
        snr = np.random.choice(self.snr_levels, 1)
        # print('Applying SNR: {} dB'.format(snr[0]))
        if wav.ndim > 1:
            wav = wav.reshape((-1,))
        noisy, noise_bound = self.addnoise_asl(wav, noise, srate,
                                               nbits, snr,
                                               do_IRS=self.do_IRS)
        # normalize to avoid clipping
        if np.max(noisy) >= 1 or np.min(noisy) < -1:
            small = 0.1
            while np.max(noisy) >= 1 or np.min(noisy) < -1:
                noisy = noisy / (1. + small)
                small = small + 0.1
        return torch.FloatTensor(noisy.astype(np.float32)) 
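
The rescaling loop above shrinks the signal in steps of roughly ten percent until it fits; a single-division variant with the same intent would be (a sketch, not the project's code):

peak = np.max(np.abs(noisy))
if peak >= 1.0:
    noisy = noisy / (peak + 1e-6)  # one division keeps all samples in (-1, 1)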
Example 6
Project: calfem-python   Author: CALFEM   File: core.py    License: MIT License
def bar2e(ex,ey,ep):
    """
    Compute the element stiffness matrix for a two-dimensional bar element.
    
    :param list ex: element x coordinates [x1, x2]
    :param list ey: element y coordinates [y1, y2]
    :param list ep: [E, A]: E - Young's modulus, A - Cross section area
    :return mat Ke: stiffness matrix, [4 x 4]
    """
    E=ep[0]
    A=ep[1]
    
    b = np.mat([[ex[1]-ex[0]],[ey[1]-ey[0]]])
    L = np.asscalar(np.sqrt(b.T*b))
    
    Kle = np.mat([[1.,-1.],[-1.,1.]])*E*A/L
    
    n = np.asarray(b.T/L).reshape(2,)
    
    G = np.mat([
        [n[0],n[1],0.,0.],
        [0.,0.,n[0],n[1]]
    ])
    
    return G.T*Kle*G 
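
A quick check with made-up values: a unit-length bar along the x axis, E = 210 GPa, A = 1e-4 m^2 (hypothetical numbers, not from the CALFEM docs):

import numpy as np

Ke = bar2e(ex=[0., 1.], ey=[0., 0.], ep=[210e9, 1e-4])
print(Ke.shape)  # (4, 4); only x-direction terms are non-zero for this geometry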
Example 7
Project: ilqr   Author: anassinator   File: cost.py    License: GNU General Public License v3.0
def l(self, x, u, i, terminal=False):
        """Instantaneous cost function.

        Args:
            x: Current state [state_size].
            u: Current control [action_size]. None if terminal.
            i: Current time step.
            terminal: Compute terminal cost. Default: False.

        Returns:
            Instantaneous cost (scalar).
        """
        if terminal:
            z = np.hstack([x, i])
            return np.asscalar(self._l_terminal(*z))

        z = np.hstack([x, u, i])
        return np.asscalar(self._l(*z)) 
Example 8
Project: ilqr   Author: anassinator   File: cost.py    License: GNU General Public License v3.0
def l(self, x, u, i, terminal=False):
        """Instantaneous cost function.

        Args:
            x: Current state [state_size].
            u: Current control [action_size]. None if terminal.
            i: Current time step.
            terminal: Compute terminal cost. Default: False.

        Returns:
            Instantaneous cost (scalar).
        """
        if terminal:
            return np.asscalar(self._l_term(x, i))

        return np.asscalar(self._l(x, u, i)) 
Example 9
Project: dket   Author: dkmfbk   File: test_data.py    License: GNU General Public License v3.0
def test_parse(self):
        """Base test for the `dket.data.decode` function."""

        words = [1, 2, 3, 0]
        formula = [12, 23, 34, 45, 0]
        example = data.encode(words, formula)
        serialized = example.SerializeToString()
        words_t, sent_len_t, formula_t, form_len_t = data.parse(serialized)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            actual = sess.run([words_t, sent_len_t, formula_t, form_len_t])
        self.assertEqual(words, actual[0].tolist())
        self.assertEqual(len(words), np.asscalar(actual[1]))
        self.assertEqual(formula, actual[2].tolist())
        self.assertEqual(len(formula), np.asscalar(actual[3])) 
Example 10
Project: pysheds   Author: mdbartos   File: rfsm.py    License: GNU General Public License v3.0
def set_cumulative_capacities(self, node):
        if node.l:
            self.set_cumulative_capacities(node.l)
        if node.r:
            self.set_cumulative_capacities(node.r)
        if node.parent:
            if node.name:
                elevdiff = node.parent.elev - self.dem[self.ws[node.level] == node.name]
                vol = abs(np.asscalar(elevdiff[elevdiff > 0].sum()) * self.x * self.y)
                node.vol = vol
            else:
                leaves = []
                self.enumerate_leaves(node, level=node.level, stack=leaves)
                mask = np.isin(self.ws[node.level], leaves)
                boundary = list(chain.from_iterable([self.b[node.level].setdefault(pair, [])
                                                        for pair in combinations(leaves, 2)]))
                mask.flat[boundary] = True
                elevdiff = node.parent.elev - self.dem[mask]
                vol = abs(np.asscalar(elevdiff[elevdiff > 0].sum()) * self.x * self.y)
                node.vol = vol 
Example 11
Project: pysheds   Author: mdbartos   File: swmm.py    License: GNU General Public License v3.0
def generate_storage_uncontrolled(self, ixes, **kwargs):
        storage_uncontrolled = {}
        depths = 4
        init_depths = 0.1
        storage_ends = [np.asscalar(self.endnodes[np.where(self.startnodes == ix)])
                        for ix in ixes]
        storage_uncontrolled['name'] = 'ST' + pd.Series(ixes).astype(str)
        storage_uncontrolled['elev'] = self.grid.view(self.dem).flat[storage_ends]
        storage_uncontrolled['ymax'] = self.channel_d.flat[ixes] + 1
        storage_uncontrolled['y0'] = 0
        storage_uncontrolled['Acurve'] = 'FUNCTIONAL'
        storage_uncontrolled['A0'] = self.channel_w.flat[ixes]
        storage_uncontrolled['A1'] = 0
        storage_uncontrolled['A2'] = 1
        storage_uncontrolled = pd.DataFrame.from_dict(storage_uncontrolled)
        # Manual overrides
        for key, value in kwargs.items():
            storage_uncontrolled[key] = value
        self.storage_uncontrolled = storage_uncontrolled[['name', 'elev', 'ymax', 'y0', 'Acurve',
                            'A1', 'A2', 'A0']] 
Example 12
Project: pysheds   Author: mdbartos   File: swmm.py    License: GNU General Public License v3.0
def generate_storage_controlled(self, ixes, **kwargs):
        storage_controlled = {}
        depths = 2
        init_depths = 0.1
        storage_ends = [np.asscalar(self.endnodes[np.where(self.startnodes == ix)])
                        for ix in ixes]
        storage_controlled['name'] = 'C' + pd.Series(ixes).astype(str)
        storage_controlled['elev'] = self.grid.view(self.dem).flat[storage_ends]
        storage_controlled['ymax'] = depths
        storage_controlled['y0'] = 0
        storage_controlled['Acurve'] = 'FUNCTIONAL'
        storage_controlled['A0'] = 1000
        storage_controlled['A1'] = 10000
        storage_controlled['A2'] = 1
        storage_controlled = pd.DataFrame.from_dict(storage_controlled)
        # Manual overrides
        for key, value in kwargs.items():
            storage_controlled[key] = value
        self.storage_controlled = storage_controlled[['name', 'elev', 'ymax', 'y0', 'Acurve',
                            'A1', 'A2', 'A0']] 
Example 13
Project: trax   Author: google   File: arrays.py    License: Apache License 2.0
def __index__(self):
    """Returns a python scalar.

    This allows using an instance of this class as an array index.
    Note that only arrays of integer types with size 1 can be used as array
    indices.

    Returns:
      A Python scalar.

    Raises:
      TypeError: If the array is not of an integer type.
      ValueError: If the array does not have size 1.
    """
    # TODO(wangpeng): Handle graph mode
    return np.asscalar(self.data.numpy()) 
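
The __index__ protocol itself is plain Python; a stripped-down illustration of why defining it makes an object usable as an array index (hypothetical class, unrelated to trax):

import numpy as np

class Scalar:
    def __init__(self, v):
        self.v = v
    def __index__(self):
        return int(self.v)  # called wherever Python requires an integer index

arr = np.arange(10)
print(arr[Scalar(3)])  # 3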
Example 14
Project: training_results_v0.5   Author: mlperf   File: in_memory_eval.py    License: Apache License 2.0
def end(self, session):  # pylint: disable=unused-argument
    """Runs evaluator for final model."""
    # Only runs eval at the end if highest accuracy so far
    # is less than self._stop_threshold.
    if not self._run_success:
      step = np.asscalar(session.run(self._global_step_tensor))
      logging.info('Starting eval.')
      eval_results = self._evaluate(session, step)
      mlperf_log.resnet_print(key=mlperf_log.EVAL_STOP)
      mlperf_log.resnet_print(
          key=mlperf_log.EVAL_ACCURACY,
          value={
              'epoch': max(step // self._steps_per_epoch - 1, 0),
              'value': float(eval_results[_EVAL_METRIC])
          })
      if eval_results[_EVAL_METRIC] >= self._stop_threshold:
        mlperf_log.resnet_print(
            key=mlperf_log.RUN_STOP, value={'success': 'true'})
      else:
        mlperf_log.resnet_print(
            key=mlperf_log.RUN_STOP, value={'success': 'false'}) 
Example 15
Project: vergeml   Author: mme   File: env.py    License: MIT License
def _toscalar(v):
    if isinstance(v, (np.float16, np.float32, np.float64,
                      np.uint8, np.uint16, np.uint32, np.uint64,
                      np.int8, np.int16, np.int32, np.int64)):
        return np.asscalar(v)
    else:
        return v 
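
On NumPy releases where asscalar is gone, the same helper can be written against np.generic, the common base class of all NumPy scalar types (a sketch, not vergeml's code):

import numpy as np

def _toscalar(v):
    return v.item() if isinstance(v, np.generic) else v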
Example 16
Project: me-ica   Author: ME-ICA   File: nifti1.py    License: GNU Lesser General Public License v2.1
def get_intent(self, code_repr='label'):
        ''' Get intent code, parameters and name

        Parameters
        ----------
        code_repr : string
           string giving output form of intent code representation.
           Default is 'label'; use 'code' for integer representation.

        Returns
        -------
        code : string or integer
            intent code, or string describing code
        parameters : tuple
            parameters for the intent
        name : string
            intent name

        Examples
        --------
        >>> hdr = Nifti1Header()
        >>> hdr.set_intent('t test', (10,), name='some score')
        >>> hdr.get_intent()
        ('t test', (10.0,), 'some score')
        >>> hdr.get_intent('code')
        (3, (10.0,), 'some score')
        '''
        hdr = self._structarr
        recoder = self._field_recoders['intent_code']
        code = int(hdr['intent_code'])
        if code_repr == 'code':
            label = code
        elif code_repr == 'label':
            label = recoder.label[code]
        else:
            raise TypeError('repr can be "label" or "code"')
        n_params = len(recoder.parameters[code])
        params = (float(hdr['intent_p%d' % (i+1)]) for i in range(n_params))
        name = asstr(np.asscalar(hdr['intent_name']))
        return label, tuple(params), name 
Example 17
Project: me-ica   Author: ME-ICA   File: nifti1.py    License: GNU Lesser General Public License v2.1
def _chk_magic_offset(hdr, fix=False):
        rep = Report(HeaderDataError)
        # for ease of later string formatting, use scalar of byte string
        magic = np.asscalar(hdr['magic'])
        offset = hdr['vox_offset']
        if magic == asbytes('n+1'): # one file
            if offset >= 352:
                if not offset % 16:
                    return hdr, rep
                else:
                    # SPM uses memory mapping to read the data, and
                    # apparently this has to start on 16 byte boundaries
                    rep.problem_msg = ('vox offset (=%s) not divisible '
                                       'by 16, not SPM compatible' % offset)
                    rep.problem_level = 30
                    if fix:
                        rep.fix_msg = 'leaving at current value'
                    return hdr, rep
            rep.problem_level = 40
            rep.problem_msg = ('vox offset %d too low for '
                               'single file nifti1' % offset)
            if fix:
                hdr['vox_offset'] = 352
                rep.fix_msg = 'setting to minimum value of 352'
        elif magic != asbytes('ni1'): # two files
            # unrecognized nii magic string, oh dear
            rep.problem_msg = ('magic string "%s" is not valid' %
                               asstr(magic))
            rep.problem_level = 45
            if fix:
                rep.fix_msg = 'leaving as is, but future errors are likely'
        return hdr, rep 
Example 18
Project: me-ica   Author: ME-ICA   File: trackvis.py    License: GNU Lesser General Public License v2.1
def _hdr_from_mapping(hdr=None, mapping=None, endianness=native_code):
    ''' Fill `hdr` from mapping `mapping`, with given endianness '''
    if hdr is None:
        # passed a valid mapping as header?  Copy and return
        if isinstance(mapping, np.ndarray):
            test_dtype = mapping.dtype.newbyteorder('=')
            if test_dtype in (header_1_dtype, header_2_dtype):
                return mapping.copy()
        # otherwise make a new empty header.   If no version specified,
        # go for default (2)
        if mapping is None:
            version = 2
        else:
            version =  mapping.get('version', 2)
        hdr = empty_header(endianness, version)
    if mapping is None:
        return hdr
    if isinstance(mapping, np.ndarray):
        mapping = rec2dict(mapping)
    for key, value in mapping.items():
        hdr[key] = value
    # check header values
    if np.asscalar(hdr['id_string'])[:5] != asbytes('TRACK'):
        raise HeaderError('Expecting TRACK as first '
                          '5 characters of id_string')
    if hdr['version'] not in (1, 2):
        raise HeaderError('Reader only supports versions 1 and 2')
    if hdr['hdr_size'] != 1000:
        raise HeaderError('hdr_size should be 1000')
    return hdr 
Example 19
Project: me-ica   Author: ME-ICA   File: volumeutils.py    License: GNU Lesser General Public License v2.1
def rec2dict(rec):
    ''' Convert recarray to dictionary

    Also converts scalar values to scalars

    Parameters
    ----------
    rec : ndarray
       structured ndarray

    Returns
    -------
    dct : dict
       dict with key, value pairs as for `rec`

    Examples
    --------
    >>> r = np.zeros((), dtype = [('x', 'i4'), ('s', 'S10')])
    >>> d = rec2dict(r)
    >>> d == {'x': 0, 's': ''} #23dt : replace("''", "b''")
    True
    '''
    dct = {}
    for key in rec.dtype.fields:
        val = rec[key]
        try:
            val = np.asscalar(val)
        except ValueError:
            pass
        dct[key] = val
    return dct 
Example 20
Project: DualFisheye   Author: ooterness   File: fisheye.py    License: MIT License
def get_x(self):
        return np.asscalar(self.center_px[0]) 
Example 21
Project: DualFisheye   Author: ooterness   File: fisheye.py    License: MIT License
def get_y(self):
        return np.asscalar(self.center_px[1]) 
Example 22
Project: sadl   Author: coinse   File: sa.py    License: MIT License
def _get_lsa(kde, at, removed_cols):
    refined_at = np.delete(at, removed_cols, axis=0)
    return np.asscalar(-kde.logpdf(np.transpose(refined_at))) 
Example 23
Project: brainforge   Author: csxeba   File: typing.py    License: GNU General Public License v3.0
def scalX(scalar, dtype=floatX):
    return np.asscalar(np.array([scalar], dtype=dtype)) 
Example 24
Project: ludwig   Author: uber   File: batcher.py    License: Apache License 2.0
def __init__(self, dataset, bucketing_field, batch_size=128, buckets=10,
                 should_shuffle=True, ignore_last=False,
                 should_trim=False, trim_side='right'):
        self.should_shuffle = should_shuffle
        self.bucketing_field = bucketing_field
        self.should_trim = should_trim
        self.trim_side = trim_side

        # store our dataset as well
        self.dataset = dataset

        field = dataset.get_dataset()[bucketing_field]
        field_lengths = np.apply_along_axis(lambda x: np.sign(x).sum(), 1,
                                            field)
        sorted_idcs = np.argsort(field_lengths)
        self.buckets_idcs = []
        datapoints_per_bucket = len(field) // buckets
        for b in range(buckets):
            start = datapoints_per_bucket * b
            end = datapoints_per_bucket * (b + 1) if b < buckets - 1 else len(
                sorted_idcs)
            self.buckets_idcs.append(sorted_idcs[start:end])

        if should_shuffle:
            self.shuffle(self.buckets_idcs)

        self.ignore_last = ignore_last
        self.batch_size = batch_size
        self.total_size = min(map(len, dataset.get_dataset().values()))
        self.bucket_sizes = np.array([x for x in map(len, self.buckets_idcs)])
        self.steps_per_epoch = int(
            np.asscalar(np.sum(np.ceil(self.bucket_sizes / self.batch_size))))
        self.indices = np.array([0] * buckets)
        self.step = 0
        self.epoch = 0 
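
The steps_per_epoch line is the only asscalar call here; a standalone equivalent without it (hypothetical bucket sizes) is:

import numpy as np

bucket_sizes = np.array([100, 100, 37])
batch_size = 16
steps_per_epoch = int(np.ceil(bucket_sizes / batch_size).sum())  # 7 + 7 + 3 = 17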
Example 25
Project: scattertext   Author: JasonKessler   File: FeatureOuput.py    License: Apache License 2.0
def output(self):
		# () -> list
		toret = [{} for i in range(self.num_docs)]
		X = self.X.tocoo()
		for row, col, val in zip(X.row, X.col, X.data):
			toret[row][self.idx_store.getval(col)] = np.asscalar(val)
		return toret 
Example 26
Project: pase   Author: santi-pdp   File: knn.py    License: MIT License
def main(opts):
    # find npy files in data dir
    with open(opts.data_cfg, 'r') as cfg_f:
        # contains train and test files
        cfg = json.load(cfg_f)
        train_X, train_Y, spk2idx = load_train_files(opts.data_root,
                                                     cfg, 'train')
        test_X, test_Y = load_test_files(opts.data_root, cfg)
        print('Loaded trainX: ', train_X.shape)
        print('Loaded trainY: ', train_Y.shape)
        neigh = KNeighborsClassifier(n_neighbors=opts.k, n_jobs=opts.n_jobs)
        neigh.fit(train_X, train_Y) 
        accs = []
        timings = []
        beg_t = timeit.default_timer()
        for te_idx in range(len(test_X)):
            test_x = test_X[te_idx]
            facc = []
            preds = [0.] * len(spk2idx)
            Y_ = neigh.predict(test_x)
            for ii in range(len(Y_)):
                preds[Y_[ii]] += 1
            y_ = np.argmax(preds, axis=0)
            y = test_Y[te_idx]
            if y_ == y:
                accs.append(1)
            else:
                accs.append(0.)
            end_t = timeit.default_timer()
            timings.append(end_t - beg_t)
            beg_t = timeit.default_timer()
            print('Processing test utterance {}/{}, muttime: {:.3f} s'
                  ''.format(te_idx + 1,
                            len(test_X),
                            np.mean(timings)))
        print('Score on {} samples: {}'.format(len(accs),
                                               np.mean(accs)))
        with open(opts.out_log, 'w') as out_f:
            out_f.write('{:.4f}'.format(np.asscalar(np.mean(accs)))) 
Example 27
Project: revrand   Author: NICTA   File: base.py    License: Apache License 2.0
def scalar_reshape(a, newshape, order='C'):
    """
    Reshape, but also return scalars or empty lists.

    Identical to `numpy.reshape` except in the case where `newshape` is
    the empty tuple, in which case we return a scalar instead of a
    0-dimensional array.

    Examples
    --------
    >>> a = np.arange(6)
    >>> np.array_equal(np.reshape(a, (3, 2)), scalar_reshape(a, (3, 2)))
    True

    >>> scalar_reshape(np.array([3.14]), newshape=())
    3.14

    >>> scalar_reshape(np.array([2.71]), newshape=(1,))
    array([ 2.71])

    >>> scalar_reshape(np.array([]), newshape=(0,))
    []
    """
    if newshape == ():
        return np.asscalar(a)

    if newshape == (0,):
        return []

    return np.reshape(a, newshape, order) 
Example 28
Project: lambda-packs   Author: ryfeus   File: tensor_util.py    License: MIT License
def ExtractBitsFromFloat16(x):
  return np.asscalar(np.asarray(x, dtype=np.float16).view(np.uint16)) 
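
A worked value (my own check, not from the TensorFlow sources): 1.0 in IEEE binary16 is sign 0, exponent 01111 (bias 15), mantissa 0, i.e. 0x3C00:

print(ExtractBitsFromFloat16(1.0))   # 15360 == 0x3C00
print(ExtractBitsFromFloat16(-2.0))  # 49152 == 0xC000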
Example 29
Project: lambda-packs   Author: ryfeus   File: tensor_util.py    License: MIT License
def SlowAppendFloat32ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.float_val.extend([np.asscalar(x) for x in proto_values]) 
Example 30
Project: lambda-packs   Author: ryfeus   File: tensor_util.py    License: MIT License
def SlowAppendFloat64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.double_val.extend([np.asscalar(x) for x in proto_values])