Python numpy.isscalar() Examples

The following are 30 code examples showing how to use numpy.isscalar(). These examples are extracted from open source projects.


You may also want to check out all available functions and classes of the numpy module.
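A quick note on semantics before the examples: numpy.isscalar() returns True for Python numbers, strings, and NumPy scalar types, but False for 0-dimensional arrays and containers. The short sketch below (illustrative, following NumPy's documented behavior) shows the cases that most often surprise people:

import numpy as np

np.isscalar(3.1)             # True  (Python float)
np.isscalar(np.float64(3))   # True  (NumPy scalar type)
np.isscalar('numpy')         # True  (strings count as scalars)
np.isscalar(np.array(3.1))   # False (a 0-d array is not a scalar)
np.isscalar([3.1])           # False (containers are not scalars)
np.isscalar(None)            # False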

Example 1
Project: scanorama   Author: brianhie   File: utils.py    License: MIT License
def handle_zeros_in_scale(scale, copy=True):
    ''' Makes sure that whenever scale is zero, we handle it correctly.
    This happens in most scalers when we have constant features.
    Adapted from sklearn.preprocessing.data'''

    # if we are fitting on 1D arrays, scale might be a scalar
    if np.isscalar(scale):
        if scale == 0.0:
            scale = 1.0
        return scale
    elif isinstance(scale, np.ndarray):
        if copy:
            # New array to avoid side-effects
            scale = scale.copy()
        scale[scale == 0.0] = 1.0
    return scale 
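For illustration, the helper above behaves as follows on scalar and array inputs (a usage sketch, assuming numpy is imported as np):

handle_zeros_in_scale(0.0)                   # -> 1.0 (scalar path)
handle_zeros_in_scale(2.5)                   # -> 2.5 (non-zero scalars pass through)
handle_zeros_in_scale(np.array([0.0, 2.0]))  # -> array([1., 2.])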
Example 2
Project: chainerrl   Author: chainer   File: async_.py    License: MIT License
def extract_params_as_shared_arrays(link):
    assert isinstance(link, chainer.Link)
    shared_arrays = {}
    for param_name, param in link.namedparams():
        typecode = param.array.dtype.char
        shared_arrays[param_name] = mp.RawArray(typecode, param.array.ravel())

    for persistent_name, persistent in chainerrl.misc.namedpersistent(link):
        if isinstance(persistent, np.ndarray):
            typecode = persistent.dtype.char
            shared_arrays[persistent_name] = mp.RawArray(
                typecode, persistent.ravel())
        else:
            assert np.isscalar(persistent)
            # Wrap by a 1-dim array because multiprocessing.RawArray does not
            # accept a 0-dim array.
            persistent_as_array = np.asarray([persistent])
            typecode = persistent_as_array.dtype.char
            shared_arrays[persistent_name] = mp.RawArray(
                typecode, persistent_as_array)
    return shared_arrays 
Example 3
Project: chainerrl   Author: chainer   File: acer.py    License: MIT License
def compute_policy_gradient_full_correction(
        action_distrib, action_distrib_mu, action_value, v,
        truncation_threshold):
    """Compute off-policy bias correction term wrt all actions."""
    assert truncation_threshold is not None
    assert np.isscalar(v)
    with chainer.no_backprop_mode():
        rho_all_inv = compute_full_importance(action_distrib_mu,
                                              action_distrib)
        correction_weight = (
            np.maximum(1 - truncation_threshold * rho_all_inv,
                       np.zeros_like(rho_all_inv)) *
            action_distrib.all_prob.array[0])
        correction_advantage = action_value.q_values.array[0] - v
    return -F.sum(correction_weight *
                  action_distrib.all_log_prob *
                  correction_advantage, axis=1) 
Example 4
Project: chainerrl   Author: chainer   File: acer.py    License: MIT License
def compute_policy_gradient_sample_correction(
        action_distrib, action_distrib_mu, action_value, v,
        truncation_threshold):
    """Compute off-policy bias correction term wrt a sampled action."""
    assert np.isscalar(v)
    assert truncation_threshold is not None
    with chainer.no_backprop_mode():
        sample_action = action_distrib.sample().array
        rho_dash_inv = compute_importance(
            action_distrib_mu, action_distrib, sample_action)
        if (truncation_threshold > 0 and
                rho_dash_inv >= 1 / truncation_threshold):
            return chainer.Variable(np.asarray([0], dtype=np.float32))
        correction_weight = max(0, 1 - truncation_threshold * rho_dash_inv)
        assert correction_weight <= 1
        q = float(action_value.evaluate_actions(sample_action).array[0])
        correction_advantage = q - v
    return -(correction_weight *
             action_distrib.log_prob(sample_action) *
             correction_advantage) 
Example 5
Project: DualFisheye   Author: ooterness   File: fisheye.py    License: MIT License
def add_pixels(self, uv_px, img1d, weight=None):
        # Lookup row & column for each in-bounds coordinate.
        mask = self.get_mask(uv_px)
        xx = uv_px[0,mask]
        yy = uv_px[1,mask]
        # Update matrix according to assigned weight.
        if weight is None:
            img1d[mask] = self.img[yy,xx]
        elif np.isscalar(weight):
            img1d[mask] += self.img[yy,xx] * weight
        else:
            w1 = np.asmatrix(weight, dtype='float32')
            w3 = w1.transpose() * np.ones((1,3))
            img1d[mask] += np.multiply(self.img[yy,xx], w3[mask])


# A panorama image made from several FisheyeImage sources.
# TODO: Add support for supersampled anti-aliasing filters. 
Example 6
Project: respy   Author: OpenSourceEconomics   File: shared.py    License: MIT License
def convert_dictionary_keys_to_dense_indices(dictionary):
    """Convert the keys to tuples containing integers.

    Example
    -------
    >>> dictionary = {(0.0, 1): 0, 2: 1}
    >>> convert_dictionary_keys_to_dense_indices(dictionary)
    {(0, 1): 0, (2,): 1}

    """
    new_dictionary = {}
    for key, val in dictionary.items():
        new_key = (int(key),) if np.isscalar(key) else tuple(int(i) for i in key)
        new_dictionary[new_key] = val

    return new_dictionary 
Example 7
Project: 3D-R2N2   Author: chrischoy   File: binvox_rw.py    License: MIT License
def sparse_to_dense(voxel_data, dims, dtype=bool):  # np.bool was removed in NumPy 1.24+
    if voxel_data.ndim != 2 or voxel_data.shape[0] != 3:
        raise ValueError('voxel_data is wrong shape; should be 3xN array.')
    if np.isscalar(dims):
        dims = [dims] * 3
    dims = np.atleast_2d(dims).T
    # truncate to integers
    xyz = voxel_data.astype(int)  # np.int was removed in NumPy 1.24+
    # discard voxels that fall outside dims
    valid_ix = ~np.any((xyz < 0) | (xyz >= dims), 0)
    xyz = xyz[:, valid_ix]
    out = np.zeros(dims.flatten(), dtype=dtype)
    out[tuple(xyz)] = True
    return out

# def get_linear_index(x, y, z, dims):
# """ Assuming xzy order. (y increasing fastest.
# TODO ensure this is right when dims are not all same
# """
# return x*(dims[1]*dims[2]) + z*dims[1] + y 
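A usage sketch for sparse_to_dense (illustrative; the coordinates and grid size are made up): a scalar dims is broadcast to all three axes via the np.isscalar() branch.

import numpy as np

coords = np.array([[0, 1],
                   [1, 2],
                   [2, 3]])          # two voxels: (0, 1, 2) and (1, 2, 3)
grid = sparse_to_dense(coords, 4)    # scalar dims -> 4 x 4 x 4 grid
print(grid.shape)                    # (4, 4, 4)
print(grid[0, 1, 2], grid[1, 2, 3])  # True True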
Example 8
Project: vampyre   Author: GAMPTeam   File: discrete.py    License: MIT License
def __init__(self, zval, pz, shape, var_axes=(0,),
             is_complex=False, name=None):
                                 
        # Convert scalars to arrays
        if np.isscalar(zval):
            zval = np.array([zval])
        if np.isscalar(pz):
            pz = np.array([pz])
            
        # Set parameters of base estimator
        dtype = zval.dtype
        BaseEst.__init__(self,shape=shape, var_axes=var_axes, dtype=dtype, name=name,\
            type_name='DiscreteEst', nvars=1, cost_avail=True)
                        
        # Set parameters
        self.zval = zval
        self.pz = pz
        self.shape = shape
        self.is_complex = is_complex
        self.fz = -np.log(pz) 
Example 9
Project: gmpe-smtk   Author: GEMScienceTools   File: residual_plots.py    License: GNU Affero General Public License v3.0
def _tojson(*numpy_objs):
    '''Utility function which returns a list where each element of numpy_objs
    is converted to its python equivalent (float or list)'''
    ret = []
    # Problem: browsers might not be happy with JSON 'NaN', so convert
    # NaNs to None. Unfortunately, the conversion must be done element-wise
    # in numpy (there seems to be no pandas NA filter):
    for obj in numpy_objs:
        isscalar = np.isscalar(obj)
        nan_indices = None if isscalar else \
            np.argwhere(np.isnan(obj)).flatten()
        # note: numpy.float64(N).tolist() returns a python float, so:
        obj = None if isscalar and np.isnan(obj) else obj.tolist()
        if nan_indices is not None:
            for idx in nan_indices:
                obj[idx] = None
        ret.append(obj)

    return ret  # tuple(_.tolist() for _ in numpy_objs) 
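A usage sketch (hypothetical inputs): a scalar NaN becomes None directly, while NaNs inside arrays are replaced element-wise after tolist().

import numpy as np

_tojson(np.float64(np.nan), np.array([1.0, np.nan]))
# -> [None, [1.0, None]]   (JSON-safe: no NaN values remain)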
Example 10
Project: tenpy   Author: tenpy   File: tensordot_npc.py    License: GNU General Public License v3.0
def gen_random_legcharge_nq(chinfo, ind_len, n_qsector):
    """return a random (unsorted) LegCharge with a given number of charge sectors.

    `n_qsector` gives the (desired) number of sectors for each of the charges.
    """
    if np.isscalar(n_qsector):
        n_qsector = [n_qsector] * chinfo.qnumber
    n_qsector = np.asarray(n_qsector, dtype=np.intp)
    if n_qsector.shape != (chinfo.qnumber, ):
        raise ValueError
    slices = rand_partitions(0, ind_len, np.prod(n_qsector, dtype=int))
    qs = np.zeros((len(slices) - 1, len(n_qsector)), int)
    q_combos = [a for a in it.product(*[range(-(nq // 2), nq // 2 + 1) for nq in n_qsector])]
    qs = np.array(q_combos)[rand_distinct_int(0, len(q_combos) - 1, len(slices) - 1), :]
    qs = chinfo.make_valid(qs)
    return npc.LegCharge.from_qind(chinfo, slices, qs) 
Example 11
Project: tenpy   Author: tenpy   File: random_test.py    License: GNU General Public License v3.0
def gen_random_legcharge_nq(chinfo, ind_len, n_qsector):
    """return a random (unsorted) LegCharge with a given number of charge sectors.

    `n_qsector` gives the (desired) number of sectors for each of the charges.
    """
    if np.isscalar(n_qsector):
        n_qsector = [n_qsector] * chinfo.qnumber
    n_qsector = np.asarray(n_qsector, dtype=np.intp)
    if n_qsector.shape != (chinfo.qnumber, ):
        raise ValueError
    slices = rand_partitions(0, ind_len, np.prod(n_qsector, dtype=int))
    qs = np.zeros((len(slices) - 1, len(n_qsector)), int)
    q_combos = [a for a in it.product(*[range(-(nq // 2), nq // 2 + 1) for nq in n_qsector])]
    qs = np.array(q_combos)[rand_distinct_int(0, len(q_combos) - 1, len(slices) - 1), :]
    qs = chinfo.make_valid(qs)
    return charges.LegCharge.from_qind(chinfo, slices, qs) 
Example 12
Project: aboleth   Author: gradientinstitute   File: test_layers.py    License: Apache License 2.0
def test_dense_embeddings(make_categories, reps, layer):
    """Test the embedding layer."""
    x, K = make_categories
    x = np.repeat(x, reps, axis=-1)
    N = len(x)
    S = 3
    x_, X_ = _make_placeholders(x, S, tf.int32)
    output, reg = layer(output_dim=D, n_categories=K)(X_)

    tc = tf.test.TestCase()
    with tc.test_session():
        tf.global_variables_initializer().run()
        r = reg.eval()

        assert np.isscalar(r)
        assert r >= 0

        Phi = output.eval(feed_dict={x_: x})

        assert Phi.shape == (S, N, D * reps) 
Example 13
Project: aboleth   Author: gradientinstitute   File: test_layers.py    License: Apache License 2.0
def test_dense_outputs(dense, make_data):
    """Make sure the dense layers output expected dimensions."""
    x, _, _ = make_data
    S = 3

    x_, X_ = _make_placeholders(x, S)
    N = x.shape[0]

    Phi, KL = dense(output_dim=D)(X_)

    tc = tf.test.TestCase()
    with tc.test_session():
        tf.global_variables_initializer().run()
        P = Phi.eval(feed_dict={x_: x})
        assert P.shape == (S, N, D)
        assert P.dtype == np.float32
        assert np.isscalar(KL.eval(feed_dict={x_: x})) 
Example 14
Project: aboleth   Author: gradientinstitute   File: test_distributions.py    License: Apache License 2.0
def test_kl_gaussian_normal(random):
    """Test Gaussian/Normal KL."""
    dim = (5, 10)
    Dim = (5, 10, 10)

    mu0 = random.randn(*dim).astype(np.float32)
    L0 = random_chol(Dim)
    q = tfp.distributions.MultivariateNormalTriL(mu0, L0)

    mu1 = random.randn(*dim).astype(np.float32)
    std1 = 1.0
    L1 = [(std1 * np.eye(dim[1])).astype(np.float32) for _ in range(dim[0])]
    p = tf.distributions.Normal(mu1, std1)

    KL = kl_sum(q, p)
    KLr = KLdiv(mu0, L0, mu1, L1)

    tc = tf.test.TestCase()
    with tc.test_session():
        kl = KL.eval()
        assert np.isscalar(kl)
        assert np.allclose(kl, KLr) 
Example 15
Project: tensortrade   Author: tensortrade-org   File: node.py    License: Apache License 2.0
def __add__(self, other):
        if np.isscalar(other):
            other = Constant(other, "Constant({})".format(other))
            name = "Add({},{})".format(self.name, other.name)
            return BinOp(np.add, name)(self, other)
        assert isinstance(other, Node)
        name = "Add({},{})".format(self.name, other.name)
        return BinOp(np.add, name)(self, other) 
Example 16
Project: tensortrade   Author: tensortrade-org   File: node.py    License: Apache License 2.0
def __sub__(self, other):
        if np.isscalar(other):
            other = Constant(other, "Constant({})".format(other))
            name = "Subtract({},{})".format(self.name, other.name)
            return BinOp(np.subtract, name)(self, other)
        assert isinstance(other, Node)
        name = "Subtract({},{})".format(self.name, other.name)
        return BinOp(np.subtract, name)(self, other) 
Example 17
Project: tensortrade   Author: tensortrade-org   File: node.py    License: Apache License 2.0
def __rsub__(self, other):
        if not np.isscalar(other):
            raise Exception("Invalid node operation.")
        other = Constant(other, "Constant({})".format(other))
        name = "Subtract({},{})".format(other.name, self.name)
        return BinOp(np.subtract, name)(other, self) 
Example 18
Project: tensortrade   Author: tensortrade-org   File: node.py    License: Apache License 2.0
def __mul__(self, other):
        if np.isscalar(other):
            other = Constant(other, "Constant({})".format(other))
            name = "Multiply({},{})".format(self.name, other.name)
            return BinOp(np.multiply, name)(self, other)
        assert isinstance(other, Node)
        name = "Multiply({},{})".format(self.name, other.name)
        return BinOp(np.multiply, name)(self, other) 
Example 19
Project: tensortrade   Author: tensortrade-org   File: node.py    License: Apache License 2.0
def __truediv__(self, other):
        if np.isscalar(other):
            other = Constant(other, "Constant({})".format(other))
            name = "Divide({},{})".format(self.name, other.name)
            return BinOp(np.divide, name)(self, other)
        assert isinstance(other, Node)
        name = "Divide({},{})".format(self.name, other.name)
        return BinOp(np.divide, name)(self, other) 
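The five operator overloads above (Examples 15 through 19) share one pattern: a bare scalar operand is wrapped in a Constant node so that both operands expose the same interface. Below is a minimal, self-contained sketch of that pattern using a hypothetical Value class (not tensortrade's API):

import numpy as np

class Value:
    '''Minimal sketch of the scalar-wrapping pattern (illustrative only).'''

    def __init__(self, data):
        self.data = data

    def __add__(self, other):
        # Wrap bare scalars so both operands share one interface,
        # mirroring the Constant(...) wrapping in the Node examples.
        if np.isscalar(other):
            other = Value(other)
        return Value(self.data + other.data)

print((Value(2.0) + 3.0).data)  # 5.0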
Example 20
Project: ConvLab   Author: ConvLab   File: util.py    License: MIT License
def epi_done(done):
    '''
    General method to check whether an episode is done, for both single and vectorized envs.
    Only returns True for a scalar done, since a vectorized env has no natural episode boundary.
    '''
    return np.isscalar(done) and done 
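A usage sketch (hypothetical inputs): only a scalar done can end an episode; an array of flags from a vectorized env never does.

import numpy as np

epi_done(True)                      # -> True  (single env, episode over)
epi_done(False)                     # -> False
epi_done(np.array([True, False]))   # -> False (vectorized env, no single boundary)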
Example 21
Project: ncvx   Author: cvxgrp   File: integer.py    License: GNU General Public License v3.0
def __init__(self, rows=1, cols=1, M=None, *args, **kwargs):
        if M is None or np.any(M <= 0):
            raise Exception("Integer requires positive values for M.")
        self.M = np.floor(M)
        if np.isscalar(self.M) and (rows, cols) != (1,1):
            self.M = self.M*np.ones((rows,cols))
        super(Integer, self).__init__(rows, cols, *args, **kwargs) 
Example 22
Project: chainerrl   Author: chainer   File: async_.py    License: MIT License
def _set_persistent_values_recursively(link, persistent_name, shared_array):
    if persistent_name.startswith('/'):
        persistent_name = persistent_name[1:]
    if hasattr(link, persistent_name):
        attr_name = persistent_name
        attr = getattr(link, attr_name)
        if isinstance(attr, np.ndarray):
            setattr(link, persistent_name, np.frombuffer(
                shared_array, dtype=attr.dtype).reshape(attr.shape))
        else:
            assert np.isscalar(attr)
            # We wrap scalars with np.ndarray because
            # multiprocessing.RawValue cannot be used as a scalar, while
            # np.ndarray can be.
            typecode = np.asarray(attr).dtype.char
            setattr(link, attr_name, np.frombuffer(
                shared_array, dtype=typecode).reshape(()))
    else:
        assert isinstance(link, (chainer.Chain, chainer.ChainList))
        assert '/' in persistent_name
        child_name, remaining = persistent_name.split('/', 1)
        if isinstance(link, chainer.Chain):
            _set_persistent_values_recursively(
                getattr(link, child_name), remaining, shared_array)
        else:
            _set_persistent_values_recursively(
                link[int(child_name)], remaining, shared_array) 
Example 23
Project: chainerrl   Author: chainer   File: acer.py    License: MIT License
def compute_one_step_pi_loss(self, action, advantage, action_distrib,
                                 action_distrib_mu, action_value, v,
                                 avg_action_distrib):
        assert np.isscalar(advantage)
        assert np.isscalar(v)

        g_loss = compute_policy_gradient_loss(
            action=action,
            advantage=advantage,
            action_distrib=action_distrib,
            action_distrib_mu=action_distrib_mu,
            action_value=action_value,
            v=v,
            truncation_threshold=self.truncation_threshold)

        if self.use_trust_region:
            pi_loss, kl = compute_loss_with_kl_constraint(
                action_distrib, avg_action_distrib, g_loss,
                delta=self.trust_region_delta)
            self.average_kl += (
                (1 - self.average_kl_decay) * (kl - self.average_kl))
        else:
            pi_loss = g_loss

        # Entropy is maximized
        pi_loss -= self.beta * action_distrib.entropy
        return pi_loss 
Example 24
Project: chainerrl   Author: chainer   File: acer.py    License: MIT License
def update(self, t_start, t_stop, R, states, actions, rewards, values,
               action_values, action_distribs, action_distribs_mu,
               avg_action_distribs):

        assert np.isscalar(R)

        total_loss = self.compute_loss(
            t_start=t_start,
            t_stop=t_stop,
            R=R,
            states=states,
            actions=actions,
            rewards=rewards,
            values=values,
            action_values=action_values,
            action_distribs=action_distribs,
            action_distribs_mu=action_distribs_mu,
            avg_action_distribs=avg_action_distribs)

        # Compute gradients using thread-specific model
        self.model.cleargrads()
        F.squeeze(total_loss).backward()
        # Copy the gradients to the globally shared model
        copy_param.copy_grad(
            target_link=self.shared_model, source_link=self.model)
        # Update the globally shared model
        if self.process_idx == 0:
            norm = sum(np.sum(np.square(param.grad))
                       for param in self.optimizer.target.params()
                       if param.grad is not None)
            self.logger.debug('grad norm:%s', norm)
        self.optimizer.update()

        self.sync_parameters()
        if isinstance(self.model, Recurrent):
            self.model.unchain_backward() 
Example 25
Project: recordlinkage   Author: J535D165   File: test_classify.py    License: BSD 3-Clause "New" or "Revised" License
def test_fs_parameters(self, classifier):

        cl = classifier()
        if isinstance(cl, tuple(UNSUPERVISED_CLASSIFIERS)):
            cl.fit(self.X_train)
        else:
            cl.fit(self.X_train, self.y_train)

        # p
        assert np.isscalar(cl.p)
        assert np.exp(cl.log_p) == cl.p

        # m
        assert isinstance(cl.m_probs, dict)
        assert len(cl.m_probs.keys()) == self.X_train.shape[1]
        for col, value in cl.m_probs.items():
            for key, out in value.items():
                assert_almost_equal(
                    np.exp(cl.log_m_probs[col][key]),
                    cl.m_probs[col][key]
                )

        # u
        assert isinstance(cl.u_probs, dict)
        assert len(cl.u_probs.keys()) == self.X_train.shape[1]
        for col, value in cl.u_probs.items():
            for key, out in value.items():
                assert_almost_equal(
                    np.exp(cl.log_u_probs[col][key]),
                    cl.u_probs[col][key]
                ) 
Example 26
Project: Attention-Gated-Networks   Author: ozan-oktay   File: visualiser.py    License: MIT License
def plot_current_errors(self, epoch, errors, split_name, counter_ratio=0.0, **kwargs):
        if self.display_id > 0:
            for key in errors.keys():
                x = epoch + counter_ratio
                y = errors[key]
                if isinstance(y, dict):
                    if y['type'] == 'table':
                        self.plot_table_html(x,y,key,split_name, **kwargs)
                elif np.isscalar(y):
                    self.plot_line(x,y,key,split_name)
                elif y.ndim == 2:
                    self.plot_heatmap(x,y,key,split_name, **kwargs)


    # errors: same format as |errors| of plotCurrentErrors 
Example 27
Project: Attention-Gated-Networks   Author: ozan-oktay   File: visualiser.py    License: MIT License
def print_current_errors(self, epoch, errors, split_name):
        message = '(epoch: %d, split: %s) ' % (epoch, split_name)
        for k, v in errors.items():
            if np.isscalar(v):
                message += '%s: %.3f ' % (k, v)

        print(message)
        with open(self.log_name, "a") as log_file:
            log_file.write('%s\n' % message)

    # save image to the disk 
Example 28
Project: Attention-Gated-Networks   Author: ozan-oktay   File: error_logger.py    License: MIT License
def update(self, input_dict, split):

        for key, value in input_dict.items():
            if key not in self.variables[split]:
                if np.isscalar(value):
                    self.variables[split][key] = AverageMeter(name=key)
                else:
                    self.variables[split][key] = BaseMeter(name=key)

            self.variables[split][key].update(value) 
Example 29
Project: whynot   Author: zykls   File: test_envs.py    License: MIT License
def test_env(spec):
    # Capture warnings
    with pytest.warns(None) as warnings:
        env = spec.make()

    # Check that dtype is explicitly declared for gym.Box spaces
    for warning_msg in warnings:
        assert not "autodetected dtype" in str(warning_msg.message)

    ob_space = env.observation_space
    act_space = env.action_space
    ob = env.reset()
    assert ob_space.contains(ob), "Reset observation: {!r} not in space".format(ob)
    a = act_space.sample()
    observation, reward, done, _info = env.step(a)
    assert ob_space.contains(observation), "Step observation: {!r} not in space".format(
        observation
    )
    assert np.isscalar(reward), "{} is not a scalar for {}".format(reward, env)
    assert isinstance(done, bool), "Expected {} to be a boolean".format(done)

    for mode in env.metadata.get("render.modes", []):
        env.render(mode=mode)

    # Make sure we can render the environment after close.
    for mode in env.metadata.get("render.modes", []):
        env.render(mode=mode)

    env.close()


# Run a longer rollout on some environments 
Example 30
Project: pulse2percept   Author: pulse2percept   File: base.py    License: BSD 3-Clause "New" or "Revised" License
def _from_source(self, source):
        """Extract the data container and time information from source data

        This private method converts input data from allowable source types
        into a 2D NumPy array, where the first dimension denotes electrodes
        and the second dimension denotes points in time.

        Some stimuli don't have a time component (such as a stimulus created
        from a scalar or 1D NumPy array). In this case, time=None.
        """
        if np.isscalar(source) and not isinstance(source, str):
            # Scalar: 1 electrode, no time component
            data = np.array([source], dtype=np.float32).reshape((1, -1))
            time = None
            electrodes = None
        elif isinstance(source, (list, tuple)):
            # List or tuple with N elements: 1 electrode, N time points
            data = np.array(source, dtype=np.float32).reshape((1, -1))
            time = np.arange(data.shape[-1], dtype=np.float32)
            electrodes = None
        elif isinstance(source, np.ndarray):
            if source.ndim > 1:
                raise ValueError("Cannot create Stimulus object from a %d-D "
                                 "NumPy array. Must be 1-D." % source.ndim)
            # 1D NumPy array with N elements: 1 electrode, N time points
            data = source.astype(np.float32).reshape((1, -1))
            time = np.arange(data.shape[-1], dtype=np.float32)
            electrodes = None
        elif isinstance(source, Stimulus):
            # e.g. from a dictionary of Stimulus objects
            data = source.data
            time = source.time
            electrodes = source.electrodes
        else:
            raise TypeError("Cannot create Stimulus object from %s. Choose "
                            "from: scalar, tuple, list, NumPy array, or "
                            "Stimulus." % type(source))
        return time, data, electrodes
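One detail worth noting in the first branch above: np.isscalar() treats Python strings as scalars, which is why it is paired with an isinstance(source, str) check. A quick illustration (not part of pulse2percept):

import numpy as np

np.isscalar("A1")   # True -- strings are scalars to np.isscalar()
np.isscalar(3.5)    # True
# hence the guard: np.isscalar(source) and not isinstance(source, str)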