Python numpy.ndim() Examples

The following are 30 code examples showing how to use numpy.ndim(). These examples are extracted from open source projects; you can go to the original project or source file by following the links above each example.

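To set the stage for the examples below, here is a minimal usage sketch of numpy.ndim() itself (the values are illustrative): it accepts anything array-like, not just ndarrays, and returns the number of dimensions.

import numpy as np

print(np.ndim(3.0))                  # 0 -> a scalar has no dimensions
print(np.ndim([1, 2, 3]))            # 1 -> a flat list is one-dimensional
print(np.ndim([[1, 2], [3, 4]]))     # 2 -> a nested list is two-dimensional
print(np.ndim(np.zeros((2, 3, 4))))  # 3 -> same as np.zeros((2, 3, 4)).ndim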

Example 1
Project: feets   Author: quatrope   File: core.py    License: MIT License
def __repr__(self):
        """x.__repr__() <==> repr(x)."""
        if not hasattr(self, "__repr"):
            params = self.params or {}
            parsed_params = []
            for k, v in params.items():
                sk = str(k)
                if np.ndim(v) != 0 and np.size(v) > MAX_VALUES_TO_REPR:
                    tv = type(v)
                    sv = f"<{tv.__module__}.{tv.__name__}>"
                else:
                    sv = str(v)
                parsed_params.append(f"{sk}={sv}")
            str_params = ", ".join(parsed_params)
            self.__repr = f"{self.name}({str_params})"

        return self.__repr 
Example 2
Project: chainer-stylegan   Author: pfnet-research   File: dataset_augmentor.py    License: MIT License
def augment(self, image, isArray=False):
        if isArray: # if the input is a numpy array, convert back to PIL
            image = Image.fromarray(image)
        image = self.transform(image)
        image = np.asarray(image).astype('f')
        w, h = image.shape[0], image.shape[1]
        if np.ndim(image) == 2:
            ch = 1
        else:
            ch = np.shape(image)[2]
        image = image.reshape(w, h, ch)
        image = image.transpose((2, 0, 1))
        if self.scaling == 'none':
            return image 
        elif self.scaling == 'sigmoid':
            return self._scaling_sigmoid(image)
        elif self.scaling == 'tanh':
            return self._scaling_tanh(image)
        else:
            raise NotImplementedError 
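
The np.ndim check in this example is what tells a grayscale image (H, W) apart from a color image (H, W, C) before converting to channel-first layout. A standalone sketch of the same idea, with made-up shapes and names, might look like this:

import numpy as np

def to_chw(image):
    # Grayscale arrays arrive as (H, W); color arrays as (H, W, C).
    if np.ndim(image) == 2:
        image = image[:, :, np.newaxis]   # add a single channel axis
    return image.transpose((2, 0, 1))     # HWC -> CHW

print(to_chw(np.zeros((32, 32))).shape)     # (1, 32, 32)
print(to_chw(np.zeros((32, 32, 3))).shape)  # (3, 32, 32)
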
Example 3
Project: recruit   Author: Frank-qlu   File: fromnumeric.py    License: Apache License 2.0
def rank(a):
    """
    Return the number of dimensions of an array.

    .. note::
        This function is deprecated in NumPy 1.9 to avoid confusion with
        `numpy.linalg.matrix_rank`. The ``ndim`` attribute or function
        should be used instead.

    See Also
    --------
    ndim : equivalent non-deprecated function

    Notes
    -----
    In the old Numeric package, `rank` was the term used for the number of
    dimensions, but in NumPy `ndim` is used instead.
    """
    # 2014-04-12, 1.9
    warnings.warn(
        "`rank` is deprecated; use the `ndim` attribute or function instead. "
        "To find the rank of a matrix see `numpy.linalg.matrix_rank`.",
        VisibleDeprecationWarning, stacklevel=2)
    return ndim(a) 
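
The note in this docstring separates two different meanings of "rank". As a quick illustration of the replacement calls (assuming a NumPy version where np.rank has already been removed):

import numpy as np

a = np.eye(3)
print(np.ndim(a))                # 2 -> number of dimensions (what the old np.rank returned)
print(np.linalg.matrix_rank(a))  # 3 -> linear-algebra rank of the matrix
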
Example 4
Project: mplcursors   Author: anntzer   File: _pick_info.py    License: MIT License
def _format_scalarmappable_value(artist, idx):  # matplotlib/matplotlib#12473.
    data = artist.get_array()[idx]
    if np.ndim(data) == 0:
        if not artist.colorbar:
            fig = Figure()
            ax = fig.subplots()
            artist.colorbar = fig.colorbar(artist, cax=ax)
            # This hack updates the ticks without actually paying the cost of
            # drawing (RendererBase.draw_path raises NotImplementedError).
            try:
                ax.yaxis.draw(RendererBase())
            except NotImplementedError:
                pass
        fmt = artist.colorbar.formatter.format_data_short
        return "[" + _strip_math(fmt(data).strip()) + "]"
    else:
        return artist.format_cursor_data(data)  # Includes brackets. 
Example 5
Project: face_classification   Author: oarriaga   File: grad_cam.py    License: MIT License
def deprocess_image(x):
    """ Same normalization as in:
    https://github.com/fchollet/keras/blob/master/examples/conv_filter_visualization.py
    """
    if np.ndim(x) > 3:
        x = np.squeeze(x)
    # normalize tensor: center on 0., ensure std is 0.1
    x = x - x.mean()
    x = x / (x.std() + 1e-5)
    x = x * 0.1

    # clip to [0, 1]
    x = x + 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x = x * 255
    if K.image_dim_ordering() == 'th':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x 
Example 6
Project: lambda-packs   Author: ryfeus   File: fromnumeric.py    License: MIT License
def rank(a):
    """
    Return the number of dimensions of an array.

    .. note::
        This function is deprecated in NumPy 1.9 to avoid confusion with
        `numpy.linalg.matrix_rank`. The ``ndim`` attribute or function
        should be used instead.

    See Also
    --------
    ndim : equivalent non-deprecated function

    Notes
    -----
    In the old Numeric package, `rank` was the term used for the number of
    dimensions, but in NumPy `ndim` is used instead.
    """
    # 2014-04-12, 1.9
    warnings.warn(
        "`rank` is deprecated; use the `ndim` attribute or function instead. "
        "To find the rank of a matrix see `numpy.linalg.matrix_rank`.",
        VisibleDeprecationWarning, stacklevel=2)
    return ndim(a) 
Example 7
Project: simpleflow   Author: PytLab   File: operations.py    License: MIT License
def compute_gradient(self, grad=None):
        ''' Compute the gradients for this operation wrt input values.

        :param grad: The gradient of the other operation wrt the addition output.
        :type grad: number or an ndarray, default value is 1.0.
        '''
        x, y = [node.output_value for node in self.input_nodes]

        if grad is None:
            grad = np.ones_like(self.output_value)

        grad_wrt_x = grad
        while np.ndim(grad_wrt_x) > len(np.shape(x)):
            grad_wrt_x = np.sum(grad_wrt_x, axis=0)
        for axis, size in enumerate(np.shape(x)):
            if size == 1:
                grad_wrt_x = np.sum(grad_wrt_x, axis=axis, keepdims=True)

        grad_wrt_y = grad
        while np.ndim(grad_wrt_y) > len(np.shape(y)):
            grad_wrt_y = np.sum(grad_wrt_y, axis=0)
        for axis, size in enumerate(np.shape(y)):
            if size == 1:
                grad_wrt_y = np.sum(grad_wrt_y, axis=axis, keepdims=True)

        return [grad_wrt_x, grad_wrt_y] 
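
The two loops above undo NumPy broadcasting: leading axes are summed away until np.ndim of the gradient matches the operand's shape, and axes that were broadcast from size 1 are summed with keepdims=True. A standalone sketch of that reduction, independent of simpleflow and with illustrative names, could be:

import numpy as np

def unbroadcast(grad, shape):
    # Sum away leading axes that broadcasting prepended.
    while np.ndim(grad) > len(shape):
        grad = np.sum(grad, axis=0)
    # Sum over axes that were stretched from size 1, keeping them as size 1.
    for axis, size in enumerate(shape):
        if size == 1:
            grad = np.sum(grad, axis=axis, keepdims=True)
    return grad

g = np.ones((4, 3))                  # gradient of x + y, where y had shape (1, 3)
print(unbroadcast(g, (1, 3)).shape)  # (1, 3)
print(unbroadcast(g, (3,)).shape)    # (3,)
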
Example 8
Project: simpleflow   Author: PytLab   File: operations.py    License: MIT License
def compute_gradient(self, grad=None):
        ''' Compute and return gradients for this operation wrt input values.

        :param grad: The gradient of the other operation wrt the multiply output.
        :type grad: number or an ndarray.
        '''
        x, y = [node.output_value for node in self.input_nodes]

        if grad is None:
            grad = np.ones_like(self.output_value)

        grad_wrt_x = grad*y
        while np.ndim(grad_wrt_x) > len(np.shape(x)):
            grad_wrt_x = np.sum(grad_wrt_x, axis=0)
        for axis, size in enumerate(np.shape(x)):
            if size == 1:
                grad_wrt_x = np.sum(grad_wrt_x, axis=axis, keepdims=True)

        grad_wrt_y = grad*x
        while np.ndim(grad_wrt_y) > len(np.shape(y)):
            grad_wrt_y = np.sum(grad_wrt_y, axis=0)
        for axis, size in enumerate(np.shape(y)):
            if size == 1:
                grad_wrt_y = np.sum(grad_wrt_y, axis=axis, keepdims=True)

        return [grad_wrt_x, grad_wrt_y] 
Example 9
Project: tangent   Author: google   File: tangents.py    License: Apache License 2.0
def tndim(z, x):
  d[z] = numpy.ndim(d[x]) 
Example 10
Project: feets   Author: quatrope   File: core.py    License: MIT License
def flatten_feature(self, feature, value, **kwargs):
        """Convert the features into a dict of 1 dimension values.

        The methods check if the dimension of the value is 1 then a
        dictionary with key the feature name, and the value the value.
        In other cases an recursive approach is taken where every feature
        has as name `feature_<N>` as name, where N is the current dimension.

        Example
        -------

        .. code-block:: pycon

            >>> e.flatten("name", 1)
            {'name': 1}
            >>> e.flatten("name", [1, 2, 3])
            {'name_0': 1, 'name_1': 2, 'name_2': 3}
            >>> e.flatten("name", [1, [2, 3]])
            {'name_0': 1, 'name_1_0': 2, 'name_1_1': 3}
            >>> flatten("name", [[1, 2], [3, 4]])
            {'name_0_0': 1, 'name_0_1': 2, 'name_1_0': 3, 'name_1_1': 4}

        """
        if np.ndim(value) == 0:
            return {feature: value}
        flatten_values = {}
        for idx, v in enumerate(value):
            flatten_name = f"{feature}_{idx}"
            flatten_values.update(
                self.flatten_feature(flatten_name, v, **kwargs)
            )
        return flatten_values 
Example 11
Project: mabwiser   Author: fidelity   File: test_radius.py    License: Apache License 2.0
def test_partial_fit_greedy0_r2(self):

        arms, mab = self.predict(arms=[1, 2, 3, 4],
                                 decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
                                 rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
                                 learning_policy=LearningPolicy.EpsilonGreedy(epsilon=0),
                                 neighborhood_policy=NeighborhoodPolicy.Radius(2),
                                 context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
                                                  [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
                                                  [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
                                                  [0, 2, 1, 0, 0]],
                                 contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
                                 seed=123456,
                                 num_run=1,
                                 is_predict=True)

        self.assertListEqual(arms, [3, 1])
        self.assertEqual(len(mab._imp.decisions), 10)
        self.assertEqual(len(mab._imp.rewards), 10)
        self.assertEqual(len(mab._imp.contexts), 10)
        self.assertEqual(np.ndim(mab._imp.decisions), 1)

        decisions2 = [1, 2, 3]
        rewards2 = [1, 1, 1]
        context_history2 = [[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0]]
        mab.partial_fit(decisions2, rewards2, context_history2)

        self.assertEqual(len(mab._imp.decisions), 13)
        self.assertEqual(len(mab._imp.rewards), 13)
        self.assertEqual(len(mab._imp.contexts), 13)
        self.assertEqual(np.ndim(mab._imp.decisions), 1) 
Example 12
Project: mabwiser   Author: fidelity   File: test_radius.py    License: Apache License 2.0
def test_partial_fit_thompson_thresholds(self):

        arm_to_threshold = {1: 1, 2: 5, 3: 2, 4: 3}

        def binarize(arm, reward):
            return reward >= arm_to_threshold[arm]

        arms, mab = self.predict(arms=[1, 2, 3, 4],
                                 decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
                                 rewards=[0, 1, 7, 0, 1, 9, 0, 2, 6, 11],
                                 learning_policy=LearningPolicy.ThompsonSampling(binarize),
                                 neighborhood_policy=NeighborhoodPolicy.Radius(2),
                                 context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
                                                  [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
                                                  [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
                                                  [0, 2, 1, 0, 0]],
                                 contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
                                 seed=123456,
                                 num_run=1,
                                 is_predict=True)

        self.assertTrue(mab._imp.lp.is_contextual_binarized)
        self.assertListEqual(arms, [3, 4])
        self.assertEqual(len(mab._imp.decisions), 10)
        self.assertEqual(len(mab._imp.rewards), 10)
        self.assertEqual(len(mab._imp.contexts), 10)
        self.assertEqual(np.ndim(mab._imp.decisions), 1)
        self.assertTrue(mab._imp.rewards.all() in [0, 1])

        decisions2 = [1, 2, 3]
        rewards2 = [11, 1, 6]
        context_history2 = [[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0]]
        mab.partial_fit(decisions2, rewards2, context_history2)

        self.assertEqual(len(mab._imp.decisions), 13)
        self.assertEqual(len(mab._imp.rewards), 13)
        self.assertEqual(len(mab._imp.contexts), 13)
        self.assertEqual(np.ndim(mab._imp.decisions), 1)
        self.assertTrue(mab._imp.rewards.all() in [0, 1]) 
Example 13
Project: mabwiser   Author: fidelity   File: test_radius.py    License: Apache License 2.0
def test_fit_twice_thompson_thresholds(self):

        arm_to_threshold = {1: 1, 2: 5, 3: 2, 4: 3}

        def binarize(arm, reward):
            return reward >= arm_to_threshold[arm]

        arms, mab = self.predict(arms=[1, 2, 3, 4],
                                 decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
                                 rewards=[0, 1, 7, 0, 1, 9, 0, 2, 6, 11],
                                 learning_policy=LearningPolicy.ThompsonSampling(binarize),
                                 neighborhood_policy=NeighborhoodPolicy.Radius(2),
                                 context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
                                                  [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
                                                  [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
                                                  [0, 2, 1, 0, 0]],
                                 contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
                                 seed=123456,
                                 num_run=1,
                                 is_predict=True)

        self.assertTrue(mab._imp.lp.is_contextual_binarized)
        self.assertListEqual(arms, [3, 4])
        self.assertEqual(len(mab._imp.decisions), 10)
        self.assertEqual(len(mab._imp.rewards), 10)
        self.assertEqual(len(mab._imp.contexts), 10)
        self.assertEqual(np.ndim(mab._imp.decisions), 1)
        self.assertTrue(mab._imp.rewards.all() in [0, 1])

        decisions2 = [1, 2, 3]
        rewards2 = [11, 1, 6]
        context_history2 = [[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0]]
        mab.fit(decisions2, rewards2, context_history2)

        self.assertEqual(len(mab._imp.decisions), 3)
        self.assertEqual(len(mab._imp.rewards), 3)
        self.assertEqual(len(mab._imp.contexts), 3)
        self.assertEqual(np.ndim(mab._imp.decisions), 1)
        self.assertTrue(mab._imp.rewards.all() in [0, 1]) 
Example 14
Project: mabwiser   Author: fidelity   File: test_clusters.py    License: Apache License 2.0
def test_partial_fit_thompson_thresholds(self):

        arm_to_threshold = {1: 1, 2: 5, 3: 2, 4: 3}

        def binarize(arm, reward):
            return reward >= arm_to_threshold[arm]

        arms, mab = self.predict(arms=[1, 2, 3, 4],
                                 decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
                                 rewards=[0, 1, 7, 0, 1, 9, 0, 2, 6, 11],
                                 learning_policy=LearningPolicy.ThompsonSampling(binarize),
                                 neighborhood_policy=NeighborhoodPolicy.Clusters(3),
                                 context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
                                                  [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
                                                  [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
                                                  [0, 2, 1, 0, 0]],
                                 contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
                                 seed=123456,
                                 num_run=1,
                                 is_predict=True)

        self.assertTrue(mab._imp.lp_list[0].is_contextual_binarized)
        self.assertListEqual(arms, [3, 4])
        self.assertEqual(len(mab._imp.decisions), 10)
        self.assertEqual(len(mab._imp.rewards), 10)
        self.assertEqual(len(mab._imp.contexts), 10)
        self.assertEqual(np.ndim(mab._imp.decisions), 1)
        self.assertListEqual(list(set(mab._imp.rewards)), [0, 1])

        decisions2 = [1, 2, 3]
        rewards2 = [11, 1, 6]
        context_history2 = [[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0]]
        mab.partial_fit(decisions2, rewards2, context_history2)

        self.assertEqual(len(mab._imp.decisions), 13)
        self.assertEqual(len(mab._imp.rewards), 13)
        self.assertEqual(len(mab._imp.contexts), 13)
        self.assertEqual(np.ndim(mab._imp.decisions), 1)
        self.assertListEqual(list(set(mab._imp.rewards)), [0, 1]) 
Example 15
Project: mabwiser   Author: fidelity   File: test_clusters.py    License: Apache License 2.0
def test_fit_twice_thompson_thresholds(self):

        arm_to_threshold = {1: 1, 2: 5, 3: 2, 4: 3}

        def binarize(arm, reward):
            return reward >= arm_to_threshold[arm]

        arms, mab = self.predict(arms=[1, 2, 3, 4],
                                 decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
                                 rewards=[0, 1, 7, 0, 1, 9, 0, 2, 6, 11],
                                 learning_policy=LearningPolicy.ThompsonSampling(binarize),
                                 neighborhood_policy=NeighborhoodPolicy.Clusters(3),
                                 context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
                                                  [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
                                                  [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
                                                  [0, 2, 1, 0, 0]],
                                 contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
                                 seed=123456,
                                 num_run=1,
                                 is_predict=True)

        self.assertTrue(mab._imp.lp_list[0].is_contextual_binarized)
        self.assertListEqual(arms, [3, 4])
        self.assertEqual(len(mab._imp.decisions), 10)
        self.assertEqual(len(mab._imp.rewards), 10)
        self.assertEqual(len(mab._imp.contexts), 10)
        self.assertEqual(np.ndim(mab._imp.decisions), 1)
        self.assertListEqual(list(set(mab._imp.rewards)), [0, 1])

        decisions2 = [1, 2, 3]
        rewards2 = [11, 1, 6]
        context_history2 = [[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0]]
        mab.fit(decisions2, rewards2, context_history2)

        self.assertEqual(len(mab._imp.decisions), 3)
        self.assertEqual(len(mab._imp.rewards), 3)
        self.assertEqual(len(mab._imp.contexts), 3)
        self.assertEqual(np.ndim(mab._imp.decisions), 1)
        self.assertListEqual(list(set(mab._imp.rewards)), [0, 1]) 
Example 16
Project: mabwiser   Author: fidelity   File: test_nearest.py    License: Apache License 2.0
def test_partial_fit_greedy0_r2(self):

        arms, mab = self.predict(arms=[1, 2, 3, 4],
                                 decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
                                 rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
                                 learning_policy=LearningPolicy.EpsilonGreedy(epsilon=0),
                                 neighborhood_policy=NeighborhoodPolicy.KNearest(2),
                                 context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
                                                  [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
                                                  [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
                                                  [0, 2, 1, 0, 0]],
                                 contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
                                 seed=123456,
                                 num_run=1,
                                 is_predict=True)

        self.assertListEqual(arms, [1, 1])
        self.assertEqual(len(mab._imp.decisions), 10)
        self.assertEqual(len(mab._imp.decisions), 10)
        self.assertEqual(len(mab._imp.rewards), 10)
        self.assertEqual(np.ndim(mab._imp.decisions), 1)

        decisions2 = [1, 2, 3]
        rewards2 = [1, 1, 1]
        context_history2 = [[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0]]
        mab.partial_fit(decisions2, rewards2, context_history2)

        self.assertEqual(len(mab._imp.decisions), 13)
        self.assertEqual(len(mab._imp.rewards), 13)
        self.assertEqual(len(mab._imp.contexts), 13)
        self.assertEqual(np.ndim(mab._imp.decisions), 1) 
Example 17
Project: mabwiser   Author: fidelity   File: test_nearest.py    License: Apache License 2.0
def test_partial_fit_thompson_thresholds(self):

        arm_to_threshold = {1: 1, 2: 5, 3: 2, 4: 3}

        def binarize(arm, reward):
            return reward >= arm_to_threshold[arm]

        arms, mab = self.predict(arms=[1, 2, 3, 4],
                                 decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
                                 rewards=[0, 1, 7, 0, 1, 9, 0, 2, 6, 11],
                                 learning_policy=LearningPolicy.ThompsonSampling(binarize),
                                 neighborhood_policy=NeighborhoodPolicy.KNearest(2),
                                 context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
                                                  [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
                                                  [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
                                                  [0, 2, 1, 0, 0]],
                                 contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
                                 seed=123456,
                                 num_run=1,
                                 is_predict=True)

        self.assertTrue(mab._imp.lp.is_contextual_binarized)
        self.assertListEqual(arms, [4, 4])
        self.assertEqual(len(mab._imp.decisions), 10)
        self.assertEqual(len(mab._imp.rewards), 10)
        self.assertEqual(len(mab._imp.contexts), 10)
        self.assertEqual(np.ndim(mab._imp.decisions), 1)
        self.assertListEqual(list(set(mab._imp.rewards)), [0, 1])

        decisions2 = [1, 2, 3]
        rewards2 = [11, 1, 6]
        context_history2 = [[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0]]
        mab.partial_fit(decisions2, rewards2, context_history2)

        self.assertEqual(len(mab._imp.decisions), 13)
        self.assertEqual(len(mab._imp.rewards), 13)
        self.assertEqual(len(mab._imp.contexts), 13)
        self.assertEqual(np.ndim(mab._imp.decisions), 1)
        arm = mab.predict([[0, 1, 2, 3, 5]])
        self.assertEqual(arm, 3)
        self.assertListEqual(list(set(mab._imp.rewards)), [0, 1]) 
Example 18
Project: mabwiser   Author: fidelity   File: test_nearest.py    License: Apache License 2.0
def test_fit_twice_thompson_thresholds(self):

        arm_to_threshold = {1: 1, 2: 5, 3: 2, 4: 3}

        def binarize(arm, reward):
            return reward >= arm_to_threshold[arm]

        arms, mab = self.predict(arms=[1, 2, 3, 4],
                                 decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
                                 rewards=[0, 1, 7, 0, 1, 9, 0, 2, 6, 11],
                                 learning_policy=LearningPolicy.ThompsonSampling(binarize),
                                 neighborhood_policy=NeighborhoodPolicy.KNearest(2),
                                 context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
                                                  [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
                                                  [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
                                                  [0, 2, 1, 0, 0]],
                                 contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
                                 seed=123456,
                                 num_run=1,
                                 is_predict=True)

        self.assertTrue(mab._imp.lp.is_contextual_binarized)
        self.assertListEqual(arms, [4, 4])
        self.assertEqual(len(mab._imp.decisions), 10)
        self.assertEqual(len(mab._imp.rewards), 10)
        self.assertEqual(len(mab._imp.contexts), 10)
        self.assertEqual(np.ndim(mab._imp.decisions), 1)
        self.assertListEqual(list(set(mab._imp.rewards)), [0, 1])

        decisions2 = [1, 2, 3]
        rewards2 = [11, 1, 6]
        context_history2 = [[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0]]
        mab.fit(decisions2, rewards2, context_history2)

        self.assertEqual(len(mab._imp.decisions), 3)
        self.assertEqual(len(mab._imp.rewards), 3)
        self.assertEqual(len(mab._imp.contexts), 3)
        self.assertEqual(np.ndim(mab._imp.decisions), 1)
        self.assertListEqual(list(set(mab._imp.rewards)), [0, 1]) 
Example 19
Project: recruit   Author: Frank-qlu   File: function_base.py    License: Apache License 2.0
def _update_dim_sizes(dim_sizes, arg, core_dims):
    """
    Incrementally check and update core dimension sizes for a single argument.

    Arguments
    ---------
    dim_sizes : Dict[str, int]
        Sizes of existing core dimensions. Will be updated in-place.
    arg : ndarray
        Argument to examine.
    core_dims : Tuple[str, ...]
        Core dimensions for this argument.
    """
    if not core_dims:
        return

    num_core_dims = len(core_dims)
    if arg.ndim < num_core_dims:
        raise ValueError(
            '%d-dimensional argument does not have enough '
            'dimensions for all core dimensions %r'
            % (arg.ndim, core_dims))

    core_shape = arg.shape[-num_core_dims:]
    for dim, size in zip(core_dims, core_shape):
        if dim in dim_sizes:
            if size != dim_sizes[dim]:
                raise ValueError(
                    'inconsistent size for core dimension %r: %r vs %r'
                    % (dim, size, dim_sizes[dim]))
        else:
            dim_sizes[dim] = size 
Example 20
Project: recruit   Author: Frank-qlu   File: function_base.py    License: Apache License 2.0
def _parse_input_dimensions(args, input_core_dims):
    """
    Parse broadcast and core dimensions for vectorize with a signature.

    Arguments
    ---------
    args : Tuple[ndarray, ...]
        Tuple of input arguments to examine.
    input_core_dims : List[Tuple[str, ...]]
        List of core dimensions corresponding to each input.

    Returns
    -------
    broadcast_shape : Tuple[int, ...]
        Common shape to broadcast all non-core dimensions to.
    dim_sizes : Dict[str, int]
        Common sizes for named core dimensions.
    """
    broadcast_args = []
    dim_sizes = {}
    for arg, core_dims in zip(args, input_core_dims):
        _update_dim_sizes(dim_sizes, arg, core_dims)
        ndim = arg.ndim - len(core_dims)
        dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim])
        broadcast_args.append(dummy_array)
    broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args)
    return broadcast_shape, dim_sizes 
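
Both helpers above are internal machinery behind np.vectorize when a signature is given; the public behaviour they support can be sketched as follows (the function being vectorized here is purely illustrative):

import numpy as np

# '(n),(n)->()' declares one shared core dimension 'n' for both inputs;
# all remaining (loop) dimensions are broadcast in the usual way.
dot = np.vectorize(lambda a, b: np.dot(a, b), signature='(n),(n)->()')

x = np.ones((5, 3))        # five vectors of length 3
y = np.arange(3)           # broadcast against the leading axis of x
print(dot(x, y).shape)     # (5,)
print(np.ndim(dot(x, y)))  # 1
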
Example 21
Project: recruit   Author: Frank-qlu   File: test_core.py    License: Apache License 2.0
def test_basicattributes(self):
        # Tests some basic array attributes.
        a = array([1, 3, 2])
        b = array([1, 3, 2], mask=[1, 0, 1])
        assert_equal(a.ndim, 1)
        assert_equal(b.ndim, 1)
        assert_equal(a.size, 3)
        assert_equal(b.size, 3)
        assert_equal(a.shape, (3,))
        assert_equal(b.shape, (3,)) 
Example 22
Project: recruit   Author: Frank-qlu   File: test_core.py    License: Apache License 2.0
def test_fillvalue_exotic_dtype(self):
        # Tests yet more exotic flexible dtypes
        _check_fill_value = np.ma.core._check_fill_value
        ndtype = [('i', int), ('s', '|S8'), ('f', float)]
        control = np.array((default_fill_value(0),
                            default_fill_value('0'),
                            default_fill_value(0.),),
                           dtype=ndtype)
        assert_equal(_check_fill_value(None, ndtype), control)
        # The shape shouldn't matter
        ndtype = [('f0', float, (2, 2))]
        control = np.array((default_fill_value(0.),),
                           dtype=[('f0', float)]).astype(ndtype)
        assert_equal(_check_fill_value(None, ndtype), control)
        control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
        assert_equal(_check_fill_value(0, ndtype), control)

        ndtype = np.dtype("int, (2,3)float, float")
        control = np.array((default_fill_value(0),
                            default_fill_value(0.),
                            default_fill_value(0.),),
                           dtype="int, float, float").astype(ndtype)
        test = _check_fill_value(None, ndtype)
        assert_equal(test, control)
        control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
        assert_equal(_check_fill_value(0, ndtype), control)
        # but when indexing, fill value should become scalar not tuple
        # See issue #6723
        M = masked_array(control)
        assert_equal(M["f1"].fill_value.ndim, 0) 
Example 23
Project: recruit   Author: Frank-qlu   File: test_core.py    License: Apache License 2.0
def test_compressed(self):
        # Test ma.compressed function.
        # Address gh-4026
        a = np.ma.array([1, 2])
        test = np.ma.compressed(a)
        assert_(type(test) is np.ndarray)

        # Test case when input data is ndarray subclass
        class A(np.ndarray):
            pass

        a = np.ma.array(A(shape=0))
        test = np.ma.compressed(a)
        assert_(type(test) is A)

        # Test that compress flattens
        test = np.ma.compressed([[1],[2]])
        assert_equal(test.ndim, 1)
        test = np.ma.compressed([[[[[1]]]]])
        assert_equal(test.ndim, 1)

        # Test case when input is MaskedArray subclass
        class M(MaskedArray):
            pass

        test = np.ma.compressed(M(shape=(0,1,2)))
        assert_equal(test.ndim, 1)

        # with .compressed() overridden
        class M(MaskedArray):
            def compressed(self):
                return 42

        test = np.ma.compressed(M(shape=(0,1,2)))
        assert_equal(test, 42) 
Example 24
Project: recruit   Author: Frank-qlu   File: test_core.py    License: Apache License 2.0
def test_count(self):
        # test np.ma.count specially

        d = np.arange(24.0).reshape((2,3,4))
        m = np.zeros(24, dtype=bool).reshape((2,3,4))
        m[:,0,:] = True
        a = np.ma.array(d, mask=m)

        assert_equal(count(a), 16)
        assert_equal(count(a, axis=1), 2*ones((2,4)))
        assert_equal(count(a, axis=(0,1)), 4*ones((4,)))
        assert_equal(count(a, keepdims=True), 16*ones((1,1,1)))
        assert_equal(count(a, axis=1, keepdims=True), 2*ones((2,1,4)))
        assert_equal(count(a, axis=(0,1), keepdims=True), 4*ones((1,1,4)))
        assert_equal(count(a, axis=-2), 2*ones((2,4)))
        assert_raises(ValueError, count, a, axis=(1,1))
        assert_raises(np.AxisError, count, a, axis=3)

        # check the 'nomask' path
        a = np.ma.array(d, mask=nomask)

        assert_equal(count(a), 24)
        assert_equal(count(a, axis=1), 3*ones((2,4)))
        assert_equal(count(a, axis=(0,1)), 6*ones((4,)))
        assert_equal(count(a, keepdims=True), 24*ones((1,1,1)))
        assert_equal(np.ndim(count(a, keepdims=True)), 3)
        assert_equal(count(a, axis=1, keepdims=True), 3*ones((2,1,4)))
        assert_equal(count(a, axis=(0,1), keepdims=True), 6*ones((1,1,4)))
        assert_equal(count(a, axis=-2), 3*ones((2,4)))
        assert_raises(ValueError, count, a, axis=(1,1))
        assert_raises(np.AxisError, count, a, axis=3)

        # check the 'masked' singleton
        assert_equal(count(np.ma.masked), 0)

        # check 0-d arrays do not allow axis > 0
        assert_raises(np.AxisError, count, np.ma.array(1), axis=1) 
Example 25
Project: recruit   Author: Frank-qlu   File: numeric.py    License: Apache License 2.0
def ascontiguousarray(a, dtype=None):
    """
    Return a contiguous array (ndim >= 1) in memory (C order).

    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        Data-type of returned array.

    Returns
    -------
    out : ndarray
        Contiguous array of same shape and content as `a`, with type `dtype`
        if specified.

    See Also
    --------
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    require : Return an ndarray that satisfies requirements.
    ndarray.flags : Information about the memory layout of the array.

    Examples
    --------
    >>> x = np.arange(6).reshape(2,3)
    >>> np.ascontiguousarray(x, dtype=np.float32)
    array([[ 0.,  1.,  2.],
           [ 3.,  4.,  5.]], dtype=float32)
    >>> x.flags['C_CONTIGUOUS']
    True

    Note: This function returns an array with at least one dimension (1-d),
    so it will not preserve 0-d arrays.

    """
    return array(a, dtype, copy=False, order='C', ndmin=1) 
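
The note about 0-d arrays is easy to confirm: because the call passes ndmin=1, a scalar input comes back as a one-element, one-dimensional array. A quick check (illustrative):

import numpy as np

print(np.ndim(5))                    # 0 -> plain Python scalar
print(np.ascontiguousarray(5).ndim)  # 1 -> the 0-d input was promoted to 1-d
print(np.ascontiguousarray(5))       # [5]
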
Example 26
Project: recruit   Author: Frank-qlu   File: numeric.py    License: Apache License 2.0
def asfortranarray(a, dtype=None):
    """
    Return an array (ndim >= 1) laid out in Fortran order in memory.

    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        By default, the data-type is inferred from the input data.

    Returns
    -------
    out : ndarray
        The input `a` in Fortran, or column-major, order.

    See Also
    --------
    ascontiguousarray : Convert input to a contiguous (C order) array.
    asanyarray : Convert input to an ndarray with either row or
        column-major memory order.
    require : Return an ndarray that satisfies requirements.
    ndarray.flags : Information about the memory layout of the array.

    Examples
    --------
    >>> x = np.arange(6).reshape(2,3)
    >>> y = np.asfortranarray(x)
    >>> x.flags['F_CONTIGUOUS']
    False
    >>> y.flags['F_CONTIGUOUS']
    True

    Note: This function returns an array with at least one dimension (1-d),
    so it will not preserve 0-d arrays.

    """
    return array(a, dtype, copy=False, order='F', ndmin=1) 
Example 27
Project: recruit   Author: Frank-qlu   File: ops.py    License: Apache License 2.0
def _flex_comp_method_FRAME(cls, op, special):
    str_rep = _get_opstr(op, cls)
    op_name = _get_op_name(op, special)
    default_axis = _get_frame_op_default_axis(op_name)

    def na_op(x, y):
        try:
            with np.errstate(invalid='ignore'):
                result = op(x, y)
        except TypeError:
            result = mask_cmp_op(x, y, op, (np.ndarray, ABCSeries))
        return result

    doc = _flex_comp_doc_FRAME.format(op_name=op_name,
                                      desc=_op_descriptions[op_name]['desc'])

    @Appender(doc)
    def f(self, other, axis=default_axis, level=None):

        other = _align_method_FRAME(self, other, axis)

        if isinstance(other, ABCDataFrame):
            # Another DataFrame
            if not self._indexed_same(other):
                self, other = self.align(other, 'outer',
                                         level=level, copy=False)
            return dispatch_to_series(self, other, na_op, str_rep)

        elif isinstance(other, ABCSeries):
            return _combine_series_frame(self, other, na_op,
                                         fill_value=None, axis=axis,
                                         level=level)
        else:
            assert np.ndim(other) == 0, other
            return self._combine_const(other, na_op)

    f.__name__ = op_name

    return f 
Example 28
Project: iGAN   Author: junyanz   File: iGAN_predict.py    License: MIT License
def predict_z(gen_model, _predict, ims, batch_size=32):
    n = ims.shape[0]
    n_gen = 0
    zs = []
    n_batch = int(np.ceil(n / float(batch_size)))
    for i in range(n_batch):
        imb = gen_model.transform(ims[batch_size * i:min(n, batch_size * (i + 1)), :, :, :])
        zmb = _predict(imb)
        zs.append(zmb)
        n_gen += len(imb)
    zs = np.squeeze(np.concatenate(zs, axis=0))
    if np.ndim(zs) == 1:
        zs = zs[np.newaxis, :]

    return zs 
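
The np.ndim guard after np.squeeze above protects the batch axis: squeezing a batch that happens to contain a single sample would otherwise drop that axis as well. A minimal sketch of the same pattern, with made-up shapes:

import numpy as np

z = np.zeros((1, 1, 128))    # one sample, one extra singleton axis, 128 latent dims
z = np.squeeze(z)            # squeeze removes *all* size-1 axes -> shape (128,)
if np.ndim(z) == 1:
    z = z[np.newaxis, :]     # restore the batch axis
print(z.shape)               # (1, 128)
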
Example 29
Project: aboleth   Author: gradientinstitute   File: distributions.py    License: Apache License 2.0
def norm_posterior(dim, std0, suffix=None):
    """Initialise a posterior (diagonal) Normal distribution.

    Parameters
    ----------
    dim : tuple or list
        the dimension of this distribution.
    std0 : float, np.array
        the initial (unoptimized) standard deviation of this distribution.
        Must be a scalar or have the same shape as dim.
    suffix : str
        suffix to add to the names of the variables of the parameters of this
        distribution.

    Returns
    -------
    Q : tf.distributions.Normal
        the initialised posterior Normal object.

    Note
    ----
    This will make tf.Variables on the mean standard deviation of the
    posterior. The initialisation of the mean is zero and the initialisation of
    the standard deviation is simply ``std0`` for each element.

    """
    assert (np.ndim(std0) == 0) or (np.shape(std0) == dim)
    mu_0 = tf.zeros(dim)
    mu = tf.Variable(mu_0, name=_add_suffix("W_mu_q", suffix))

    if np.ndim(std0) == 0:
        std0 = tf.ones(dim) * std0

    std = pos_variable(std0, name=_add_suffix("W_std_q", suffix))
    summary_histogram(mu)
    summary_histogram(std)

    Q = tf.distributions.Normal(loc=mu, scale=std)
    return Q 
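
The np.ndim checks above accept either a scalar std0 or an array whose shape matches dim, and broadcast the scalar case to the full shape. The same validate-and-broadcast logic can be sketched in plain NumPy, without TensorFlow (names here are illustrative):

import numpy as np

def make_std(dim, std0):
    # std0 must be a scalar or already have the requested shape.
    assert (np.ndim(std0) == 0) or (np.shape(std0) == tuple(dim))
    if np.ndim(std0) == 0:
        std0 = np.ones(dim) * std0    # broadcast the scalar to the full shape
    return np.asarray(std0)

print(make_std((3, 2), 0.1).shape)                   # (3, 2)
print(make_std((3, 2), np.full((3, 2), 0.1)).shape)  # (3, 2)
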
Example 30
Project: formulas   Author: vinci1it2000   File: ranges.py    License: European Union Public License 1.1
def set_value(self, rng, value=sh.EMPTY):
        self._value = sh.NONE
        self.ranges += rng,
        if value is not sh.EMPTY:
            if not isinstance(value, Array):
                if not np.ndim(value):
                    value = [[value]]
                value = np.asarray(value, object)
            shape = _shape(**rng)
            value = _reshape_array_as_excel(value, shape)
            self.values[rng['name']] = (rng, value)

        return self
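
The "if not np.ndim(value)" test above relies on np.ndim returning 0 (falsy) for scalars, so bare values are wrapped into a 1x1 cell before being reshaped into an Excel-style range. A tiny illustration of just that wrapping step:

import numpy as np

def as_cells(value):
    # Scalars (np.ndim == 0) become a 1x1 array; array-likes are kept as-is.
    if not np.ndim(value):
        value = [[value]]
    return np.asarray(value, object)

print(as_cells(42).shape)                # (1, 1)
print(as_cells([[1, 2], [3, 4]]).shape)  # (2, 2)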