Python numpy.prod() Examples

The following are 29 code examples of numpy.prod(), drawn from open-source projects. The source file, project, and license are listed above each example.
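Before the project examples, here is a minimal standalone demonstration of numpy.prod() itself; the recurring idiom in the examples below is turning a shape tuple into an element count:

import numpy as np

np.prod([2, 3, 4])           # 24 -- product of all elements

shape = (3, 5, 2)
int(np.prod(shape))          # 30 -- matches np.zeros(shape).size

a = np.arange(1, 7).reshape(2, 3)
np.prod(a, axis=1)           # array([  6, 120]) -- reduce over selected axes only

np.prod(())                  # 1.0 -- the empty product is 1 (as a float)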
Example #1
Source File: densenet.py    From Recipes with MIT License
def sample(self, shape):
        import numpy as np
        rng = lasagne.random.get_rng()
        if len(shape) >= 4:
            # convolutions use Gaussians with stddev of sqrt(2/fan_out), see
            # https://github.com/liuzhuang13/DenseNet/blob/cbb6bff/densenet.lua#L85-L86
            # and https://github.com/facebook/fb.resnet.torch/issues/106
            fan_out = shape[0] * np.prod(shape[2:])
            W = rng.normal(0, np.sqrt(2. / fan_out),
                           size=shape)
        elif len(shape) == 2:
            # the dense layer uses Uniform of range sqrt(1/fan_in), see
            # https://github.com/torch/nn/blob/651103f/Linear.lua#L21-L43
            fan_in = shape[0]
            W = rng.uniform(-np.sqrt(1. / fan_in), np.sqrt(1. / fan_in),
                            size=shape)
        else:
            # guard: without this branch, W would be undefined for 1D/3D shapes
            raise ValueError('Unsupported shape: %s' % (shape,))
        return lasagne.utils.floatX(W)
Example #2
Source File: count_weights.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def count_weights(scope=None, exclude=None, graph=None):
  """Count learnable parameters.

  Args:
    scope: Restrict the count to a variable scope.
    exclude: Regex to match variable names to exclude.
    graph: Operate on a graph other than the current default graph.

  Returns:
    Number of learnable parameters as integer.
  """
  if scope:
    scope = scope if scope.endswith('/') else scope + '/'
  graph = graph or tf.get_default_graph()
  vars_ = graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
  if scope:
    vars_ = [var for var in vars_ if var.name.startswith(scope)]
  if exclude:
    exclude = re.compile(exclude)
    vars_ = [var for var in vars_ if not exclude.match(var.name)]
  shapes = [var.get_shape().as_list() for var in vars_]
  return int(sum(np.prod(shape) for shape in shapes)) 
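A quick sanity check of the counting arithmetic, independent of TensorFlow (the shapes are made up for illustration):

import numpy as np

shapes = [[64, 3, 3, 3], [64], [10, 64]]     # hypothetical variable shapes
total = int(sum(np.prod(s) for s in shapes))
print(total)                                 # 1728 + 64 + 640 = 2432

The outer int() matters: np.prod returns a NumPy scalar, and for a scalar variable with shape [] it returns the float 1.0.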
Example #3
Source File: map_base.py    From dustmaps with GNU General Public License v2.0
def ensure_flat_frame(f, frame=None):
    def _wrapper_func(self, coords, **kwargs):
        if (frame is not None) and (coords.frame.name != frame):
            coords_transf = coords.transform_to(frame)
        else:
            coords_transf = coords

        is_array = not coords.isscalar
        if is_array:
            orig_shape = coords.shape
            shape_flat = (np.prod(orig_shape),)
            coords_transf = coords_to_shape(coords_transf, shape_flat)
        else:
            coords_transf = coords_to_shape(coords_transf, (1,))

        out = f(self, coords_transf, **kwargs)

        if is_array:
            out.shape = orig_shape + out.shape[1:]
        else:
            out = out[0]

        return out

    return _wrapper_func 
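Underneath the decorator, the flatten-and-restore trick is plain NumPy; a minimal sketch of the same round trip, using an ordinary array in place of the coords object:

import numpy as np

x = np.arange(24).reshape(2, 3, 4)                       # stand-in for coords
orig_shape = x.shape[:2]                                 # treat last axis as per-point data
flat = x.reshape((np.prod(orig_shape),) + x.shape[2:])   # (6, 4)
out = flat * 2                                           # stand-in for f(...)
out = out.reshape(orig_shape + out.shape[1:])            # restore to (2, 3, 4)
assert out.shape == (2, 3, 4)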
Example #4
Source File: tfutil.py    From disentangling_conditional_gans with MIT License
def print_layers(self, title=None, hide_layers_with_no_params=False):
        if title is None: title = self.name
        print()
        print('%-28s%-12s%-24s%-24s' % (title, 'Params', 'OutputShape', 'WeightShape'))
        print('%-28s%-12s%-24s%-24s' % (('---',) * 4))

        total_params = 0
        for layer_name, layer_output, layer_trainables in self.list_layers():
            weights = [var for var in layer_trainables if var.name.endswith('/weight:0')]
            num_params = sum(np.prod(shape_to_list(var.shape)) for var in layer_trainables)
            total_params += num_params
            if hide_layers_with_no_params and num_params == 0:
                continue

            print('%-28s%-12s%-24s%-24s' % (
                layer_name,
                num_params if num_params else '-',
                layer_output.shape,
                weights[0].shape if len(weights) == 1 else '-'))

        print('%-28s%-12s%-24s%-24s' % (('---',) * 4))
        print('%-28s%-12s%-24s%-24s' % ('Total', total_params, '', ''))
        print()

Example #5
Source File: utils.py    From HardRLWithYoutube with MIT License
def ortho_init(scale=1.0):
    def _ortho_init(shape, dtype, partition_info=None):
        #lasagne ortho init for tf
        shape = tuple(shape)
        if len(shape) == 2:
            flat_shape = shape
        elif len(shape) == 4: # assumes NHWC
            flat_shape = (np.prod(shape[:-1]), shape[-1])
        else:
            raise NotImplementedError
        a = np.random.normal(0.0, 1.0, flat_shape)
        u, _, v = np.linalg.svd(a, full_matrices=False)
        q = u if u.shape == flat_shape else v # pick the one with the correct shape
        q = q.reshape(shape)
        return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
    return _ortho_init 
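A quick check that the closure really returns (scaled) orthogonal weights; this simply calls the initializer above with a square 2D shape:

import numpy as np

init = ortho_init(scale=1.0)
w = init((5, 5), np.float32)
# for a square orthogonal matrix, w.T @ w is (numerically) the identity
assert np.allclose(w.T @ w, np.eye(5), atol=1e-5)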
Example #6
Source File: util_scripts.py    From disentangling_conditional_gans with MIT License
def generate_fake_images(run_id, snapshot=None, grid_size=[1,1], num_pngs=1, image_shrink=1, png_prefix=None, random_seed=1000, minibatch_size=8):
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if png_prefix is None:
        png_prefix = misc.get_id_string_for_network_pkl(network_pkl) + '-'
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    for png_idx in range(num_pngs):
        print('Generating png %d / %d...' % (png_idx, num_pngs))
        latents = misc.random_latents(np.prod(grid_size), Gs, random_state=random_state)
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents, labels, minibatch_size=minibatch_size, num_gpus=config.num_gpus, out_mul=127.5, out_add=127.5, out_shrink=image_shrink, out_dtype=np.uint8)
        misc.save_image_grid(images, os.path.join(result_subdir, '%s%06d.png' % (png_prefix, png_idx)), [0,255], grid_size)
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()

Example #7
Source File: mpi_adam_optimizer.py    From HardRLWithYoutube with MIT License
def compute_gradients(self, loss, var_list, **kwargs):
        grads_and_vars = tf.train.AdamOptimizer.compute_gradients(self, loss, var_list, **kwargs)
        grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
        flat_grad = tf.concat([tf.reshape(g, (-1,)) for g, v in grads_and_vars], axis=0)
        shapes = [v.shape.as_list() for g, v in grads_and_vars]
        sizes = [int(np.prod(s)) for s in shapes]

        num_tasks = self.comm.Get_size()
        buf = np.zeros(sum(sizes), np.float32)

        def _collect_grads(flat_grad):
            self.comm.Allreduce(flat_grad, buf, op=MPI.SUM)
            np.divide(buf, float(num_tasks), out=buf)
            return buf

        avg_flat_grad = tf.py_func(_collect_grads, [flat_grad], tf.float32)
        avg_flat_grad.set_shape(flat_grad.shape)
        avg_grads = tf.split(avg_flat_grad, sizes, axis=0)
        avg_grads_and_vars = [(tf.reshape(g, v.shape), v)
                    for g, (_, v) in zip(avg_grads, grads_and_vars)]

        return avg_grads_and_vars 
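The shapes-to-sizes bookkeeping is what lets the averaged flat gradient be split back per variable; the same round trip in plain NumPy:

import numpy as np

shapes = [[2, 3], [4]]                            # hypothetical variable shapes
sizes = [int(np.prod(s)) for s in shapes]         # [6, 4]
flat = np.arange(10.0)                            # stand-in flat gradient
parts = np.split(flat, np.cumsum(sizes)[:-1])     # pieces of length 6 and 4
grads = [p.reshape(s) for p, s in zip(parts, shapes)]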
Example #8
Source File: initializations.py    From Att-ChemdNER with Apache License 2.0
def get_fans(shape, dim_ordering='th'):
    if len(shape) == 2:
        fan_in = shape[0]
        fan_out = shape[1]
    elif len(shape) == 4 or len(shape) == 5:
        # assuming convolution kernels (2D or 3D).
        # TH kernel shape: (depth, input_depth, ...)
        # TF kernel shape: (..., input_depth, depth)
        if dim_ordering == 'th':
            receptive_field_size = np.prod(shape[2:])
            fan_in = shape[1] * receptive_field_size
            fan_out = shape[0] * receptive_field_size
        elif dim_ordering == 'tf':
            receptive_field_size = np.prod(shape[:2])
            fan_in = shape[-2] * receptive_field_size
            fan_out = shape[-1] * receptive_field_size
        else:
            raise ValueError('Invalid dim_ordering: ' + dim_ordering)
    else:
        # no specific assumptions
        fan_in = np.sqrt(np.prod(shape))
        fan_out = np.sqrt(np.prod(shape))
    return fan_in, fan_out 
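For concreteness, the fans for a 3x3 kernel with 32 input and 64 output channels come out the same under both orderings:

fan_in, fan_out = get_fans((64, 32, 3, 3), dim_ordering='th')
print(fan_in, fan_out)    # 288 576  (32*9 and 64*9)

fan_in, fan_out = get_fans((3, 3, 32, 64), dim_ordering='tf')
print(fan_in, fan_out)    # 288 576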
Example #9
Source File: test_sparse_ndarray.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_sparse_nd_storage_fallback():
    def check_output_fallback(shape):
        ones = mx.nd.ones(shape)
        out = mx.nd.zeros(shape=shape, stype='csr')
        mx.nd.broadcast_add(ones, ones * 2, out=out)
        assert(np.sum(out.asnumpy() - 3) == 0)

    def check_input_fallback(shape):
        ones = mx.nd.ones(shape)
        out = mx.nd.broadcast_add(ones.tostype('csr'), ones.tostype('row_sparse'))
        assert(np.sum(out.asnumpy() - 2) == 0)

    def check_fallback_with_temp_resource(shape):
        ones = mx.nd.ones(shape)
        out = mx.nd.sum(ones)
        assert(out.asscalar() == np.prod(shape))

    shape = rand_shape_2d()
    check_output_fallback(shape)
    check_input_fallback(shape)
    check_fallback_with_temp_resource(shape) 
Example #10
Source File: models.py    From A2C with MIT License
def ortho_weights(shape, scale=1.):
    """ PyTorch port of ortho_init from baselines.a2c.utils """
    shape = tuple(shape)

    if len(shape) == 2:
        flat_shape = shape[1], shape[0]
    elif len(shape) == 4:
        flat_shape = (np.prod(shape[1:]), shape[0])
    else:
        raise NotImplementedError

    a = np.random.normal(0., 1., flat_shape)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    q = u if u.shape == flat_shape else v
    q = q.transpose().copy().reshape(shape)

    if len(shape) == 2:
        return torch.from_numpy((scale * q).astype(np.float32))
    if len(shape) == 4:
        return torch.from_numpy((scale * q[:, :shape[1], :shape[2]]).astype(np.float32)) 
Example #11
Source File: summary.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License
def _cac_linear(layer, input, output):
    ic, oc = layer.in_features, layer.out_features

    tb_params = 0
    ntb__params = 0
    flops = 0
    if hasattr(layer, 'weight') and hasattr(layer.weight, 'shape'):
        params = np.prod(layer.weight.shape)
        t, n = _cac_grad_params(params, layer.weight)
        tb_params += t
        ntb__params += n
        flops += (2 * ic - 1) * oc
    if hasattr(layer, 'bias') and hasattr(layer.bias, 'shape'):
        params = np.prod(layer.bias.shape)
        t, n = _cac_grad_params(params, layer.bias)
        tb_params += t
        ntb__params += n
        flops += oc
    return tb_params, ntb__params, flops 
Example #12
Source File: summary.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License
def _cac_xx_norm(layer, input, output):
    tb_params = 0
    ntb__params = 0
    if hasattr(layer, 'weight') and hasattr(layer.weight, 'shape'):
        params = np.prod(layer.weight.shape)
        t, n = _cac_grad_params(params, layer.weight)
        tb_params += t
        ntb__params += n
    if hasattr(layer, 'bias') and hasattr(layer.bias, 'shape'):
        params = np.prod(layer.bias.shape)
        t, n = _cac_grad_params(params, layer.bias)
        tb_params += t
        ntb__params += n
    if hasattr(layer, 'running_mean') and hasattr(layer.running_mean, 'shape'):
        params = np.prod(layer.running_mean.shape)
        ntb__params += params
    if hasattr(layer, 'running_var') and hasattr(layer.running_var, 'shape'):
        params = np.prod(layer.running_var.shape)
        ntb__params += params
    in_shape = input[0]
    flops = np.prod(in_shape.shape)
    if layer.affine:
        flops *= 2
    return tb_params, ntb__params, flops 
Example #13
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def fapply(f, x, tz=False):
    '''
    fapply(f,x) yields the result of applying f either to x, if x is a normal value or array, or to
      x.data if x is a sparse matrix. Does not modify x (unless f modifies x).

    The optional argument tz (default: False) may be set to True to specify that, if x is a sparse 
    matrix that contains at least 1 element that is a sparse-zero, then f(0) should replace all the
    sparse-zeros in x (unless f(0) == 0).
    '''
    if sps.issparse(x):
        y = x.copy()
        y.data = f(x.data)
        if tz and y.getnnz() < np.prod(y.shape):
            z = f(np.array(0))
            if z != 0:
                y = y.toarray()
                y[y == 0] = z
        return y
    else: return f(x) 
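A short usage sketch; the np.prod(y.shape) comparison is how fapply detects that the matrix still contains implicit (sparse) zeros:

import numpy as np
import scipy.sparse as sps

x = sps.csr_matrix(np.array([[0., 2.], [3., 0.]]))
print(fapply(np.square, x).toarray())   # [[0. 4.] [9. 0.]], stays sparse
print(fapply(np.cos, x, tz=True))       # dense: cos(0) == 1 fills the zeros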
Example #14
Source File: summary.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License
def _cac_conv(layer, input, output):
    # bs, ic, ih, iw = input[0].shape
    oh, ow = output.shape[-2:]
    kh, kw = layer.kernel_size
    ic, oc = layer.in_channels, layer.out_channels
    g = layer.groups

    tb_params = 0
    ntb__params = 0
    flops = 0
    if hasattr(layer, 'weight') and hasattr(layer.weight, 'shape'):
        params = np.prod(layer.weight.shape)
        t, n = _cac_grad_params(params, layer.weight)
        tb_params += t
        ntb__params += n
        flops += (2 * ic * kh * kw - 1) * oh * ow * (oc // g)
    if hasattr(layer, 'bias') and hasattr(layer.bias, 'shape'):
        params = np.prod(layer.bias.shape)
        t, n = _cac_grad_params(params, layer.bias)
        tb_params += t
        ntb__params += n
        flops += oh * ow * (oc // g)
    return tb_params, ntb__params, flops 
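Plugging numbers into the weight-FLOPs line, for a 3x3 convolution from 32 to 64 channels producing a 16x16 output with groups=1:

flops = (2 * 32 * 3 * 3 - 1) * 16 * 16 * 64   # 9,420,800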
Example #15
Source File: parameter.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def _finish_deferred_init(self):
        """Finishes deferred initialization."""
        if not self._deferred_init:
            return
        init, ctx, default_init, data = self._deferred_init
        self._deferred_init = ()
        assert self.shape is not None and np.prod(self.shape) > 0, \
            "Cannot initialize Parameter '%s' because it has " \
            "invalid shape: %s. Please specify in_units, " \
            "in_channels, etc for `Block`s."%(
                self.name, str(self.shape))

        with autograd.pause():
            if data is None:
                data = ndarray.zeros(shape=self.shape, dtype=self.dtype,
                                     ctx=context.cpu(), stype=self._stype)
                initializer.create(default_init)(
                    initializer.InitDesc(self.name, {'__init__': init}), data)

            self._init_impl(data, ctx) 
Example #16
Source File: ndarray.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def size(self):
        """Number of elements in the array.

        Equivalent to the product of the array's dimensions.

        Examples
        --------
        >>> import numpy as np
        >>> x = mx.nd.zeros((3, 5, 2))
        >>> x.size
        30
        >>> np.prod(x.shape)
        30
        """
        size = 1
        for i in self.shape:
            size *= i
        return size 
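The same equivalence holds for NumPy arrays themselves, with one corner case worth remembering:

import numpy as np

x = np.zeros((3, 5, 2))
assert x.size == int(np.prod(x.shape))   # both 30

y = np.array(7.0)                        # 0-d array: shape is ()
assert y.size == int(np.prod(y.shape))   # np.prod(()) is 1.0; int() gives 1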
Example #17
Source File: metric.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def update(self, labels, preds):
        """Updates the internal evaluation result.

        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data.

        preds : list of `NDArray`
            Predicted values.
        """
        labels, preds = check_label_shapes(labels, preds, True)

        for label, pred in zip(labels, preds):
            label = label.asnumpy()
            pred = pred.asnumpy()

            if len(label.shape) == 1:
                label = label.reshape(label.shape[0], 1)
            if len(pred.shape) == 1:
                pred = pred.reshape(pred.shape[0], 1)

            self.sum_metric += numpy.abs(label - pred).mean()
            self.num_inst += 1 # numpy.prod(label.shape) 
Example #18
Source File: metric.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def update(self, labels, preds):
        """Updates the internal evaluation result.

        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data.

        preds : list of `NDArray`
            Predicted values.
        """
        labels, preds = check_label_shapes(labels, preds, True)

        for label, pred in zip(labels, preds):
            label = label.asnumpy()
            pred = pred.asnumpy()

            if len(label.shape) == 1:
                label = label.reshape(label.shape[0], 1)
            if len(pred.shape) == 1:
                pred = pred.reshape(pred.shape[0], 1)

            self.sum_metric += ((label - pred)**2.0).mean()
            self.num_inst += 1 # numpy.prod(label.shape) 
Example #19
Source File: utils.py    From lirpg with MIT License
def ortho_init(scale=1.0):
    def _ortho_init(shape, dtype, partition_info=None):
        #lasagne ortho init for tf
        shape = tuple(shape)
        if len(shape) == 2:
            flat_shape = shape
        elif len(shape) == 4: # assumes NHWC
            flat_shape = (np.prod(shape[:-1]), shape[-1])
        else:
            raise NotImplementedError
        a = np.random.normal(0.0, 1.0, flat_shape)
        u, _, v = np.linalg.svd(a, full_matrices=False)
        q = u if u.shape == flat_shape else v # pick the one with the correct shape
        q = q.reshape(shape)
        return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
    return _ortho_init 
Example #20
Source File: initializer.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def _init_weight(self, name, arr):
        shape = arr.shape
        hw_scale = 1.
        if len(shape) < 2:
            raise ValueError('Xavier initializer cannot be applied to vector {0}. It requires at'
                             ' least 2D.'.format(name))
        if len(shape) > 2:
            hw_scale = np.prod(shape[2:])
        fan_in, fan_out = shape[1] * hw_scale, shape[0] * hw_scale
        factor = 1.
        if self.factor_type == "avg":
            factor = (fan_in + fan_out) / 2.0
        elif self.factor_type == "in":
            factor = fan_in
        elif self.factor_type == "out":
            factor = fan_out
        else:
            raise ValueError("Incorrect factor type")
        scale = np.sqrt(self.magnitude / factor)
        if self.rnd_type == "uniform":
            random.uniform(-scale, scale, out=arr)
        elif self.rnd_type == "gaussian":
            random.normal(0, scale, out=arr)
        else:
            raise ValueError("Unknown random type") 
Example #21
Source File: common_attention.py    From fine-lm with MIT License
def make_2d_block_raster_mask(query_shape, memory_flange):
  """Creates a mask for 2d block raster scan.

  The query mask can look to the left, top left, top, and top right, but
  not to the right. Inside the query, we have the standard raster scan
  masking.
  Args:
    query_shape: A tuple of ints (query_height, query_width)
    memory_flange: A tuple of ints
      (memory_flange_height, memory_flange_width)

  Returns:
    A tensor of shape query_size, memory_size
  """
  # mask inside the query block
  query_triangle = common_layers.ones_matrix_band_part(
      np.prod(query_shape), np.prod(query_shape), -1, 0)
  split_query_masks = tf.split(query_triangle, query_shape[0], axis=1)
  # adding mask for left and right
  mask_pieces = [
      tf.concat(
          [
              tf.ones([np.prod(query_shape), memory_flange[1]]),
              split_query_masks[i],
              tf.zeros([np.prod(query_shape), memory_flange[1]])
          ],
          axis=1) for i in range(query_shape[0])
  ]
  # adding mask for top
  final_mask = tf.concat(
      [
          tf.ones([
              np.prod(query_shape),
              (query_shape[1] + 2 * memory_flange[1]) * memory_flange[0]
          ]),
          tf.concat(mask_pieces, axis=1)
      ],
      axis=1)
  # 0.0 is visible location, 1.0 is masked.
  return 1. - final_mask 
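The in-block triangle is just a lower-triangular matrix over the np.prod(query_shape) raster-scan positions. A NumPy sketch, assuming ones_matrix_band_part(n, n, -1, 0) matches np.tril:

import numpy as np

query_shape = (2, 2)
n = int(np.prod(query_shape))         # 4 positions in raster-scan order
query_triangle = np.tril(np.ones((n, n)))
print(query_triangle)                 # row i attends to positions 0..i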
Example #22
Source File: layers.py    From ArtGAN with BSD 3-Clause "New" or "Revised" License
def flatten(x):
    return tf.reshape(x, [-1, np.prod(x.get_shape().as_list()[1:])]) 
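The same flattening, sketched with a plain NumPy array instead of a TF tensor:

import numpy as np

x = np.zeros((8, 4, 4, 3))
flat = x.reshape(-1, int(np.prod(x.shape[1:])))   # (8, 48): batch axis preserved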
Example #23
Source File: depthest_trainer.py    From ACAN with MIT License
def train(self):
        torch.backends.cudnn.benchmark = True
        if self.logdir:
            self.writer = SummaryWriter(self.logdir)
        else:
            raise Exception("Log dir doesn't exist!")
        # Calculate total step
        self.n_train = len(self.trainset)
        self.steps_per_epoch = np.ceil(self.n_train / self.batch_size).astype(np.int32)
        self.verbose = min(self.verbose, self.steps_per_epoch)
        self.n_steps = self.max_epochs * self.steps_per_epoch
        self.print("{0:<22s} : {1:} ".format('trainset sample', self.n_train))
        # calculate model parameters memory
        para = sum([np.prod(list(p.size())) for p in self.net.parameters()])
        memory = para * 4 / (1024**2)
        self.print('Model {} : params: {:,}, Memory {:.3f}MB'.format(self.net._get_name(), para, memory))
        # GO!!!!!!!!!
        start_time = time.time()
        self.train_total_time = 0
        self.time_sofar = 0
        for epoch in range(self.start_epoch, self.max_epochs + 1):
            # Train one epoch
            total_loss = self.train_epoch(epoch)
            torch.cuda.empty_cache()
            # Decay Learning Rate
            if self.params.scheduler in ['step', 'plateau']:
                self.scheduler.step()
            # Evaluate the model
            if self.eval_freq and epoch % self.eval_freq == 0:
                measures = self.eval(epoch)
                torch.cuda.empty_cache()
                for k in sorted(list(measures.keys())):
                    self.writer.add_scalar(k, measures[k], epoch)
        self.print("Finished training! Best epoch {} best acc {:.4f}".format(self.best_epoch, self.best_acc))
        self.print("Spend time: {:.2f}h".format((time.time() - start_time) / 3600))
        net_type = type(self.net).__name__
        best_pkl = os.path.join(self.logdir, '{}_{:03d}.pkl'.format(net_type, self.best_epoch))
        modify = os.path.join(self.logdir, 'best.pkl')
        shutil.copyfile(best_pkl, modify)
        return 
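The memory line assumes 4-byte float32 parameters; the arithmetic in isolation, with made-up parameter shapes:

import numpy as np

sizes = [(64, 3, 7, 7), (1000, 512)]          # hypothetical parameter shapes
para = sum(int(np.prod(s)) for s in sizes)    # 9408 + 512000 = 521408
memory = para * 4 / (1024 ** 2)               # bytes -> MiB
print('{:,} params, {:.3f}MB'.format(para, memory))   # 521,408 params, 1.989MB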
Example #24
Source File: optimize.py    From fine-lm with MIT License
def log_variable_sizes(var_list=None, tag=None, verbose=False):
  """Log the sizes and shapes of variables, and the total size.

  Args:
    var_list: a list of variables; defaults to trainable_variables
    tag: a string; defaults to "Trainable Variables"
    verbose: bool, if True, log every weight; otherwise, log total size only.
  """
  if var_list is None:
    var_list = tf.trainable_variables()
  if tag is None:
    tag = "Trainable Variables"

  if not var_list:
    return

  name_to_var = {v.name: v for v in var_list}
  total_size = 0
  for v_name in sorted(list(name_to_var)):
    v = name_to_var[v_name]
    v_size = int(np.prod(np.array(v.shape.as_list())))
    if verbose:
      tf.logging.info("Weight    %s\tshape    %s\tsize    %d",
                      v.name[:-2].ljust(80),
                      str(v.shape).ljust(20), v_size)
    total_size += v_size
  tf.logging.info("%s Total size: %d", tag, total_size) 
Example #25
Source File: trainer.py    From ACAN with MIT License
def train(self):
        """Do training, you can overload this function according to your need."""
        self.print("Log dir: {}".format(self.logdir))
        # Calculate total step
        self.n_train = len(self.trainset)
        self.steps_per_epoch = np.ceil(
            self.n_train / self.batch_size).astype(np.int32)
        self.verbose = min(self.verbose, self.steps_per_epoch)
        self.n_steps = self.max_epochs * self.steps_per_epoch
        # calculate model parameters memory
        para = sum([np.prod(list(p.size())) for p in self.net.parameters()])
        memory = para * 4 / 1000 / 1000
        self.print('Model {} : params: {:4f}M'.format(self.net._get_name(), memory))
        self.print('###### Experiment Parameters ######')
        for k, v in self.params.items():
            self.print('{0:<22s} : {1:}'.format(k, v))
        self.print("{0:<22s} : {1:} ".format('trainset sample', self.n_train))
        # GO!!!!!!!!!
        start_time = time.time()
        self.train_total_time = 0
        self.time_sofar = 0
        for epoch in range(self.start_epoch, self.max_epochs + 1):
            # Decay Learning Rate
            self.scheduler.step()
            # Train one epoch
            total_loss = self.train_epoch(epoch)
            torch.cuda.empty_cache()
            # Evaluate the model
            if self.eval_freq and epoch % self.eval_freq == 0:
                acc = self.eval(epoch)
                torch.cuda.empty_cache()
        self.print("Finished training! Best epoch {} best acc {}".format(self.best_epoch, self.best_acc))
        self.print("Spend time: {:.2f}h".format((time.time() - start_time) / 3600)) 
Example #26
Source File: logger.py    From cascade-rcnn_Pytorch with MIT License
def histo_summary(self, tag, values, step, bins=1000):
        """Log a histogram of the tensor of values."""

        # Create a histogram using numpy
        counts, bin_edges = np.histogram(values, bins=bins)

        # Fill the fields of the histogram proto
        hist = tf.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
        hist.num = int(np.prod(values.shape))
        hist.sum = float(np.sum(values))
        hist.sum_squares = float(np.sum(values**2))

        # Drop the start of the first bin
        bin_edges = bin_edges[1:]

        # Add bin edges and counts
        for edge in bin_edges:
            hist.bucket_limit.append(edge)
        for c in counts:
            hist.bucket.append(c)

        # Create and write Summary
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
        self.writer.add_summary(summary, step)
        self.writer.flush() 
Example #27
Source File: initializations.py    From Att-ChemdNER with Apache License 2.0
def orthogonal(shape, scale=1.1, name=None):
    ''' From Lasagne. Reference: Saxe et al., http://arxiv.org/abs/1312.6120
    '''
    flat_shape = (shape[0], np.prod(shape[1:]))
    a = np.random.normal(0.0, 1.0, flat_shape)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    # pick the one with the correct shape
    q = u if u.shape == flat_shape else v
    q = q.reshape(shape)
    return K.variable(scale * q[:shape[0], :shape[1]], name=name) 
Example #28
Source File: trust_region.py    From DOTA_models with Apache License 2.0
def var_size(v):
  return int(np.prod([int(d) for d in v.shape])) 
Example #29
Source File: metrics_test.py    From fine-lm with MIT License
def testSequenceAccuracyMetric(self):
    predictions = np.random.randint(4, size=(12, 12, 12, 1))
    targets = np.random.randint(4, size=(12, 12, 12, 1))
    expected = np.mean(
        np.prod((predictions == targets).astype(float), axis=(1, 2)))
    with self.test_session() as session:
      scores, _ = metrics.padded_sequence_accuracy(
          tf.one_hot(predictions, depth=4, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32))
      a = tf.reduce_mean(scores)
      session.run(tf.global_variables_initializer())
      actual = session.run(a)
    self.assertEqual(actual, expected)
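Here np.prod does real work: a sequence counts as correct only when every position matches, i.e. the product of per-position indicators along the sequence axes is 1. The same computation in isolation:

import numpy as np

predictions = np.array([[1, 2, 3], [1, 2, 3]])
targets     = np.array([[1, 2, 3], [1, 0, 3]])
correct = (predictions == targets).astype(float)
seq_correct = np.prod(correct, axis=1)   # 1.0 only if the whole row matches
print(seq_correct)                       # [1. 0.]
print(seq_correct.mean())                # 0.5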