Python numpy.shape() Examples

The following are 30 code examples of numpy.shape(), drawn from open-source projects; the source file and license are noted above each example. You may also want to check out the other available functions and classes of the numpy module.
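
As a quick refresher before the examples: numpy.shape(a) returns the shape of an array-like as a tuple. It accepts plain Python sequences and scalars as well as ndarrays, which is why many snippets below call np.shape(x)[0] to count samples. A minimal illustration:

import numpy as np

print(np.shape(np.zeros((4, 2))))        # (4, 2)
print(np.shape([[1, 2, 3], [4, 5, 6]]))  # (2, 3) -- nested lists work too
print(np.shape(7.0))                     # ()     -- a scalar has an empty shape tuple
print(np.shape(np.zeros((4, 2)))[0])     # 4      -- number of rows / samples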
Example #1
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def zdivide(a, b, null=0):
    '''
    zdivide(a, b) returns the quotient a / b as a numpy array object. Unlike numpy's divide function
      or a/b syntax, zdivide will thread over the earliest dimension possible; thus if a.shape is
      (4,2) and b.shape is (4,), zdivide(a,b) is equivalent to [ai*zinv(bi) for (ai,bi) in zip(a,b)].

    The optional argument null (default: 0) may be given to specify that zeros in the array b should
    instead be replaced with the given value in the result. Note that if this value is not equal to
    0, then any sparse array passed as argument b must be reified.

    The zdivide function never raises an error due to divide-by-zero; if you desire this behavior,
    use the divide function instead.

    Note that zdivide(a,b, null=z) is not quite equivalent to a*zinv(b, null=z) unless z is 0; if z
    is not zero, then the same elements that are set to z in zinv(b, null=z) are set to z in the
    result of zdivide(a,b, null=z) rather than the equivalent element of a times z.
    '''
    (a,b) = unbroadcast(a,b)
    return czdivide(a,b, null=null) 
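
The zdivide above relies on neuropythy's unbroadcast and czdivide helpers. The divide-by-zero behavior it documents can be sketched in plain numpy using np.divide's where argument; this is only an illustration of the idea, not the neuropythy implementation:

import numpy as np

def zdivide_sketch(a, b, null=0):
    # Elementwise a / b, writing `null` wherever b == 0 instead of warning or producing inf/nan.
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    out = np.full(np.broadcast(a, b).shape, null, dtype=float)
    return np.divide(a, b, out=out, where=(b != 0))

print(zdivide_sketch([1.0, 2.0, 3.0], [2.0, 0.0, 4.0]))  # [0.5  0.   0.75]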
Example #2
Source File: custom_datasets.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def __init__(self, transform=None, target_transform=None, filename="adv_set_e_2.p", transp = False):
        """

        :param transform:
        :param target_transform:
        :param filename:
        :param transp: Set shuff= False for PGD based attacks
        :return:
        """
        self.transform = transform
        self.target_transform = target_transform
        self.adv_dict=pickle.load(open(filename,"rb"))
        self.adv_flat=self.adv_dict["adv_input"]
        self.num_adv=np.shape(self.adv_flat)[0]
        self.transp = transp
        self.sample_num = 0 
Example #3
Source File: custom_datasets.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def __init__(self, transform=None, target_transform=None, filename="adv_set_e_2.p", transp = False):
        """

        :param transform:
        :param target_transform:
        :param filename:
        :param transp: Set shuff= False for PGD based attacks
        :return:
        """
        self.transform = transform
        self.target_transform = target_transform
        self.adv_dict=pickle.load(open(filename,"rb"))
        self.adv_flat=self.adv_dict["adv_input"]
        self.num_adv=np.shape(self.adv_flat)[0]
        self.shuff = transp
        self.sample_num = 0 
Example #4
Source File: custom_datasets.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def __init__(self, transform=None, target_transform=None, filename="adv_set_e_2.p", transp = False):
        """

        :param transform:
        :param target_transform:
        :param filename:
        :param transp: Set shuff= False for PGD based attacks
        :return:
        """
        self.transform = transform
        self.target_transform = target_transform
        self.adv_dict=pickle.load(open(filename,"rb"))
        self.adv_flat=self.adv_dict["adv_input"]
        self.num_adv=np.shape(self.adv_flat)[0]
        self.shuff = transp
        self.sample_num = 0 
Example #5
Source File: adaptive_attacks.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def binary_refinement(sess,Best_X_adv,
                      X_adv, Y, ALPHA, ub, lb, model, dataset='cifar'):
    num_samples = np.shape(X_adv)[0]
    print(dataset)
    if(dataset=="mnist"):
        X_place = tf.placeholder(tf.float32, shape=[1, 1, 28, 28])
    else:
        X_place = tf.placeholder(tf.float32, shape=[1, 3, 32, 32])

    pred = model(X_place)
    for i in range(num_samples):
        logits_op = sess.run(pred,feed_dict={X_place:X_adv[i:i+1,:,:,:]})
        if(not np.argmax(logits_op) == np.argmax(Y[i,:])):
            # Success, increase alpha
            Best_X_adv[i,:,:,:] = X_adv[i,:,:,:]
            lb[i] = ALPHA[i,0]
        else:
            ub[i] = ALPHA[i,0]
        ALPHA[i] = 0.5*(lb[i] + ub[i])
    return ALPHA, Best_X_adv 
Example #6
Source File: metrics.py    From DOTA_models with Apache License 2.0
def accuracy(logits, labels):
  """
  Return accuracy of the array of logits (or label predictions) wrt the labels
  :param logits: this can either be logits, probabilities, or a single label
  :param labels: the correct labels to match against
  :return: the accuracy as a float
  """
  assert len(logits) == len(labels)

  if len(np.shape(logits)) > 1:
    # Predicted labels are the argmax over axis 1
    predicted_labels = np.argmax(logits, axis=1)
  else:
    # Input was already labels
    assert len(np.shape(logits)) == 1
    predicted_labels = logits

  # Check against correct labels to compute correct guesses
  correct = np.sum(predicted_labels == labels.reshape(len(labels)))

  # Divide by number of labels to obtain accuracy
  accuracy = float(correct) / len(labels)

  # Return float value
  return accuracy 
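
A quick check of accuracy (assuming the function above is in scope); np.shape is what distinguishes a 2-D array of logits from a 1-D array of already-predicted labels:

import numpy as np

logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])   # 2-D: argmax taken along axis 1
labels = np.array([1, 0, 0])
print(accuracy(logits, labels))               # 0.666... (two of three correct)
print(accuracy(np.array([1, 0, 0]), labels))  # 1.0 (1-D input is treated as labels)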
Example #7
Source File: aggregation.py    From DOTA_models with Apache License 2.0
def labels_from_probs(probs):
  """
  Helper function: computes argmax along last dimension of array to obtain
  labels (max prob or max logit value)
  :param probs: numpy array where probabilities or logits are on last dimension
  :return: array with the same shape as the input but without its last dimension,
          now containing the labels
  """
  # Compute last axis index
  last_axis = len(np.shape(probs)) - 1

  # Label is argmax over last dimension
  labels = np.argmax(probs, axis=last_axis)

  # Return as np.int32
  return np.asarray(labels, dtype=np.int32) 
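
For example, a batch of per-class probabilities collapses to integer labels along its last axis (assuming labels_from_probs above is in scope):

import numpy as np

probs = np.array([[[0.2, 0.8], [0.9, 0.1]],
                  [[0.4, 0.6], [0.3, 0.7]]])    # shape (2, 2, 2)
print(labels_from_probs(probs))                 # [[1 0] [1 1]] as int32
print(np.shape(labels_from_probs(probs)))       # (2, 2) -- last dimension removed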
Example #8
Source File: mxnet_export_test.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_square():
    input1 = np.random.randint(1, 10, (2, 3)).astype("float32")

    ipsym = mx.sym.Variable("input1")
    square = mx.sym.square(data=ipsym)
    model = mx.mod.Module(symbol=square, data_names=['input1'], label_names=None)
    model.bind(for_training=False, data_shapes=[('input1', np.shape(input1))], label_shapes=None)
    model.init_params()

    args, auxs = model.get_params()
    params = {}
    params.update(args)
    params.update(auxs)

    converted_model = onnx_mxnet.export_model(square, params, [np.shape(input1)], np.float32, "square.onnx")

    sym, arg_params, aux_params = onnx_mxnet.import_model(converted_model)
    result = forward_pass(sym, arg_params, aux_params, ['input1'], input1)

    numpy_op = np.square(input1)

    npt.assert_almost_equal(result, numpy_op) 
Example #9
Source File: images.py    From neuropythy with GNU Affero General Public License v3.0
def parse_dataobj(self, dataobj, hdat={}):
        # first, see if we have a specified shape/size
        ish = next((hdat[k] for k in ('image_size', 'image_shape', 'shape') if k in hdat), None)
        if ish is Ellipsis: ish = None
        # make a numpy array of the appropriate dtype
        dtype = self.parse_type(hdat, dataobj=dataobj)
        try:    dataobj = dataobj.dataobj
        except Exception: pass
        if   dataobj is not None: arr = np.asarray(dataobj).astype(dtype)
        elif ish:                 arr = np.zeros(ish,       dtype=dtype)
        else:                     arr = np.zeros([1,1,1,0], dtype=dtype)
        # reshape to the requested shape if need-be
        if ish and ish != arr.shape: arr = np.reshape(arr, ish)
        # then reshape to a valid (4D) shape
        sh = arr.shape
        if   len(sh) == 2: arr = np.reshape(arr, (sh[0], 1, 1, sh[1]))
        elif len(sh) == 1: arr = np.reshape(arr, (sh[0], 1, 1))
        elif len(sh) == 3: arr = np.reshape(arr, sh)
        elif len(sh) != 4: raise ValueError('Cannot convert n-dimensional array to image if n > 4')
        # and return
        return arr 
Example #10
Source File: images.py    From neuropythy with GNU Affero General Public License v3.0
def image_array_to_spec(arr):
    '''
    image_array_to_spec(arr) yields an image-spec that is appropriate for the given array. The 
      default image spec for an array is a FreeSurfer-like affine transformation with a translation
      that puts the origin at the center of the array. The upper-left 3x3 matrix for this
      transformation is [[-1,0,0], [0,0,1], [0,-1,0]].
    image_array_to_spec((i,j,k)) uses (i,j,k) as the shape of the image array.
    image_array_to_spec(image) uses the array from the given image but not the affine matrix.
    image_array_to_spec(spec) uses the image shape from the given image spec but not the affine
      matrix.
    '''
    sh   = image_shape(arr)[:3]
    (i0,j0,k0) = np.asarray(sh) * 0.5
    ijk0 = (i0, -k0, j0)
    aff  = to_affine(([[-1,0,0],[0,0,1],[0,-1,0]], ijk0), 3)
    return {'image_shape':sh, 'affine':aff} 
Example #11
Source File: images.py    From neuropythy with GNU Affero General Public License v3.0
def image_reslice(image, spec, method=None, fill=0, dtype=None, weights=None, image_type=None):
    '''
    image_reslice(image, spec) yields a duplicate of the given image resliced to have the voxels
      indicated by the given image spec. Note that spec may be an image itself.

    Optional arguments that can be passed to image_interpolate() (aside from affine) are allowed
    here and are passed through.
    '''
    if image_type is None and is_image(image): image_type = to_image_type(image)
    spec = to_image_spec(spec)
    image = to_image(image)
    # we make a big mesh and interpolate at these points...
    imsh = spec['image_shape']
    (args, kw) = ([np.arange(n) for n in imsh[:3]], {'indexing': 'ij'})
    ijk = np.asarray([u.flatten() for u in np.meshgrid(*args, **kw)])
    ijk = np.dot(spec['affine'], np.vstack([ijk, np.ones([1,ijk.shape[1]])]))[:3]
    # interpolate here...
    u = image_interpolate(image, ijk, method=method, fill=fill, dtype=dtype, weights=weights)
    return to_image((np.reshape(u, imsh), spec), image_type=image_type) 
Example #12
Source File: _op_translations.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def convert_string_to_list(string_val):
    """Helper function to convert string to list.
     Used to convert shape attribute string to list format.
    """
    result_list = []

    list_string = string_val.split(',')
    for val in list_string:
        val = str(val.strip())
        val = val.replace("(", "")
        val = val.replace(")", "")
        val = val.replace("L", "")
        val = val.replace("[", "")
        val = val.replace("]", "")
        if val not in ("", "None"):
            result_list.append(int(val))

    return result_list 
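
For instance, the helper turns an MXNet-style shape string (including the trailing L of Python 2 longs) into a plain list of ints (assuming it is in scope):

print(convert_string_to_list("(3L, 224L, 224L)"))  # [3, 224, 224]
print(convert_string_to_list("[1, None, 128]"))    # [1, 128] -- empty and "None" entries are skipped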
Example #13
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def inner(a,b):
    '''
    inner(a,b) yields the dot product of a and b, doing so in a fashion that respects sparse
      matrices when encountered. This does not error check for bad dimensionality.

    If a or b is a constant, then the result is just a*b; if a and b are both vectors or both
    matrices, then the inner product is dot(a,b); if a is a vector and b is a matrix, a is
    treated as a matrix with one row; and if a is a matrix and b is a vector, b is treated as
    a matrix with one column.
    '''
    if   sps.issparse(a): return a.dot(b)
    else: a = np.asarray(a)
    if len(a.shape) == 0: return a*b
    if sps.issparse(b):
        if len(a.shape) == 1: return b.T.dot(a)
        else:                 return b.T.dot(a.T).T
    else: b = np.asarray(b)
    if len(b.shape) == 0: return a*b
    if len(a.shape) == 1 and len(b.shape) == 2: return np.dot(b.T, a)
    else: return np.dot(a,b) 
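
A few dense-array checks of inner (assuming it and its numpy/scipy imports are in scope, as in the source module):

import numpy as np

A = np.arange(6).reshape(2, 3)
v = np.array([1.0, 2.0])
print(inner(2, 3))            # 6 -- constants simply multiply
print(inner(v, v))            # 5.0 -- vector dot vector
print(inner(v, A))            # [ 6.  9. 12.] -- v treated as a 1-row matrix
print(inner(A, np.ones(3)))   # [ 3. 12.] -- the vector treated as a 1-column matrix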
Example #14
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def ctimes(*args):
    '''
    ctimes(a, b...) returns the product of all the values as a numpy array object. Like numpy's
      multiply function or a*b syntax, ctimes will thread over the latest dimension possible; thus
      if a.shape is (4,2) and b.shape is (2,), ctimes(a,b) is equivalent to a * b.

    Unlike numpy's multiply function, ctimes works with sparse matrices and will reify them.
    '''
    n = len(args)
    if   n == 0: return np.asarray(0)
    elif n == 1: return np.asarray(args[0])
    elif n >  2: return reduce(ctimes, args)  # fold the product over all arguments
    (a,b) = args
    if   sps.issparse(a): return a.multiply(b)
    elif sps.issparse(b): return b.multiply(a)
    else:                 return np.asarray(a) * b 
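
A quick dense check of ctimes (sparse inputs additionally need scipy.sparse, as imported in the source module):

import numpy as np

a = np.arange(8).reshape(4, 2)
b = np.array([10, 100])
print(np.shape(ctimes(a, b)))   # (4, 2) -- threads over the trailing dimension, like a * b
print(ctimes(a, b)[0])          # [  0 100]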
Example #15
Source File: _op_translations.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def convert_floor(node, **kwargs):
    """Map MXNet's floor operator attributes to onnx's Floor operator
    and return the created node.
    """
    onnx = import_onnx_modules()
    name = node["name"]
    proc_nodes = kwargs["proc_nodes"]
    inputs = node["inputs"]

    input_node_id = kwargs["index_lookup"][inputs[0][0]]
    input_node = proc_nodes[input_node_id].name

    node = onnx.helper.make_node(
        "Floor",
        [input_node],
        [name],
        name=name
    )
    return [node]

# Changing shape and type. 
Example #16
Source File: TensorFlowInterface.py    From IntroToDeepLearning with MIT License
def max_pool(x,shape,name=None):
	# return an op that performs max pooling across a 2D image
	return tf.nn.max_pool(x,ksize=[1]+shape+[1],strides=[1]+shape+[1],padding='SAME',name=name) 
Example #17
Source File: TensorFlowInterface.py    From IntroToDeepLearning with MIT License
def max_pool3d(x,shape,name=None):
	# return an op that performs max pooling across a 3D volume
	return tf.nn.max_pool3d(x,ksize=[1]+shape+[1],strides=[1]+shape+[1],padding='SAME',name=name) 
Example #18
Source File: mxnet_export_test.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_model_accuracy(model_name, input_shape):
    """ Imports ONNX model, runs inference, exports and imports back
        run inference, compare result with the previous inference result"""
    model_path, inputs, outputs = get_test_files(model_name)
    logging.info("Translating model from ONNX model zoo to Mxnet")
    sym, arg_params, aux_params = onnx_mxnet.import_model(model_path)

    metadata = onnx_mxnet.get_model_metadata(model_path)
    data_names = [input_name[0] for input_name in metadata.get('input_tensor_data')]

    expected_result= []
    for input_data, output_data in zip(inputs, outputs):
        result = forward_pass(sym, arg_params, aux_params, data_names, input_data)
        expected_result.append(result)

    params = {}
    params.update(arg_params)
    params.update(aux_params)

    dir_path = os.path.dirname(model_path)
    new_model_name = "exported_" + model_name + ".onnx"
    onnx_file = os.path.join(dir_path, new_model_name)

    logging.info("Translating converted model from mxnet to ONNX")
    converted_model_path = onnx_mxnet.export_model(sym, params, [input_shape], np.float32,
                                                   onnx_file)

    sym, arg_params, aux_params = onnx_mxnet.import_model(converted_model_path)

    metadata = onnx_mxnet.get_model_metadata(converted_model_path)
    data_names = [input_name[0] for input_name in metadata.get('input_tensor_data')]

    actual_result = []
    for input_data, output_data in zip(inputs, outputs):
        result = forward_pass(sym, arg_params, aux_params, data_names, input_data)
        actual_result.append(result)

    # verify the results
    for expected, actual in zip(expected_result, actual_result):
        npt.assert_equal(expected.shape, actual.shape)
        npt.assert_almost_equal(expected, actual, decimal=3) 
Example #19
Source File: TensorFlowInterface.py    From IntroToDeepLearning with MIT License
def __init__(self,input,shape,name,strides=[1,1,1,1,1],std=1.0,bias=0.1):
		super(Conv3D,self).__init__(input,shape,name,strides,std,bias) 
Example #20
Source File: VAE.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def encoder(model, x):
        params = model.arg_params
        encoder_n = np.shape(params['encoder_h_bias'].asnumpy())[0]
        encoder_h = np.dot(params['encoder_h_weight'].asnumpy(), np.transpose(x)) \
                    + np.reshape(params['encoder_h_bias'].asnumpy(), (encoder_n,1))
        act_h = np.tanh(encoder_h)
        mu = np.transpose(np.dot(params['mu_weight'].asnumpy(),act_h)) + params['mu_bias'].asnumpy()
        logvar = np.transpose(np.dot(params['logvar_weight'].asnumpy(),act_h)) + params['logvar_bias'].asnumpy()
        return mu,logvar 
Example #21
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def load_nifti(filename, to='auto'):
    '''
    load_nifti(filename) yields the Nifti1Image or Nifti2Image referenced by the given filename by
      using the nibabel load function.
    
    The optional argument to may be used to coerce the resulting data to a particular format; the
    following arguments are understood:
      * 'header' will yield just the image header
      * 'data' will yield the image's data-array
      * 'field' will yield a squeezed version of the image's data-array and will raise an error if
        the data object has more than 2 non-unitary dimensions (appropriate for loading surface
        properties stored in image files)
      * 'affine' will yield the image's affine transformation
      * 'image' will yield the raw image object
      * 'auto' is equivalent to 'image' unless the image has no more than 2 non-unitary dimensions,
        in which case it is assumed to be a surface-field and the return value is equivalent to
        the 'field' value.
    '''
    img = nib.load(filename)
    to = to.lower()
    if to == 'image':    return img
    elif to == 'data':   return img.dataobj
    elif to == 'affine': return img.affine
    elif to == 'header': return img.header
    elif to == 'field':
        dat = np.squeeze(np.asarray(img.dataobj))
        if len(dat.shape) > 2:
            raise ValueError('image requested as field has more than 2 non-unitary dimensions')
        return dat
    elif to in ['auto', 'automatic']:
        dims = set(np.shape(img.dataobj))
        if 1 < len(dims) < 4 and 1 in dims:
            return np.squeeze(np.asarray(img.dataobj))
        else:
            return img
    else:
        raise ValueError('unrecognized \'to\' argument \'%s\'' % to) 
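
A small round-trip check of load_nifti; this sketch assumes nibabel is installed and the function above is in scope, and it writes a throwaway image to a temporary path:

import os, tempfile
import numpy as np
import nibabel as nib

tmp = os.path.join(tempfile.mkdtemp(), 'example.nii.gz')
nib.save(nib.Nifti1Image(np.zeros((4, 4, 4), dtype=np.float32), np.eye(4)), tmp)
print(np.shape(load_nifti(tmp, to='data')))   # (4, 4, 4)
print(load_nifti(tmp, to='affine'))           # 4x4 identity affine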
Example #22
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def curve_intersection(c1, c2, grid=16):
    '''
    curve_intersection(c1, c2) yields the parametric distances (t1, t2) such that c1(t1) == c2(t2).
      
    The optional parameter grid may specify the number of grid-points
    to use in the initial search for a start-point (default: 16).
    '''
    from scipy.optimize import minimize
    from neuropythy.geometry import segment_intersection_2D
    if c1.coordinates.shape[1] > c2.coordinates.shape[1]:
        (t1,t2) = curve_intersection(c2, c1, grid=grid)
        return (t2,t1)
    # before doing a search, see if there are literal exact intersections of the segments
    x1s  = c1.coordinates.T
    x2s  = c2.coordinates
    for (ts,te,xs,xe) in zip(c1.t[:-1], c1.t[1:], x1s[:-1], x1s[1:]):
        pts = segment_intersection_2D((xs,xe), (x2s[:,:-1], x2s[:,1:]))
        ii = np.where(np.isfinite(pts[0]))[0]
        if len(ii) > 0:
            ii = ii[0]
            def f(t): return np.sum((c1(t[0]) - c2(t[1]))**2)
            t01 = 0.5*(ts + te)
            t02 = 0.5*(c2.t[ii] + c2.t[ii+1])
            (t1,t2) = minimize(f, (t01, t02)).x
            return (t1,t2)
    if pimms.is_vector(grid): (ts1,ts2) = [c.t[0] + (c.t[-1] - c.t[0])*grid for c in (c1,c2)]
    else:                     (ts1,ts2) = [np.linspace(c.t[0], c.t[-1], grid) for c in (c1,c2)]
    (pts1,pts2) = [c(ts) for (c,ts) in zip([c1,c2],[ts1,ts2])]
    ds = np.sqrt([np.sum((pts2.T - pp)**2, axis=1) for pp in pts1.T])
    (ii,jj) = np.unravel_index(np.argmin(ds), ds.shape)
    (t01,t02) = (ts1[ii], ts2[jj])
    ttt = []
    def f(t): return np.sum((c1(t[0]) - c2(t[1]))**2)
    (t1,t2) = minimize(f, (t01, t02)).x
    return (t1,t2) 
Example #23
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def __repr__(self):
        return 'CurveSpline(<%d points>, order=%d, %f <= t <= %f)' % (
            self.coordinates.shape[1],
            self.order, self.t[0], self.t[-1]) 
Example #24
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def t(distances,coordinates):
        n = coordinates.shape[1]
        if distances is None: distances = np.ones(n - 1)
        t = np.cumsum(np.pad(distances, (1,0), 'constant'))
        t.setflags(write=False)
        return t 
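
The snippet above reads as a standalone helper that turns per-segment distances into cumulative parameter values; a tiny check, assuming it is defined as shown:

import numpy as np

coords = np.zeros((2, 4))                     # 4 points on a 2-D curve
print(t(None, coords))                        # [0. 1. 2. 3.] -- unit spacing by default
print(t(np.array([2.0, 0.5, 0.5]), coords))   # [0.  2.  2.5 3. ]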
Example #25
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def check_distances(distances, coordinates, periodic):
        if distances is None: return True
        if len(distances) != coordinates.shape[1] - 1:
            raise ValueError('Distances must be diffs of coordinates')
        return True 
Example #26
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def coordinates(x):
        'curve.coordinates is the seed coordinate matrix for the given curve.'
        x = np.asarray(x)
        assert(len(x.shape) == 2)
        if x.shape[0] != 2: x = x.T
        assert(x.shape[0] == 2)
        return pimms.imm_array(x) 
Example #27
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def nanlog(x, null=np.nan):
    '''
    nanlog(x) is equivalent to numpy.log(x) except that it avoids calling log on 0 and non-finite
      values; in place of these values, it returns the value null (which is nan by default).
    '''
    x = np.asarray(x)
    ii0 = np.where(np.isfinite(x))
    ii  = np.where(x[ii0] > 0)[0]
    if len(ii) == numel(x): return np.log(x)
    res = np.full(x.shape, null)
    ii = tuple([u[ii] for u in ii0])
    res[ii] = np.log(x[ii])
    return res 
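
For example (assuming nanlog and the module-level numel helper it uses are in scope):

import numpy as np

x = np.array([np.e, 1.0, 0.0, -3.0, np.inf])
print(nanlog(x))             # [ 1.  0. nan nan nan]
print(nanlog(x, null=-1.0))  # [ 1.  0. -1. -1. -1.]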
Example #28
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def part(x, *args):
    '''
    part(x, ii, jj...) is equivalent to x[ii, jj...] if x is a sparse matrix or numpy array and is
      equivalent to np.asarray(x)[ii][:, jj][...] if x is not. If only one argument is passed and
      it is a tuple, then it is passed like x[ii] alone.

    The part function is compatible with slices (though they must be entered using the slice(...)
    syntax rather than the : syntax) and with Ellipsis.
    '''
    n = len(args)
    sl = slice(None)
    if sps.issparse(x):
        if n == 1: return x[args[0]]
        elif n > 2: raise ValueError('Too many indices for sparse matrix')
        (ii,jj) = args
        if   ii is Ellipsis: ii = sl
        elif jj is Ellipsis: jj = sl
        ni = pimms.is_number(ii)
        nj = pimms.is_number(jj)
        if   ni and nj: return x[ii,jj]
        elif ni:        return x[ii,jj].toarray()[0]
        elif nj:        return x[ii,jj].toarray()[:,0]
        else:           return x[ii][:,jj]
    else:
        x = np.asarray(x)
        if n == 1: return x[args[0]]
        i0 = []
        for (k,arg) in enumerate(args):
            if arg is Ellipsis:
                # special case...
                #if Ellipsis in args[ii+1:]: raise ValueError('only one ellipsis allowed per part')
                left = n - k - 1
                i0 = [sl for _ in range(len(x.shape) - left)]
            else:
                x = x[tuple(i0 + [arg])]
                if not pimms.is_number(arg): i0.append(sl)
        return x 
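
Usage of part with dense input (assuming the function and its pimms/scipy imports are available as in the source module):

import numpy as np

x = np.arange(12).reshape(3, 4)
print(part(x, slice(0, 2), [1, 2]))   # rows 0-1, columns 1-2 -> [[1 2] [5 6]]
print(part(x, Ellipsis, 0))           # first column -> [0 4 8]
print(part(x, (1, 2)))                # a single tuple argument indexes directly -> 6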
Example #29
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def flatter(x, k=1):
    '''
    flatter(x) yields a numpy array equivalent to x but whose first dimension has been flattened.
    flatter(x, k) yields a numpy array whose first k dimensions have been flattened; if k is
      negative, the last k dimensions are flattened. If np.inf or -np.inf is passed, then this is
      equivalent to flattest(x). Note that flatter(x) is equivalent to flatter(x,1).
    flatter(x, 0) yields x.
    '''
    if k == 0: return x
    x = x.toarray() if sps.issparse(x) else np.asarray(x)
    if len(x.shape) - abs(k) < 2: return x.flatten()
    k += np.sign(k)
    if k > 0: return np.reshape(x, (-1,) + x.shape[k:])
    else:     return np.reshape(x, x.shape[:k] + (-1,)) 
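
For instance (assuming flatter and its scipy.sparse import are in scope):

import numpy as np

x = np.zeros((2, 3, 4))
print(flatter(x).shape)       # (6, 4)  -- first two dimensions merged
print(flatter(x, -1).shape)   # (2, 12) -- last two dimensions merged
print(flatter(x, 0).shape)    # (2, 3, 4) -- unchanged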
Example #30
Source File: TensorFlowInterface.py    From IntroToDeepLearning with MIT License
def initialize(self,std=1.0,bias=0.1):
		with tf.variable_scope(self.name):
			self.W = weightVariable(self.shape,std=std)		# YxX patch, Z contrast, outputs to N neurons
			self.b = biasVariable([self.shape[-1]],bias=bias)	# N bias variables to go with the N neurons