Python numpy.append() Examples

The following code examples show how numpy.append() is used in real open-source projects. The source file, project, and license are noted above each example.
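Before diving into the examples, it helps to keep numpy.append()'s basic behavior in mind: it never modifies its inputs, it returns a new array, it flattens both arguments when no axis is given, and when an axis is given the arguments must already have matching shapes along the other axes. A minimal sketch:

import numpy as np

a = np.array([1, 2, 3])
np.append(a, [4, 5])              # new array [1 2 3 4 5]; a is unchanged
m = np.array([[1, 2], [3, 4]])
np.append(m, [5, 6])              # no axis: result is flattened to [1 2 3 4 5 6]
np.append(m, [[5, 6]], axis=0)    # with axis=0: shapes must match, result is 3x2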
Example #1
Source File: test_operator_gpu.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_embedding_with_type():
    def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
        NVD = [[20, 10, 20], [200, 10, 300]]
        for N, V, D in NVD:
            sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
            ctx_list = []
            for data_type in data_types:
                for weight_type in weight_types:
                    ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
                        'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
                    ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
                        'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
            arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
            check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
                              arg_params=arg_params)

    data_types = [np.float16, np.float32, np.float64, np.int32]
    weight_types = [np.float16, np.float32, np.float64]
    test_embedding_helper(data_types, weight_types, 5, 5)
    data_types = [np.uint8]
    weight_types = [np.float16, np.float32, np.float64]
    test_embedding_helper(data_types, weight_types, 0, 5) 
Example #2
Source File: base.py    From pymesh with MIT License
def join(self, another):
        """

        :param m: BaseMesh
        :return:
        """
        if another is None:
            raise AttributeError("another BaseMesh instance is required")

        if not isinstance(another, BaseMesh):
            raise TypeError("anther must be an instance of BaseMesh")

        self.data = numpy.append(self.data, another.data)
        self.normals = numpy.append(self.normals, another.normals, axis=0)
        self.vectors = numpy.append(self.vectors, another.vectors, axis=0)
        self.attr = numpy.append(self.attr, another.attr, axis=0)
        return self 
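Note the mix of calls in this example: numpy.append without an axis flattens its inputs, which suits a flat per-facet record array like data, while normals, vectors and attr are appended with axis=0 so facets are stacked along the first dimension rather than flattened. A small illustration with made-up arrays (not pymesh's actual shapes):

import numpy as np

normals_a = np.zeros((2, 3))
normals_b = np.ones((3, 3))
np.append(normals_a, normals_b, axis=0).shape   # (5, 3): rows stacked
np.append(normals_a, normals_b).shape           # (15,): everything flattened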
Example #3
Source File: gtf_utils.py    From models with MIT License
def add_exon(self, chrom, strand, start, stop):
        if strand != self.strand or chrom != self.chrom:
            print("The exon has different chrom or strand to the transcript.")
            return
        _exon = np.array([start, stop], "int").reshape(1, 2)
        self.exons = np.append(self.exons, _exon, axis=0)
        self.exons = np.sort(self.exons, axis=0)
        self.tranL += abs(int(stop) - int(start) + 1)
        self.exonNum += 1

        self.seglen = np.zeros(self.exons.shape[0] * 2 - 1, "int")
        self.seglen[0] = self.exons[0, 1] - self.exons[0, 0] + 1
        for i in range(1, self.exons.shape[0]):
            self.seglen[i * 2 - 1] = self.exons[i, 0] - self.exons[i - 1, 1] - 1
            self.seglen[i * 2] = self.exons[i, 1] - self.exons[i, 0] + 1

        if ["-", "-1", "0", 0, -1].count(self.strand) > 0:
            self.seglen = self.seglen[::-1] 
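The reshape(1, 2) above is what lets np.append treat the new [start, stop] pair as one row: with axis=0 both operands must have the same number of columns. A sketch of the pattern on its own, with toy coordinates:

import numpy as np

exons = np.array([[100, 200]], dtype=int)      # existing (N, 2) table of [start, stop] rows
new_exon = np.array([50, 80], dtype=int).reshape(1, 2)
exons = np.append(exons, new_exon, axis=0)     # shape (2, 2)
exons = np.sort(exons, axis=0)                 # column-wise sort, as in add_exon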
Example #4
Source File: ModelingCloth.py    From Modeling-Cloth with MIT License
def zxy_grid(co_y, tymin, tymax, subs, c, t, c_peat, t_peat):
    # create a linspace grid between the bottom and top of the tri z range
    #subs = 7
    t_min = np.min(tymin)
    t_max = np.max(tymax)
    divs = np.linspace(t_min, t_max, num=subs, dtype=np.float32)            
    
    # figure out which triangles and which co are in each section
    co_bools = (co_y > divs[:-1][:, nax]) & (co_y < divs[1:][:, nax])
    tri_bools = (tymin < divs[1:][:, nax]) & (tymax > divs[:-1][:, nax])

    for i, j in zip(co_bools, tri_bools):
        if (np.sum(i) > 0) & (np.sum(j) > 0):
            c3 = c[i]
            t3 = t[j]
        
            c_peat.append(np.repeat(c3, t3.shape[0]))
            t_peat.append(np.tile(t3, c3.shape[0])) 
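The repeat/tile pairing at the end is a compact way to enumerate every (point, triangle) candidate pair within a grid slice: np.repeat duplicates each point index once per triangle, and np.tile cycles the triangle indices once per point. A tiny illustration with made-up indices:

import numpy as np

c3 = np.array([0, 1])        # point indices in one slice
t3 = np.array([7, 8, 9])     # triangle indices in the same slice
np.repeat(c3, t3.shape[0])   # [0 0 0 1 1 1]
np.tile(t3, c3.shape[0])     # [7 8 9 7 8 9]  -> together they list all 6 pairs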
Example #5
Source File: dataloader.py    From models with MIT License
def add_exon(self, chrom, strand, start, stop):
        if strand != self.strand or chrom != self.chrom:
            print("The exon has different chrom or strand to the transcript.")
            return
        _exon = np.array([start, stop], "int").reshape(1,2)
        self.exons = np.append(self.exons, _exon, axis=0)
        self.exons = np.sort(self.exons, axis=0)
        self.tranL += abs(int(stop) - int(start) + 1)
        self.exonNum += 1


        self.seglen = np.zeros(self.exons.shape[0] * 2 - 1, "int")
        self.seglen[0] = self.exons[0,1]-self.exons[0,0] + 1
        for i in range(1, self.exons.shape[0]):
            self.seglen[i*2-1] = self.exons[i,0]-self.exons[i-1,1] - 1
            self.seglen[i*2] = self.exons[i,1]-self.exons[i,0] + 1

        if ["-","-1","0",0,-1].count(self.strand) > 0:
            self.seglen = self.seglen[::-1] 
Example #6
Source File: TargetList.py    From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def loadAliasFile(self):
        """
        Args:
        Returns:
            alias ():
                list 
        """
        #OLD aliasname = 'alias_4_11_2019.pkl'
        aliasname = 'alias_10_07_2019.pkl'
        tmp1 = inspect.getfile(self.__class__).split('/')[:-2]
        tmp1.append('util')
        self.classpath = '/'.join(tmp1)
        #self.classpath = os.path.split(inspect.getfile(self.__class__))[0]
        #vprint(inspect.getfile(self.__class__))
        self.alias_datapath = os.path.join(self.classpath, aliasname)
        #Load pkl and outspec files
        try:
            with open(self.alias_datapath, 'rb') as f:#load from cache
                alias = pickle.load(f, encoding='latin1')
        except:
            vprint('Failed to open fullPathPKL %s'%self.alias_datapath)
            pass
        return alias
    ########################################################## 
Example #7
Source File: TargetList.py    From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def stellar_mass(self):
        """Populates target list with 'true' and 'approximate' stellar masses
        
        This method calculates stellar mass via the formula relating absolute V
        magnitude and stellar mass.  The values are in units of solar mass.

        Function called by reset sim
        
        """
        
        # 'approximate' stellar mass
        self.MsEst = (10.**(0.002456*self.MV**2 - 0.09711*self.MV + 0.4365))*u.solMass
        # normally distributed 'error'
        err = (np.random.random(len(self.MV))*2. - 1.)*0.07
        self.MsTrue = (1. + err)*self.MsEst
        
        # if additional filters are desired, need self.catalog_atts fully populated
        if not hasattr(self.catalog_atts,'MsEst'):
            self.catalog_atts.append('MsEst')
        if not hasattr(self.catalog_atts,'MsTrue'):
            self.catalog_atts.append('MsTrue') 
Example #8
Source File: create_encodings.py    From Face-Recognition with MIT License
def create_dataset(training_dir_path, labels):
    X = []
    for i in _zipped_folders_labels_images(training_dir_path, labels):
        for fileName in i[2]:
            file_path = os.path.join(i[0], fileName)
            img = face_recognition_api.load_image_file(file_path)
            imgEncoding = face_recognition_api.face_encodings(img)

            if len(imgEncoding) > 1:
                print('\x1b[0;37;43m' + 'More than one face found in {}. Only considering the first face.'.format(file_path) + '\x1b[0m')
            if len(imgEncoding) == 0:
                print('\x1b[0;37;41m' + 'No face found in {}. Ignoring file.'.format(file_path) + '\x1b[0m')
            else:
                print('Encoded {} successfully.'.format(file_path))
                X.append(np.append(imgEncoding[0], i[1]))
    return X 
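Here np.append is used without an axis simply to tack the label onto the end of the face-encoding vector, giving one flat row per image. One subtlety worth knowing, shown below with a made-up encoding and label: mixing floats with a string label upcasts the whole row to a string dtype.

import numpy as np

encoding = np.random.rand(128)        # stand-in for a face encoding
row = np.append(encoding, 'alice')    # label appended at the end
row.shape                             # (129,)
row.dtype                             # a string dtype: the floats were upcast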
Example #9
Source File: buffers_of_buffers.py    From PyOptiX with MIT License
def create_random_buffer(max_width, max_height):
    scale = randf()
    w = int(max(max_width * scale, 1))
    h = int(max(max_height * scale, 1))

    arr = []
    red, green, blue = randf(), randf(), randf()

    for y in range(h):
        arr.append([])
        for x in range(w):
            if randf() < 0.1:
                arr[y].append([red * 255.0, green * 255.0, blue * 255.0, 255])
            else:
                arr[y].append([255, 255, 255, 0])

    return Buffer.from_array(np.array(arr, dtype=np.uint8), buffer_type='i', drop_last_dim=True) 
Example #10
Source File: test_operator_gpu.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_elementwisesum_with_type():
    dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
                 [mx.cpu(0), [np.float64, np.float32]] ]
    for num_args in range(1, 6):
        ews_arg_shape = {}
        for i in range(num_args):
            ews_arg_shape['ews_arg'+str(i)] = (2, 10)
        sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
        ctx_list = []
        for dev, types in dev_types:
            for dtype in types:
                ews_arg_dtype = {'type_dict':{}}
                for i in range(num_args):
                    ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype
                ctx_elem = {'ctx': dev}
                ctx_elem.update(ews_arg_shape)
                ctx_elem.update(ews_arg_dtype)
                ctx_list.append(ctx_elem)
    check_consistency(sym, ctx_list) 
Example #11
Source File: filters.py    From HardRLWithYoutube with MIT License
def __call__(self, x, update=True):
        self.stack.append(x)
        while len(self.stack) < self.stack.maxlen:
            self.stack.append(x)
        return np.concatenate(self.stack, axis=-1) 
Example #12
Source File: filters.py    From HardRLWithYoutube with MIT License
def __call__(self, x, update=True):
        return np.append(x, self.count/100.0) 
Example #13
Source File: TargetList.py    From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def setOfStarsWithKnownPlanets(self, data):
        """ From the data dict created in this script, this method extracts the set of unique star names
        Args:
            data (dict):
                dict containing the pl_hostname of each star
        Returns:
            list (list):
                list of star names with a known planet

        """
        starNames = list()
        for i in np.arange(len(data)):
            starNames.append(data[i]['pl_hostname'])
        return list(set(starNames)) 
Example #14
Source File: trpo_mpi.py    From HardRLWithYoutube with MIT License
def add_vtarg_and_adv(seg, gamma, lam):
    new = np.append(seg["new"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1
    vpred = np.append(seg["vpred"], seg["nextvpred"])
    T = len(seg["rew"])
    seg["adv"] = gaelam = np.empty(T, 'float32')
    rew = seg["rew"]
    lastgaelam = 0
    for t in reversed(range(T)):
        nonterminal = 1-new[t+1]
        delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]
        gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
    seg["tdlamret"] = seg["adv"] + seg["vpred"] 
Example #15
Source File: TargetList.py    From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def fgk_filter(self):
        """Includes only F, G, K spectral type stars in Target List
        
        """
        
        spec = np.array(list(map(str, self.Spec)))
        iF = np.where(np.core.defchararray.startswith(spec, 'F'))[0]
        iG = np.where(np.core.defchararray.startswith(spec, 'G'))[0]
        iK = np.where(np.core.defchararray.startswith(spec, 'K'))[0]
        i = np.append(np.append(iF, iG), iK)
        i = np.unique(i)
        self.revise_lists(i) 
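Chaining np.append on 1-D index arrays, as done here, is equivalent to a single np.concatenate call, which also accepts more than two arrays at once; a quick sketch of the equivalence with made-up indices:

import numpy as np

iF, iG, iK = np.array([0, 3]), np.array([1]), np.array([2, 3])
np.unique(np.append(np.append(iF, iG), iK))    # [0 1 2 3]
np.unique(np.concatenate([iF, iG, iK]))        # same result in one call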
Example #16
Source File: pposgd_simple.py    From lirpg with MIT License
def add_vtarg_and_adv(seg, gamma, lam):
    """
    Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
    """
    new = np.append(seg["new"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1
    vpred = np.append(seg["vpred"], seg["nextvpred"])
    T = len(seg["rew"])
    seg["adv"] = gaelam = np.empty(T, 'float32')
    rew = seg["rew"]
    lastgaelam = 0
    for t in reversed(range(T)):
        nonterminal = 1-new[t+1]
        delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]
        gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
    seg["tdlamret"] = seg["adv"] + seg["vpred"] 
Example #17
Source File: trpo_mpi.py    From lirpg with MIT License
def add_vtarg_and_adv(seg, gamma, lam):
    new = np.append(seg["new"], 0)  # last element is only used for last vtarg, but we already zeroed it if last new = 1
    vpred = np.append(seg["vpred"], seg["nextvpred"])
    T = len(seg["rew"])
    seg["adv"] = gaelam = np.empty(T, 'float32')
    rew = seg["rew"]
    lastgaelam = 0
    for t in reversed(range(T)):
        nonterminal = 1-new[t+1]
        delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]
        gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
    seg["tdlamret"] = seg["adv"] + seg["vpred"] 
Example #18
Source File: nn.py    From Kaggler with MIT License
def predict_raw(self, X):
        """Predict targets for a feature matrix.

        Args:
            X (np.array of float): feature matrix for prediction
        """
        # b -- bias for the input and h layers
        b = np.ones((X.shape[0], 1))
        w2 = self.w[-(self.h + 1):].reshape(self.h + 1, 1)
        w1 = self.w[:-(self.h + 1)].reshape(self.i + 1, self.h)

        # Make X to have the same number of columns as self.i.
        # Because of the sparse matrix representation, X for prediction can
        # have a different number of columns.
        if X.shape[1] > self.i:
            # If X has more columns, cut extra columns.
            X = X[:, :self.i]
        elif X.shape[1] < self.i:
            # If X has less columns, cut the rows of the weight matrix between
            # the input and h layers instead of X itself because the SciPy
            # sparse matrix does not support .set_shape() yet.
            idx = list(range(X.shape[1]))  # list() so .append() below works on Python 3
            idx.append(self.i)        # Include the last row for the bias
            w1 = w1[idx, :]

        if sparse.issparse(X):
            return np.hstack((sigm(sparse.hstack((X, b)).dot(w1)), b)).dot(w2)
        else:
            return np.hstack((sigm(np.hstack((X, b)).dot(w1)), b)).dot(w2) 
Example #19
Source File: function_helper.py    From TradzQAI with Apache License 2.0
def fill_for_noncomputable_vals(input_data, result_data):
    non_computable_values = np.repeat(
        np.nan, len(input_data) - len(result_data)
        )
    filled_result_data = np.append(non_computable_values, result_data)
    return filled_result_data 
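This is the usual way to keep an indicator aligned with its input series: prepend one NaN for every leading position that could not be computed so both arrays end up the same length. A small sketch using a 3-period moving average as the stand-in indicator:

import numpy as np

prices = np.array([10.0, 11.0, 12.0, 13.0, 14.0])
sma3 = np.convolve(prices, np.ones(3) / 3, mode='valid')              # 3 values
padded = np.append(np.repeat(np.nan, len(prices) - len(sma3)), sma3)  # length 5, two leading NaNs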
Example #20
Source File: create_encodings.py    From Face-Recognition with MIT License
def _filter_image_files(training_dir_path):
    exts = [".jpg", ".jpeg", ".png"]

    training_folder_files_list = []
    for list_files in _get_each_labels_files(training_dir_path):
        l = []
        for file in list_files:
            imageName, ext = os.path.splitext(file)
            if ext.lower() in exts:
                l.append(file)
        training_folder_files_list.append(l)

    return training_folder_files_list 
Example #21
Source File: ssresnet.py    From deep-models with Apache License 2.0
def load_data(files, data_dir, label_count):
  data, labels = load_data_one(data_dir + '/' + files[0])
  for f in files[1:]:
    data_n, labels_n = load_data_one(data_dir + '/' + f)
    data = np.append(data, data_n, axis=0)
    labels = np.append(labels, labels_n, axis=0)
  labels = np.array([ [ float(i == label) for i in range(label_count) ] for label in labels ])  # range() replaces Python 2's xrange
  return data, labels 
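Worth noting: calling np.append inside a loop copies the accumulated array on every iteration, so loading many files this way grows quadratically with the total data size. If that matters, a common alternative (a sketch only, not how deep-models does it) is to collect the chunks in a list and concatenate once:

import numpy as np

def load_data_concat(files, data_dir, label_count, load_data_one):
    # load_data_one is assumed to return (data, integer_labels) per file, as above
    chunks = [load_data_one(data_dir + '/' + f) for f in files]
    data = np.concatenate([d for d, _ in chunks], axis=0)
    labels = np.concatenate([l for _, l in chunks], axis=0)
    labels = np.eye(label_count)[labels]   # one-hot, same as the float(i == label) comprehension
    return data, labels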
Example #22
Source File: bcfstore.py    From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def __init__(self, filename):
        self._filename = filename
        print('Loading BCF file to memory ... ' + filename)
        file = open(filename, 'rb')
        # numpy.frombuffer replaces the deprecated numpy.fromstring for binary data
        size = int(numpy.frombuffer(file.read(8), dtype=numpy.uint64)[0])
        file_sizes = numpy.frombuffer(file.read(8 * size), dtype=numpy.uint64)
        self._offsets = numpy.append(numpy.uint64(0),
                                     numpy.add.accumulate(file_sizes))
        self._memory = file.read()
        file.close() 
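The offsets array is built by prepending a zero to the running sum of record sizes, so _offsets[i] marks where record i starts and _offsets[i + 1] where it ends. A toy sketch of that step:

import numpy as np

file_sizes = np.array([10, 25, 5], dtype=np.uint64)
offsets = np.append(np.uint64(0), np.add.accumulate(file_sizes))
offsets    # [ 0 10 35 40]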
Example #23
Source File: resnet.py    From deep-models with Apache License 2.0
def load_data(files, data_dir, label_count):
  data, labels = load_data_one(data_dir + '/' + files[0])
  for f in files[1:]:
    data_n, labels_n = load_data_one(data_dir + '/' + f)
    data = np.append(data, data_n, axis=0)
    labels = np.append(labels, labels_n, axis=0)
  labels = np.array([ [ float(i == label) for i in range(label_count) ] for label in labels ])  # range() replaces Python 2's xrange
  return data, labels 
Example #24
Source File: ModelingCloth.py    From Modeling-Cloth with MIT License
def get_spring_mix(ob, eidx):
    rs = []
    ls = []
    minrl = []
    for i in eidx:
        r = eidx[eidx == i[1]].shape[0]
        l = eidx[eidx == i[0]].shape[0]
        rs.append(min(r, l))
        ls.append(min(r, l))
    mix = 1 / np.array(rs + ls) ** 1.2
    
    return mix 
Example #25
Source File: ModelingCloth.py    From Modeling-Cloth with MIT License
def collision_object_update(self, context):
    """Updates the collider object"""    
    collide = self.modeling_cloth_object_collision
    # remove objects from dict if deleted
    cull_list = []
    if 'colliders' in extra_data:
        if extra_data['colliders'] is not None:   
            if not collide:
                if self.name in extra_data['colliders']:
                    del(extra_data['colliders'][self.name])
            for i in extra_data['colliders']:
                remove = True
                if i in bpy.data.objects:
                    if bpy.data.objects[i].type == "MESH":
                        if bpy.data.objects[i].modeling_cloth_object_collision:
                            remove = False
                if remove:
                    cull_list.append(i)
    for i in cull_list:
        del(extra_data['colliders'][i])

    # add class to dict if true.
    if collide:    
        if 'colliders' not in extra_data:    
            extra_data['colliders'] = {}
        if extra_data['colliders'] is None:
            extra_data['colliders'] = {}
        extra_data['colliders'][self.name] = create_collider()

    
# cloth object detect updater: 
Example #26
Source File: filters.py    From lirpg with MIT License
def __call__(self, x, update=True):
        return np.append(x, self.count/100.0)