Python numpy.split() Examples

The following are 30 code examples of numpy.split(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the numpy module, or try the search function.
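Before the project examples, a minimal sketch of what numpy.split() itself does: it cuts an array into sub-arrays along one axis, given either an integer number of equal sections (a ValueError is raised if the axis length does not divide evenly) or an explicit list of split indices. The forgiving variant, numpy.array_split(), accepts uneven section counts.

import numpy as np

x = np.arange(9.0)
np.split(x, 3)        # three equal pieces of length 3
np.split(x, [3, 5])   # split at indices 3 and 5: pieces of length 3, 2 and 4
np.split(np.arange(12).reshape(3, 4), 2, axis=1)  # two (3, 2) column blocks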
Example #1
Source File: dataloader.py    From Generative-adversarial-Nets-in-NLP with Apache License 2.0
def build_vocabulary(self, path, datafile, vocab_size, char=True):
		files = codecs.open(datafile, 'r', encoding='utf-8').read()
		if char:
			words = []
			files = files.split('\n')
			for word in files:
				word = tuple(word)
				words.append(word)
		else:
			words = files.split()
		wordcount = Counter(c for line in words for c in line if c != ' ')
		with codecs.open(path, 'w', encoding='utf-8') as f:
			f.write("{}\t1000000000\n{}\t1000000000\n{}\t1000000000\n{}\t1000000000\n{}\t1000000000\n".format("<PAD>", "<UNK>", "<SOS>", "<EOS>", "<SPA>"))
			for word, count in wordcount.most_common(len(wordcount)-5):
				f.write("{}\t{}\n".format(word, count))
		self.vocab_size = len(wordcount) - 5 
Example #2
Source File: dmc.py    From pyqmc with MIT License
def dmc_propagate_parallel(wf, configs, weights, client, npartitions, *args, **kwargs):
    # Shard the walker configurations and their weights, one chunk per worker.
    config = configs.split(npartitions)
    weight = np.split(weights, npartitions)
    runs = [client.submit(dmc_propagate, wf, conf, wt, *args, **kwargs)
            for conf, wt in zip(config, weight)]
    allresults = list(zip(*[r.result() for r in runs]))
    configs.join(allresults[1])
    weights = np.concatenate(allresults[2])
    # Combine per-worker block averages, weighting each worker by its share of
    # configurations and by its total walker weight.
    confweight = np.array([len(c.configs) for c in config], dtype=float)
    confweight_avg = confweight / (np.mean(confweight) * npartitions)
    weight = np.array([w['weight'] for w in allresults[0]])
    weight_avg = weight / np.mean(weight)
    block_avg = {}
    for k in allresults[0][0].keys():
        block_avg[k] = np.sum([res[k] * ww * cw
                               for res, cw, ww in zip(allresults[0], confweight_avg, weight_avg)],
                              axis=0)
    block_avg['weight'] = np.mean(weight)
    return block_avg, configs, weights
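The sharding above only works because npartitions divides the number of walkers evenly; np.split() with an integer count is strict about that. A minimal round-trip sketch of the same pattern, without the pyqmc configs object or the distributed client:

import numpy as np

weights = np.ones(12)
chunks = np.split(weights, 4)        # four arrays of length 3
# np.split(weights, 5) would raise ValueError;
# np.array_split(weights, 5) returns chunks of length 3, 3, 2, 2, 2 instead.
restored = np.concatenate(chunks)    # reassemble, as the function above does
assert np.array_equal(restored, weights)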
Example #3
Source File: fancy.py    From brainforge with GNU General Public License v3.0
def backpropagate(self, delta) -> np.ndarray:
        shape = delta.shape
        delta = rtm(delta)

        h, t, c = np.split(self.gates, 3, axis=1)

        dh = self.activation.backward(h) * t * delta
        dt = sigmoid.backward(t) * h * delta
        dc = sigmoid.backward(c) * self.inputs * delta
        dx = c * delta

        dgates = np.concatenate((dh, dt, dc), axis=1)
        self.nabla_w = self.inputs.T.dot(dgates)
        self.nabla_b = dgates.sum(axis=0)

        return (dgates.dot(self.weights.T) + dx).reshape(shape) 
Example #4
Source File: deform.py    From dataflow with Apache License 2.0
def np_sample(img, coords):
    # a numpy implementation of ImageSample layer
    coords = np.maximum(coords, 0)
    coords = np.minimum(coords, np.array([img.shape[0] - 1, img.shape[1] - 1]))

    lcoor = np.floor(coords).astype('int32')
    ucoor = lcoor + 1
    ucoor = np.minimum(ucoor, np.array([img.shape[0] - 1, img.shape[1] - 1]))
    diff = coords - lcoor
    neg_diff = 1.0 - diff

    lcoory, lcoorx = np.split(lcoor, 2, axis=2)
    ucoory, ucoorx = np.split(ucoor, 2, axis=2)
    diff = np.repeat(diff, 3, 2).reshape((diff.shape[0], diff.shape[1], 2, 3))
    neg_diff = np.repeat(neg_diff, 3, 2).reshape((diff.shape[0], diff.shape[1], 2, 3))
    diffy, diffx = np.split(diff, 2, axis=2)
    ndiffy, ndiffx = np.split(neg_diff, 2, axis=2)

    ret = img[lcoory, lcoorx, :] * ndiffx * ndiffy + \
        img[ucoory, ucoorx, :] * diffx * diffy + \
        img[lcoory, ucoorx, :] * ndiffy * diffx + \
        img[ucoory, lcoorx, :] * diffy * ndiffx
    return ret[:, :, 0, :] 
Example #5
Source File: test_molparse_from_string.py    From QCElemental with BSD 3-Clause "New" or "Revised" License
def test_fragsep_error():

    with pytest.raises(qcelemental.ValidationError) as e:
        qcelemental.molparse.from_arrays(
            domain="qmvz",
            speclabel=True,
            elbl=["ar1", "42AR2"],
            fragment_multiplicities=[3, 3],
            fragment_separators=np.array(["1"]),
            geom_unsettled=[[], ["1", "bond"]],
            hint_types=[],
            units="Bohr",
            variables=[("bond", "3")],
        )

    assert "unable to perform trial np.split on geometry" in str(e.value) 
Example #6
Source File: dataloader.py    From Generative-adversarial-Nets-in-NLP with Apache License 2.0
def mini_batch(self, data_file):
		token_seqs = []
		with codecs.open(data_file, 'r', encoding='utf-8') as f:
			for line in f:
				line = line.strip('\n')
				parse_line = list(map(int, line.split()))
				if pm.REAL_WORLD_DATA:
					if len(parse_line) == pm.WGAN_SEQ_LENGTH:
						token_seqs.append(parse_line)
				else:
					if len(parse_line) == pm.SEQ_LENGTH:
						token_seqs.append(parse_line)

		self.num_batch = int(len(token_seqs) / self.batch_size)
		token_seqs = token_seqs[:self.num_batch * self.batch_size]
		self.token_sentences = np.array(token_seqs)
		self.sequence_batch = np.split(self.token_sentences, self.num_batch, 0)
		self.reset_pointer() 
Example #7
Source File: bounding_box.py    From TVQAplus with MIT License
def _split_into_xyxy(self):
        if self.mode == "xyxy":
            # xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)
            xmin, ymin, xmax, ymax = np.split(self.bbox, 4, axis=1)
            return xmin, ymin, xmax, ymax
        elif self.mode == "xywh":
            TO_REMOVE = 1
            xmin, ymin, w, h = np.split(self.bbox, 4, axis=1)
            return (
                xmin,
                ymin,
                # xmin + (w - TO_REMOVE).clamp(min=0),
                # ymin + (h - TO_REMOVE).clamp(min=0),
                xmin + np.clip(w - TO_REMOVE, 0, None),
                ymin + np.clip(h - TO_REMOVE, 0, None),
            )
        else:
            raise RuntimeError("Should not be here")

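A quick illustration of the split used above, assuming a plain (N, 4) numpy array of boxes: each of the four pieces keeps shape (N, 1), which is why expressions like xmin + np.clip(w - TO_REMOVE, 0, None) stay column vectors.

import numpy as np

boxes = np.array([[10, 20, 5, 5],
                  [15, 25, 8, 4]])              # xywh rows
xmin, ymin, w, h = np.split(boxes, 4, axis=1)   # four (2, 1) columns
xmax = xmin + np.clip(w - 1, 0, None)           # TO_REMOVE == 1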
Example #8
Source File: np_box_ops.py    From object_detector_app with MIT License
def intersection(boxes1, boxes2):
  """Compute pairwise intersection areas between boxes.

  Args:
    boxes1: a numpy array with shape [N, 4] holding N boxes
    boxes2: a numpy array with shape [M, 4] holding M boxes

  Returns:
    a numpy array with shape [N, M] representing pairwise intersection areas
  """
  [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
  [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)

  all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
  all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
  intersect_heights = np.maximum(
      np.zeros(all_pairs_max_ymin.shape),
      all_pairs_min_ymax - all_pairs_max_ymin)
  all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
  all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
  intersect_widths = np.maximum(
      np.zeros(all_pairs_max_xmin.shape),
      all_pairs_min_xmax - all_pairs_max_xmin)
  return intersect_heights * intersect_widths 
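For concreteness, a small hand-checkable call of the function above: the first pair of boxes overlaps in a 1x1 region, the second pair not at all.

import numpy as np

boxes1 = np.array([[0., 0., 2., 2.]])   # [y_min, x_min, y_max, x_max]
boxes2 = np.array([[1., 1., 3., 3.],
                   [5., 5., 6., 6.]])
intersection(boxes1, boxes2)            # -> array([[1., 0.]])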
Example #9
Source File: np_box_ops.py    From vehicle_counting_tensorflow with MIT License
def intersection(boxes1, boxes2):
  """Compute pairwise intersection areas between boxes.

  Args:
    boxes1: a numpy array with shape [N, 4] holding N boxes
    boxes2: a numpy array with shape [M, 4] holding M boxes

  Returns:
    a numpy array with shape [N, M] representing pairwise intersection areas
  """
  [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
  [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)

  all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
  all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
  intersect_heights = np.maximum(
      np.zeros(all_pairs_max_ymin.shape),
      all_pairs_min_ymax - all_pairs_max_ymin)
  all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
  all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
  intersect_widths = np.maximum(
      np.zeros(all_pairs_max_xmin.shape),
      all_pairs_min_xmax - all_pairs_max_xmin)
  return intersect_heights * intersect_widths 
Example #10
Source File: np_box_ops.py    From DOTA_models with Apache License 2.0
def intersection(boxes1, boxes2):
  """Compute pairwise intersection areas between boxes.

  Args:
    boxes1: a numpy array with shape [N, 4] holding N boxes
    boxes2: a numpy array with shape [M, 4] holding M boxes

  Returns:
    a numpy array with shape [N, M] representing pairwise intersection areas
  """
  [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
  [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)

  all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
  all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
  intersect_heights = np.maximum(
      np.zeros(all_pairs_max_ymin.shape),
      all_pairs_min_ymax - all_pairs_max_ymin)
  all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
  all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
  intersect_widths = np.maximum(
      np.zeros(all_pairs_max_xmin.shape),
      all_pairs_min_xmax - all_pairs_max_xmin)
  return intersect_heights * intersect_widths 
Example #11
Source File: vaegan_mxnet.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def KLDivergenceLoss():
    '''KL divergence loss between the encoded Gaussian and a standard normal.
    '''

    data = mx.sym.Variable('data')
    mu1, lv1 = mx.sym.split(data, num_outputs=2, axis=0)
    mu2 = mx.sym.zeros_like(mu1)
    lv2 = mx.sym.zeros_like(lv1)

    v1 = mx.sym.exp(lv1)
    v2 = mx.sym.exp(lv2)
    mu_diff_sq = mx.sym.square(mu1 - mu2)
    dimwise_kld = .5 * ((lv2 - lv1) + mx.symbol.broadcast_div(v1, v2)
                        + mx.symbol.broadcast_div(mu_diff_sq, v2) - 1.)
    KL = mx.symbol.sum(dimwise_kld, axis=1)

    KLloss = mx.symbol.MakeLoss(mx.symbol.mean(KL), name='KLloss')
    return KLloss
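The symbolic graph above encodes the closed-form KL divergence between two diagonal Gaussians; since mu2 and lv2 are zeros, it reduces to the usual VAE regularizer against a standard normal. A numpy restatement for checking the algebra (not part of the MXNet graph):

import numpy as np

def kl_vs_standard_normal(mu, logvar):
    # 0.5 * sum(-logvar + exp(logvar) + mu**2 - 1) per sample,
    # i.e. dimwise_kld above with mu2 = lv2 = 0
    return 0.5 * np.sum(-logvar + np.exp(logvar) + mu ** 2 - 1.0, axis=1)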
Example #12
Source File: uccsd.py    From pyscf with Apache License 2.0
def vector_to_amplitudes(vector, nmo, nocc):
    nocca, noccb = nocc
    nmoa, nmob = nmo
    nvira, nvirb = nmoa-nocca, nmob-noccb
    nocc = nocca + noccb
    nvir = nvira + nvirb
    nov = nocc * nvir
    size = nov + nocc*(nocc-1)//2*nvir*(nvir-1)//2
    if vector.size == size:
        #return ccsd.vector_to_amplitudes_s4(vector, nmo, nocc)
        raise RuntimeError('Input vector is a GCCSD vector')
    else:
        sizea = nocca * nvira + nocca*(nocca-1)//2*nvira*(nvira-1)//2
        sizeb = noccb * nvirb + noccb*(noccb-1)//2*nvirb*(nvirb-1)//2
        sections = np.cumsum([sizea, sizeb])
        veca, vecb, t2ab = np.split(vector, sections)
        t1a, t2aa = ccsd.vector_to_amplitudes_s4(veca, nmoa, nocca)
        t1b, t2bb = ccsd.vector_to_amplitudes_s4(vecb, nmob, noccb)
        t2ab = t2ab.copy().reshape(nocca,noccb,nvira,nvirb)
        return (t1a,t1b), (t2aa,t2ab,t2bb) 
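The np.cumsum()/np.split() pairing above is the standard idiom for unpacking a flat vector into variable-sized segments: np.split() expects cut offsets rather than segment sizes, so the sizes are accumulated and the final entry dropped.

import numpy as np

vec = np.arange(10.0)
sizes = (3, 5, 2)
sections = np.cumsum(sizes[:-1])    # array([3, 8]): offsets into vec
a, b, c = np.split(vec, sections)   # pieces of length 3, 5 and 2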
Example #13
Source File: test_ndarray_store_append.py    From arctic with GNU Lesser General Public License v2.1
def test_append_read_large_ndarray(library, fw_pointers_cfg):
    with FwPointersCtx(fw_pointers_cfg):
        dtype = np.dtype([('abc', 'int64')])
        ndarr = np.arange(50 * 1024 * 1024 / dtype.itemsize).view(dtype=dtype)
        assert len(ndarr.tostring()) > 16 * 1024 * 1024
        library.write('MYARR1', ndarr)
        # Exactly enough appends to trigger 2 re-compacts, so the result should be identical
        # to writing the whole array at once
        ndarr2 = np.arange(240).view(dtype=dtype)
        for n in np.split(ndarr2, 120):
            library.append('MYARR1', n)

        saved_arr = library.read('MYARR1').data
        assert np.all(np.concatenate([ndarr, ndarr2]) == saved_arr)

        library.write('MYARR2', np.concatenate([ndarr, ndarr2]))

        version1 = library._read_metadata('MYARR1')
        version2 = library._read_metadata('MYARR2')
        assert version1['append_count'] == version2['append_count']
        assert version1['append_size'] == version2['append_size']
        assert version1['segment_count'] == version2['segment_count']
        assert version1['up_to'] == version2['up_to'] 
Example #14
Source File: kccsd_uhf.py    From pyscf with Apache License 2.0
def vector_to_amplitudes(vec, nmo, nocc, nkpts=1):
    nocca, noccb = nocc
    nmoa, nmob = nmo
    nvira, nvirb = nmoa - nocca, nmob - noccb
    sizes = (nkpts*nocca*nvira, nkpts*noccb*nvirb,
             nkpts**3*nocca**2*nvira**2, nkpts**3*nocca*noccb*nvira*nvirb,
             nkpts**3*noccb**2*nvirb**2)
    sections = np.cumsum(sizes[:-1])
    t1a, t1b, t2aa, t2ab, t2bb = np.split(vec, sections)

    t1a = t1a.reshape(nkpts,nocca,nvira)
    t1b = t1b.reshape(nkpts,noccb,nvirb)
    t2aa = t2aa.reshape(nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira)
    t2ab = t2ab.reshape(nkpts,nkpts,nkpts,nocca,noccb,nvira,nvirb)
    t2bb = t2bb.reshape(nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb)
    return (t1a,t1b), (t2aa,t2ab,t2bb) 
Example #15
Source File: model.py    From dataiku-contrib with Apache License 2.0
def clip_boxes_graph(boxes, window):
    """
    boxes: [N, (y1, x1, y2, x2)]
    window: [4] in the form y1, x1, y2, x2
    """
    # Split
    wy1, wx1, wy2, wx2 = tf.split(window, 4)
    y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
    # Clip
    y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
    x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
    y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
    x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
    clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
    clipped.set_shape((clipped.shape[0], 4))
    return clipped 
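This example uses tf.split, but the same clipping is easy to state with np.split; a hedged numpy sketch (clip_boxes_np is not part of the project):

import numpy as np

def clip_boxes_np(boxes, window):
    wy1, wx1, wy2, wx2 = window                  # scalars, in y1, x1, y2, x2 order
    y1, x1, y2, x2 = np.split(boxes, 4, axis=1)  # four (N, 1) columns
    y1 = np.clip(y1, wy1, wy2)
    x1 = np.clip(x1, wx1, wx2)
    y2 = np.clip(y2, wy1, wy2)
    x2 = np.clip(x2, wx1, wx2)
    return np.concatenate([y1, x1, y2, x2], axis=1)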
Example #16
Source File: multislaterpbc.py    From pyqmc with MIT License
def evaluate_mos(self, aos, s):
        l = self._coefflookup[s]
        p = np.split(self.parameters[l], self.param_split[l], axis=-1)
        mo = [np.dot(ao, p[k]) for k, ao in enumerate(aos)]
        mo = np.concatenate(mo, axis=-1)
        return mo[..., self._det_occup[s]] 
Example #17
Source File: dataloader.py    From Generative-adversarial-Nets-in-NLP with Apache License 2.0
def load_vocabulary(self, path):
		vocab = [line.split()[0] for line in codecs.open(path, 'r', encoding='utf-8').read().splitlines()]
		word2idx = {word: idx for idx, word in enumerate(vocab)}
		idx2word = {word2idx[word]: word for word in word2idx}
		return word2idx, idx2word 
Example #18
Source File: dataloader.py    From Generative-adversarial-Nets-in-NLP with Apache License 2.0
def build_vocabulary(self):
		files = codecs.open(self.data_file, 'r', encoding='utf-8').read()
		words = files.split()
		wordcount = Counter(words)
		with codecs.open(self.path, 'w', encoding='utf-8') as f:
			f.write("{}\t1000000000\n{}\t1000000000\n{}\t1000000000\n{}\t1000000000\n".format("<PAD>", "<UNK>", "<SOS>", "<EOS>"))
			for word, count in wordcount.most_common(9996):
				f.write("{}\t{}\n".format(word, count)) 
Example #19
Source File: dataloader.py    From Generative-adversarial-Nets-in-NLP with Apache License 2.0
def build_vocabulary(self, path, datafile):
		files = codecs.open(datafile, 'r', encoding='utf-8').read()
		words = files.split()
		wordcount = Counter(words)
		with codecs.open(path, 'w', encoding='utf-8') as f:
			f.write("{}\t1000000000\n{}\t1000000000\n{}\t1000000000\n{}\t1000000000\n".format("<PAD>", "<UNK>", "<SOS>", "<EOS>"))
			for word, count in wordcount.most_common(len(wordcount)):
				f.write("{}\t{}\n".format(word, count)) 
Example #20
Source File: dataloader.py    From Generative-adversarial-Nets-in-NLP with Apache License 2.0
def load_vocabulary(self):
		vocab = [line.split()[0] for line in codecs.open(self.path, 'r', encoding='utf-8').read().splitlines()]
		word2idx = {word: idx for idx, word in enumerate(vocab)}
		idx2word = {word2idx[word]: word for word in word2idx}
		return word2idx, idx2word 
Example #21
Source File: dataloader.py    From Generative-adversarial-Nets-in-NLP with Apache License 2.0
def load_dataset(self):
		sentences = [line for line in codecs.open(self.data_file, 'r', encoding='utf-8').read().split('\n') if line]
		word2idx, idx2word = self.load_vocabulary()

		token_list, sources = [], []
		for source in sentences:
			if len(source.split()) >= 19:
				x = [word2idx.get(word, 1) for word in (" ".join(source.split()[:19]) + " <EOS>").split()]
			else:
				# x = [word2idx.get(word, 1) for word in (source + (19 - len(source.split())) * " <PAD>" + " <EOS>").split()]
				continue
			token_list.append(x)
			sources.append(source)
		return token_list, sources 
Example #22
Source File: dataloader.py    From Generative-adversarial-Nets-in-NLP with Apache License 2.0
def load_dataset(self, path, datafile):
		sentences = [line for line in codecs.open(datafile, 'r', encoding='utf-8').read().split('\n') if line]
		word2idx, idx2word = self.load_vocabulary(path)

		token_list, sources = [], []
		for source in sentences:
			x = [word2idx.get(word, 1) for word in (source + " <EOS>").split()]
			token_list.append(x)
			sources.append(source)
		return token_list, sources 
Example #23
Source File: eom_kccsd_uhf.py    From pyscf with Apache License 2.0
def get_padding_k_idx(eom, cc):
    # Get location of padded elements in occupied and virtual space
    nonzero_padding_alpha, nonzero_padding_beta = padding_k_idx(cc, kind="split")
    nonzero_opadding_alpha, nonzero_vpadding_alpha = nonzero_padding_alpha
    nonzero_opadding_beta, nonzero_vpadding_beta = nonzero_padding_beta
    return ((nonzero_opadding_alpha, nonzero_opadding_beta),
            (nonzero_vpadding_alpha, nonzero_vpadding_beta)) 
Example #24
Source File: dataloader.py    From Generative-adversarial-Nets-in-NLP with Apache License 2.0
def mini_batch(self, path, datafile, seq_length):
		token_seqs, sentences = self.load_dataset(path, datafile, seq_length)
		self.num_batch = int(len(sentences) / self.batch_size)
		sentences = sentences[:self.batch_size * self.num_batch]
		tokens = token_seqs[:self.batch_size * self.num_batch]
		sentences, tokens = np.array(sentences), np.array(tokens)
		self.lines_batch = np.split(sentences, self.num_batch, 0)
		self.tokens_batch = np.split(tokens, self.num_batch, 0)
		self.reset_pointer() 
Example #25
Source File: interpolation.py    From scarlet with MIT License
def apply_2D_trapezoid_rule(y, x, f, dNy, dNx=None, dy=None, dx=None):
    """Use the trapezoid rule to integrate over a subsampled function
    2D implementation of the trapezoid rule.
    See `apply_trapezoid_rule` for a description, with the difference
    that `f` is a function `f(y,x)`, where we note the c ++`(y,x)` ordering.
    """
    if dy is None:
        dy = y[1] - y[0]
    if dx is None:
        dx = x[1] - x[0]
    if dNx is None:
        dNx = dNy
    z, _y, _x = subsample_function(y, x, f, dNy, dNx, dy, dx)

    # Calculate the volume of each sub region
    dz = 0.25 * (z[:-1, :-1] + z[1:, :-1] + z[:-1, 1:] + z[1:, 1:])  # mean of the four corners
    volumes = dy * dx * dz / dNy / dNx

    # Sum up the sub regions around each point to
    # give it the same shape as the original `(y,x)`
    _dNy = len(_y) // dNy
    _dNx = len(_x) // dNx
    volumes = np.array(
        np.split(np.array(np.split(volumes, _dNx, axis=1)), _dNy, axis=1)
    ).sum(axis=(2, 3))
    return volumes 
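The nested np.split() at the end performs block pooling: columns are split into _dNx groups and rows into _dNy groups, and each block is summed. A small sketch of the same move on a toy array, assuming shapes that divide evenly:

import numpy as np

a = np.arange(24).reshape(4, 6)
blocks = np.array(np.split(np.array(np.split(a, 3, axis=1)), 2, axis=1))
pooled = blocks.sum(axis=(2, 3))    # shape (2, 3): one sum per 2x2 block
assert pooled[0, 0] == a[:2, :2].sum()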
Example #26
Source File: eog_findpeaks.py    From NeuroKit with MIT License
def _eog_findpeaks_blinker(eog_cleaned, sampling_rate=1000):
    """EOG blink detection based on BLINKER algorithm.

    Detects only potential blink landmarks and does not separate blinks from other artifacts yet.
    https://www.frontiersin.org/articles/10.3389/fnins.2017.00012/full

    """
    # Establish criterion
    threshold = 1.5 * np.std(eog_cleaned) + eog_cleaned.mean()
    min_blink = 0.05 * sampling_rate  # min blink frames

    potential_blinks = []
    for i, signal in enumerate(eog_cleaned):
        if signal > threshold:
            potential_blinks.append(i)

    # Make sure each blink is 50ms long and separated by 50ms
    indexes = np.where(np.diff(potential_blinks) > min_blink)[0]
    individual_blinks = np.split(np.diff(potential_blinks), indexes)

    blinks = []
    for idx, i in enumerate(individual_blinks):
        if len(i) > min_blink:
            blinks.append(idx)

    candidates = np.array(potential_blinks)[np.append(0, indexes)[blinks]]

    _, peaks, _, _, _, _ = _eog_features_delineate(eog_cleaned, candidates, sampling_rate=sampling_rate)

    # Blink peak markers
    peaks = np.array(peaks)

    return peaks 
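The np.diff()/np.where()/np.split() combination above is the usual numpy idiom for grouping consecutive samples into runs. In its plain form, splitting the samples themselves rather than their differences (hence the + 1 on the boundary indices):

import numpy as np

samples = np.array([3, 4, 5, 9, 10, 20, 21, 22])
boundaries = np.where(np.diff(samples) > 1)[0] + 1
runs = np.split(samples, boundaries)
# runs -> [array([3, 4, 5]), array([9, 10]), array([20, 21, 22])]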
Example #27
Source File: pano_lsd_align.py    From HorizonNet with MIT License
def assignVanishingType(lines, vp, tol, area=10):
    numLine = len(lines)
    numVP = len(vp)
    typeCost = np.zeros((numLine, numVP))
    # perpendicular
    for vid in range(numVP):
        cosint = (lines[:, :3] * vp[[vid]]).sum(1)
        typeCost[:, vid] = np.arcsin(np.abs(cosint).clip(-1, 1))

    # infinity
    u = np.stack([lines[:, 4], lines[:, 5]], -1)
    u = u.reshape(-1, 1) * 2 * np.pi - np.pi
    v = computeUVN_vec(lines[:, :3], u, lines[:, 3])
    xyz = uv2xyzN_vec(np.hstack([u, v]), np.repeat(lines[:, 3], 2))
    xyz = multi_linspace(xyz[0::2].reshape(-1), xyz[1::2].reshape(-1), 100)
    xyz = np.vstack([blk.T for blk in np.split(xyz, numLine)])
    xyz = xyz / np.linalg.norm(xyz, axis=1, keepdims=True)
    for vid in range(numVP):
        ang = np.arccos(np.abs((xyz * vp[[vid]]).sum(1)).clip(-1, 1))
        notok = (ang < area * np.pi / 180).reshape(numLine, 100).sum(1) != 0
        typeCost[notok, vid] = 100

    I = typeCost.min(1)
    tp = typeCost.argmin(1)
    tp[I > tol] = numVP + 1

    return tp, typeCost 
Example #28
Source File: recurrent_op.py    From brainforge with GNU General Public License v3.0
def forward(self, X, W, b):
        outdim = W.shape[-1] // 4
        time, batch, indim = X.shape

        Z = zX(time, batch, indim+outdim)
        O = zX(time, batch, outdim)
        C, f, i, o, cand, Ca = zX(6, time, batch, outdim)

        for t in range(time):
            Z[t] = np.concatenate((X[t], O[t-1]), axis=-1)

            p = np.dot(Z[t], W) + b
            p[:, :outdim] = self.actfn.forward(p[:, :outdim])
            p[:, outdim:] = sigmoid.forward(p[:, outdim:])

            cand[t] = p[:, :outdim]
            f[t] = p[:, outdim:2*outdim]
            i[t] = p[:, 2*outdim:3*outdim]
            o[t] = p[:, 3*outdim:]
            # cand[t], f[t], i[t], o[t] = np.split(p, 4, axis=1)

            C[t] = C[t-1] * f[t] + cand[t] * i[t]

            Ca[t] = self.actfn.forward(C[t])
            O[t] = Ca[t] * o[t]

        return O, Z, np.stack((C, Ca, cand, f, i, o)) 
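The commented-out line inside the loop hints that the four manual slices are exactly what np.split() would return; a two-line check of that equivalence:

import numpy as np

p = np.arange(24.0).reshape(2, 12)         # batch of 2, outdim == 3
cand, f, i, o = np.split(p, 4, axis=1)     # same as p[:, :3], p[:, 3:6], ...
assert np.array_equal(f, p[:, 3:6])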
Example #29
Source File: dataloader.py    From Generative-adversarial-Nets-in-NLP with Apache License 2.0
def mini_batch(self, positive_file, negative_file):
		positive_examples, negative_examples = [], []
		with codecs.open(positive_file, 'r', encoding='utf-8') as fpo:
			for line in fpo:
				line = line.strip('\n')
				parse_line = list(map(int, line.split()))
				positive_examples.append(parse_line)
		with codecs.open(negative_file, 'r', encoding='utf-8') as fng:
			for line in fng:
				line = line.strip('\n')
				parse_line = list(map(int, line.split()))
				negative_examples.append(parse_line)

		positive_labels = [[1] for _ in positive_examples]
		negative_labels = [[0] for _ in negative_examples]

		# Shuffle sampling
		shuffle_indices = np.random.permutation(np.arange(len(positive_labels)-16))
		positive_examples = np.array(positive_examples)[shuffle_indices]
		negative_examples = np.array(negative_examples)[shuffle_indices]
		positive_labels = np.array(positive_labels)[shuffle_indices]
		negative_labels = np.array(negative_labels)[shuffle_indices]

		# Split
		self.num_batch = int(len(positive_labels) / self.batch_size)
		positive_examples = positive_examples[:self.batch_size * self.num_batch]
		negative_examples = negative_examples[:self.batch_size * self.num_batch]
		positive_labels = positive_labels[:self.batch_size * self.num_batch]
		negative_labels = negative_labels[:self.batch_size * self.num_batch]
		self.positive_examples_batch = np.split(positive_examples, self.num_batch, 0)
		self.negative_examples_batch = np.split(negative_examples, self.num_batch, 0)
		self.positive_labels_batch = np.split(positive_labels, self.num_batch, 0)
		self.negative_labels_batch = np.split(negative_labels, self.num_batch, 0)
		self.reset_pointer() 
Example #30
Source File: eom_uccsd.py    From pyscf with Apache License 2.0
def vector_to_amplitudes_ea(vector, nmo, nocc):
    nocca, noccb = nocc
    nmoa, nmob = nmo
    nvira, nvirb = nmoa-nocca, nmob-noccb

    sizes = (nvira, nvirb, nocca*nvira*(nvira-1)//2, nocca*nvirb*nvira,
             noccb*nvira*nvirb, noccb*nvirb*(nvirb-1)//2)
    sections = np.cumsum(sizes[:-1])
    r1a, r1b, r2a, r2aba, r2bab, r2b = np.split(vector, sections)
    r2a = r2a.reshape(nocca,nvira*(nvira-1)//2)
    r2b = r2b.reshape(noccb,nvirb*(nvirb-1)//2)
    r2aba = r2aba.reshape(nocca,nvirb,nvira).copy()
    r2bab = r2bab.reshape(noccb,nvira,nvirb).copy()

    idxa = np.tril_indices(nvira, -1)
    idxb = np.tril_indices(nvirb, -1)
    r2aaa = np.zeros((nocca,nvira,nvira), vector.dtype)
    r2bbb = np.zeros((noccb,nvirb,nvirb), vector.dtype)
    r2aaa[:,idxa[0],idxa[1]] = r2a
    r2aaa[:,idxa[1],idxa[0]] = -r2a
    r2bbb[:,idxb[0],idxb[1]] = r2b
    r2bbb[:,idxb[1],idxb[0]] = -r2b

    r1 = (r1a.copy(), r1b.copy())
    r2 = (r2aaa, r2aba, r2bab, r2bbb)
    return r1, r2