Python numpy.reshape() Examples

The following are code examples for showing how to use numpy.reshape(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License 7 votes vote down vote up
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    Fit a logistic-regression detector on (density, uncertainty) features.

    :param densities_pos: density scores of the positive samples
    :param densities_neg: density scores of the negative samples
    :param uncerts_pos: uncertainty scores of the positive samples
    :param uncerts_neg: uncertainty scores of the negative samples
    :return: (values, labels, lr) — the stacked (n, 2) feature matrix,
        the 0/1 label vector (0 = negative, 1 = positive), and the fitted
        LogisticRegressionCV model
    """
    # One row per sample: column 0 = density, column 1 = uncertainty.
    # column_stack of the flattened inputs is equivalent to the original
    # reshape((1, -1)) / concatenate / transpose dance.
    values_neg = np.column_stack((densities_neg.ravel(), uncerts_neg.ravel()))
    values_pos = np.column_stack((densities_pos.ravel(), uncerts_pos.ravel()))

    values = np.concatenate((values_neg, values_pos))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))

    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)

    return values, labels, lr
Example 2
Project: model-api-sequence   Author: evandowning   File: color.py    GNU General Public License v3.0 6 votes vote down vote up
def extract(folder,fn,num,width):
    """Load one sample's pickled API-call sequence as an image-like array.

    :param folder: directory containing the sample's .pkl file
    :param fn: sample file name (without the .pkl extension)
    :param num: number of pickled records to read from the file
    :param width: image width in pixels (times 3 RGB channels per row)
    :return: (fn, data, label) where data has shape (-1, width*3)
    """
    label = None
    seq = list()

    # Read in sample's sequence
    path = os.path.join(folder,fn+'.pkl')
    with open(path,'rb') as fr:
        for i in range(num):
            # Each pickled record is (api_sequence, label); the label read
            # last wins (all records presumably share one label — confirm).
            t = pkl.load(fr)
            label = t[1]

            # Replace API call integers with pixel values
            seq.extend([api_md5(str(api)) for api in t[0]])

    # Pad array if it's not divisible by width (3 channels for RGB)
    r = len(seq) % (width*3)
    if r != 0:
        seq.extend([api_md5('0')]*(width*3-r))

    # Reshape numpy array (3 channels)
    data = np.reshape(np.array(seq), (-1,width*3))
    # NOTE(review): int8 is signed (-128..127); if api_md5 produces 0..255
    # pixel values this wraps around — confirm uint8 was not intended.
    data = data.astype(np.int8)

    return fn,data,label
Example 3
Project: Att-ChemdNER   Author: lingluodlut   File: utils.py    Apache License 2.0 6 votes vote down vote up
def set_values(name, param, pretrained):
#{{{
    """
    Copy pretrained values into a network parameter.

    Raises Exception when the element counts differ; otherwise the values
    are reshaped to the parameter's shape and stored as float32.
    """
    current = param.get_value()
    if pretrained.size != current.size:
        raise Exception(
            "Size mismatch for parameter %s. Expected %i, found %i."
            % (name, current.size, pretrained.size)
        )
    reshaped = np.reshape(pretrained, current.shape).astype(np.float32)
    param.set_value(reshaped)
#}}}
Example 4
Project: FRIDA   Author: LCAV   File: tools_fri_doa_plane.py    MIT License 6 votes vote down vote up
def mtx_freq2visi(M, p_mic_x, p_mic_y):
    """
    build the matrix that maps the Fourier series to the visibility
    :param M: the Fourier series expansion is limited from -M to M
    :param p_mic_x: a vector that constains microphones x coordinates
    :param p_mic_y: a vector that constains microphones y coordinates
    :return:
    """
    num_mic = p_mic_x.size
    ms = np.reshape(np.arange(-M, M + 1, step=1), (1, -1), order='F')
    G = np.zeros((num_mic * (num_mic - 1), 2 * M + 1), dtype=complex, order='C')
    count_G = 0
    for q in range(num_mic):
        p_x_outer = p_mic_x[q]
        p_y_outer = p_mic_y[q]
        for qp in range(num_mic):
            if not q == qp:
                p_x_qqp = p_x_outer - p_mic_x[qp]
                p_y_qqp = p_y_outer - p_mic_y[qp]
                norm_p_qqp = np.sqrt(p_x_qqp ** 2 + p_y_qqp ** 2)
                phi_qqp = np.arctan2(p_y_qqp, p_x_qqp)
                G[count_G, :] = (-1j) ** ms * sp.special.jv(ms, norm_p_qqp) * \
                                np.exp(1j * ms * phi_qqp)
                count_G += 1
    return G 
Example 5
Project: FRIDA   Author: LCAV   File: tools_fri_doa_plane.py    MIT License 6 votes vote down vote up
def mtx_updated_G(phi_recon, M, mtx_amp2visi_ri, mtx_fri2visi_ri):
    """
    Update the linear transformation matrix that links the FRI sequence to the
    visibilities by using the reconstructed Dirac locations.
    :param phi_recon: the reconstructed Dirac locations (azimuths)
    :param M: the Fourier series expansion is between -M to M
    :param p_mic_x: a vector that contains microphones' x-coordinates
    :param p_mic_y: a vector that contains microphones' y-coordinates
    :param mtx_freq2visi: the linear mapping from Fourier series to visibilities
    :return:
    """
    L = 2 * M + 1
    ms_half = np.reshape(np.arange(-M, 1, step=1), (-1, 1), order='F')
    phi_recon = np.reshape(phi_recon, (1, -1), order='F')
    mtx_amp2freq = np.exp(-1j * ms_half * phi_recon)  # size: (M + 1) x K
    mtx_amp2freq_ri = np.vstack((mtx_amp2freq.real, mtx_amp2freq.imag[:-1, :]))  # size: (2M + 1) x K
    mtx_fri2amp_ri = linalg.lstsq(mtx_amp2freq_ri, np.eye(L))[0]
    # projection mtx_freq2visi to the null space of mtx_fri2amp
    mtx_null_proj = np.eye(L) - np.dot(mtx_fri2amp_ri.T,
                                       linalg.lstsq(mtx_fri2amp_ri.T, np.eye(L))[0])
    G_updated = np.dot(mtx_amp2visi_ri, mtx_fri2amp_ri) + \
                np.dot(mtx_fri2visi_ri, mtx_null_proj)
    return G_updated 
Example 6
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: test.py    MIT License 6 votes vote down vote up
def im_detect(sess, net, im):
    """Run the detection network on a single image.

    :param sess: TF session
    :param net: network object exposing test_image(sess, data, im_info)
    :param im: the input image as loaded by the data layer
    :return: (scores, pred_boxes) — per-ROI class scores and predicted boxes
        mapped back to original-image coordinates
    """
    blobs, im_scales = _get_blobs(im)
    assert len(im_scales) == 1, "Only single-image batch implemented"

    im_blob = blobs['data']
    # seems to have height, width, and image scales
    # still not sure about the scale, maybe full image it is 1.
    blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)

    _, scores, bbox_pred, rois = net.test_image(sess, blobs['data'], blobs['im_info'])

    # Undo the input rescaling; rois[:, 0] is the batch index, 1:5 the box.
    boxes = rois[:, 1:5] / im_scales[0]
    # print(scores.shape, bbox_pred.shape, rois.shape, boxes.shape)
    scores = np.reshape(scores, [scores.shape[0], -1])
    bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])
    if cfg.FLAGS.test_bbox_reg:
        # Apply bounding-box regression deltas
        box_deltas = bbox_pred
        pred_boxes = bbox_transform_inv(boxes, box_deltas)
        # Clip regressed boxes so they stay inside the image extent.
        pred_boxes = _clip_boxes(pred_boxes, im.shape)
    else:
        # Simply repeat the boxes, once for each class
        pred_boxes = np.tile(boxes, (1, scores.shape[1]))

    return scores, pred_boxes
Example 7
Project: ddd-utils   Author: inconvergent   File: random.py    MIT License 6 votes vote down vote up
def random_points_in_circle(n,xx,yy,rr):
  """
  get n random points in a circle.
  """


  rnd = random(size=(n,3))
  t = 2.*PI*rnd[:,0]
  u = rnd[:,1:].sum(axis=1)
  r = zeros(n,'float')
  mask = u>1.
  xmask = logical_not(mask)
  r[mask] = 2.-u[mask]
  r[xmask] = u[xmask]
  xyp = reshape(rr*r,(n,1))*column_stack( (cos(t),sin(t)) )
  dartsxy  = xyp + array([xx,yy])
  return dartsxy 
Example 8
Project: VisualNN   Author: angelhunt   File: Train.py    GNU General Public License v3.0 6 votes vote down vote up
def trainModel(model_path, data_path, result_path):
    """Build, train, evaluate and save a dense classifier on image data.

    :param model_path: model definition path handed to buildModel
    :param data_path: dataset path understood by load
    :param result_path: where the trained model is written
    """
    model = buildModel(model_path)
    (x_train, y_train), (x_test, y_test) = load(data_path)
    print(x_train[0].shape)
    # Flatten each image to one row; derive the sizes from the data instead
    # of hard-coding 60000/10000 x 784 so other split sizes also work.
    x_train = np.reshape(x_train, (x_train.shape[0], -1))
    x_test = np.reshape(x_test, (x_test.shape[0], -1))
    y_train = np_utils.to_categorical(y_train, 10)
    y_test = np_utils.to_categorical(y_test, 10)
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(),
                  metrics=['accuracy'])

    history = model.fit(x_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    model.save(result_path)
Example 9
Project: neural-fingerprinting   Author: StephanZheng   File: custom_datasets.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __getitem__(self, index):
        """Return the next (image, target) pair from the adversarial set.

        NOTE(review): `index` is ignored — samples are served sequentially
        via the self.sample_num cursor, which this call advances by one.
        """
        img=self.adv_flat[self.sample_num,:]

        if(self.shuff == False):
            # shuff is true for non-pgd attacks
            # NOTE(review): the comment above contradicts the condition (this
            # branch runs when shuff is False); presumably flat samples need
            # the explicit (3, 32, 32) CHW reshape here — confirm.
            img = torch.from_numpy(np.reshape(img,(3,32,32)))
        else:
            img = torch.from_numpy(img).type(torch.FloatTensor)
        # Label = argmax over the one-hot adversarial label matrix.
        target = np.argmax(self.adv_dict["adv_labels"],axis=1)[self.sample_num]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        self.sample_num = self.sample_num + 1
        return img, target
Example 10
Project: neural-fingerprinting   Author: StephanZheng   File: custom_datasets.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __getitem__(self, index):
        """Return the next (image, target) pair (224x224 RGB samples).

        NOTE(review): `index` is ignored — samples are served sequentially
        via the self.sample_num cursor, which this call advances by one.
        """
        img=self.adv_flat[self.sample_num,:]

        if(self.shuff == False):
            # shuff is true for non-pgd attacks
            # NOTE(review): the comment above contradicts the condition (this
            # branch runs when shuff is False); presumably flat samples need
            # the explicit (3, 224, 224) CHW reshape here — confirm.
            img = torch.from_numpy(np.reshape(img,(3,224,224)))
        else:
            img = torch.from_numpy(img).type(torch.FloatTensor)
        # Labels here are stored directly per sample (no argmax needed).
        target = self.adv_dict["adv_labels"][self.sample_num]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        self.sample_num = self.sample_num + 1
        return img, target
Example 11
Project: neural-fingerprinting   Author: StephanZheng   File: custom_datasets.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __getitem__(self, index):
        """Return the next (image, target) pair from the adversarial set.

        NOTE(review): `index` is ignored — samples are served sequentially
        via the self.sample_num cursor, which this call advances by one.
        """
        img=self.adv_flat[self.sample_num,:]

        if(self.shuff == False):
            # shuff is true for non-pgd attacks
            # NOTE(review): the comment above contradicts the condition (this
            # branch runs when shuff is False); presumably flat samples need
            # the explicit (3, 32, 32) CHW reshape here — confirm.
            img = torch.from_numpy(np.reshape(img,(3,32,32)))
        else:
            img = torch.from_numpy(img).type(torch.FloatTensor)
        # Label = argmax over the one-hot adversarial label matrix.
        target = np.argmax(self.adv_dict["adv_labels"],axis=1)[self.sample_num]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        self.sample_num = self.sample_num + 1
        return img, target
Example 12
Project: neural-fingerprinting   Author: StephanZheng   File: custom_datasets.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __getitem__(self, index):
        """Return the next (image, target) pair (28x28 grayscale samples).

        NOTE(review): `index` is ignored — samples are served sequentially
        via the self.sample_num cursor, which this call advances by one.
        """
        img=self.adv_flat[self.sample_num,:]
        if(self.transp == False):
            # shuff is true for non-pgd attacks
            # NOTE(review): comment looks copy-pasted (this class tests
            # `transp`, not `shuff`); this branch reshapes the flat vector
            # to a 28x28 image — confirm intended flag semantics.
            img = torch.from_numpy(np.reshape(img,(28,28)))
        else:
            img = torch.from_numpy(img).type(torch.FloatTensor)
        # Label = argmax over the one-hot adversarial label matrix.
        target = np.argmax(self.adv_dict["adv_labels"],axis=1)[self.sample_num]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image

        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        self.sample_num = self.sample_num + 1
        return img, target
Example 13
Project: YOLOv1_tensorflow_windows   Author: FatherRen   File: main.py    GNU General Public License v3.0 6 votes vote down vote up
def detect(self, img):
        """Run YOLO on one BGR frame and return detections rescaled to the
        original image size."""
        img_h, img_w, _ = img.shape
        # Network input: square resize, RGB order, values in [-1, 1],
        # leading batch axis.
        net_in = cv2.resize(img, (self.image_size, self.image_size))
        net_in = cv2.cvtColor(net_in, cv2.COLOR_BGR2RGB).astype(np.float32)
        net_in = (net_in / 255.0) * 2.0 - 1.0
        net_in = np.reshape(net_in, (1, self.image_size, self.image_size, 3))

        result = self.detect_from_cvmat(net_in)[0]

        # Rescale the box fields from network space back to image space.
        x_scale = 1.0 * img_w / self.image_size
        y_scale = 1.0 * img_h / self.image_size
        for det in result:
            det[1] *= x_scale
            det[2] *= y_scale
            det[3] *= x_scale
            det[4] *= y_scale
        return result
Example 14
Project: Deep_VoiceChanger   Author: pstuvwx   File: gla_gpu.py    MIT License 6 votes vote down vote up
def auto_inverse(self, whole_spectrum):
        """Invert a whole spectrogram back to a waveform in GPU-sized batches.

        Rows are handed to self.inverse `self.parallel` at a time with an
        `overwrap` margin, then the quarter-interleaved frames are
        overlap-added into one output waveform.
        """
        whole_spectrum = np.copy(whole_spectrum).astype(complex)
        # NOTE(review): comparing a complex array with `<` relies on legacy
        # numpy behavior — verify this floor still works on current numpy.
        whole_spectrum[whole_spectrum < 1] = 1
        overwrap = self.buffer_size * 2
        height = whole_spectrum.shape[0]
        parallel_dif = (height-overwrap) // self.parallel
        if height < self.parallel*overwrap:
            raise Exception('voice length is too small to use gpu, or parallel number is too big')

        # Strided row selection: batch i takes rows i, i+step, i+2*step, ...
        spec = [self.inverse(whole_spectrum[range(i, i+parallel_dif*self.parallel, parallel_dif), :]) for i in tqdm.tqdm(range(parallel_dif+overwrap))]
        # Drop the leading overlap margin.
        spec = spec[overwrap:]
        spec = np.concatenate(spec, axis=1)
        spec = spec.reshape(-1, self.wave_len)

        #Below code don't consider wave_len and wave_dif, I'll fix.
        wave = np.fft.ifft(spec, axis=1).real
        # Two zero samples of padding per frame before the overlap-add.
        pad = np.zeros((wave.shape[0], 2), dtype=float)
        wave = np.concatenate([wave, pad], axis=1)

        # Overlap-add every 4th frame at wave_dif hops into the output buffer.
        dst = np.zeros((wave.shape[0]+3)*self.wave_dif, dtype=float)
        for i in range(4):
            w = wave[range(i, wave.shape[0], 4),:]
            w = w.reshape(-1)
            dst[i*self.wave_dif:i*self.wave_dif+len(w)] += w
        return dst*0.5
Example 15
Project: Deep_VoiceChanger   Author: pstuvwx   File: dataset.py    MIT License 6 votes vote down vote up
def wave2input_image(wave, window, pos=0, pad=0):
    """Convert a waveform slice into a log-magnitude spectrogram image.

    NOTE(review): depends on module-level globals `sride`, `dif`, `height`,
    `bias` and `scale` (STFT hop/frame geometry and normalisation) — confirm
    against the module header.

    :param wave: 1-D waveform samples
    :param window: per-frame analysis window (multiplied in)
    :param pos: start offset into `wave`
    :param pad: extra frames of context on both sides
    :return: float32 array of shape (1, height + pad*2, 128)
    """
    # Build overlapping frames by strided slicing, keep the first 254 columns.
    wave_image = np.hstack([wave[pos+i*sride:pos+(i+pad*2)*sride+dif].reshape(height+pad*2, sride) for i in range(256//sride)])[:,:254]
    wave_image *= window
    spectrum_image = np.fft.fft(wave_image, axis=1)
    # Magnitude of the first 128 FFT bins only.
    input_image = np.abs(spectrum_image[:,:128].reshape(1, height+pad*2, 128), dtype=np.float32)

    # Clamp, log-compress and normalise into roughly [0, 1].
    np.clip(input_image, 1000, None, out=input_image)
    np.log(input_image, out=input_image)
    input_image += bias
    input_image /= scale

    if np.max(input_image) > 0.95:
        print('input image max bigger than 0.95', np.max(input_image))
    if np.min(input_image) < 0.05:
        print('input image min smaller than 0.05', np.min(input_image))

    return input_image
Example 16
Project: oslodatascience-rl   Author: Froskekongen   File: space_invaders4.py    MIT License 6 votes vote down vote up
def preprocess_observations(input_observation, prev_processed_observation, input_dimensions):
    """ convert the 210x160x3 uint8 frame into a 7056 float vector """
    processed_observation = remove_color(preprocess(input_observation))
    # np.float was removed in NumPy 1.24; the builtin float is the same dtype.
    processed_observation = processed_observation.astype(float).ravel()

    # subtract the previous frame from the current one so we are only processing on changes in the game
    if prev_processed_observation is not None:
        input_observation = processed_observation - prev_processed_observation
    else:
        # First frame: no history to difference against, so emit zeros.
        input_observation = np.zeros(input_dimensions)
    # store the previous frame so we can subtract from it next time
    prev_processed_observations = processed_observation
    return input_observation, prev_processed_observations
Example 17
Project: oslodatascience-rl   Author: Froskekongen   File: space_invaders3.py    MIT License 6 votes vote down vote up
def preprocess_observations(input_observation, prev_processed_observation, input_dimensions):
    """ convert the 210x160x3 uint8 frame into a 6400 float vector """
    processed_observation = remove_color(preprocess(input_observation))
    # np.float was removed in NumPy 1.24; the builtin float is the same dtype.
    processed_observation = processed_observation.astype(float).ravel()

    # subtract the previous frame from the current one so we are only processing on changes in the game
    if prev_processed_observation is not None:
        input_observation = processed_observation - prev_processed_observation
    else:
        # First frame: no history to difference against, so emit zeros.
        input_observation = np.zeros(input_dimensions)
    # store the previous frame so we can subtract from it next time
    prev_processed_observations = processed_observation
    return input_observation, prev_processed_observations
Example 18
Project: deep-learning-note   Author: wdxtub   File: 4_multi_classification.py    MIT License 6 votes vote down vote up
def one_vs_all(X, y, num_labels, learning_rate):
    """Train num_labels one-vs-all logistic regression classifiers.

    Returns an array of shape (num_labels, n_features + 1) whose row i holds
    the optimised parameters for class i + 1 (labels are 1-indexed).
    """
    rows, params = X.shape

    # One parameter row per class, including the intercept term.
    all_theta = np.zeros((num_labels, params + 1))

    # Prepend the bias column of ones.
    X = np.insert(X, 0, values=np.ones(rows), axis=1)

    # labels are 1-indexed instead of 0-indexed
    for klass in range(1, num_labels + 1):
        theta = np.zeros(params + 1)
        y_i = np.reshape(np.array([1 if label == klass else 0 for label in y]), (rows, 1))

        # Minimise the regularised cost with truncated Newton.
        fmin = minimize(fun=cost, x0=theta, args=(X, y_i, learning_rate), method='TNC', jac=gradient)
        all_theta[klass - 1, :] = fmin.x

    return all_theta
Example 19
Project: deep-learning-note   Author: wdxtub   File: 8_kmeans_pca.py    MIT License 6 votes vote down vote up
def plot_n_image(X, n):
    """Plot the first n rows of X as square images on a sqrt(n) x sqrt(n) grid.

    n has to be a perfect square; each row of X is one flattened square image.
    """
    pic_size = int(np.sqrt(X.shape[1]))
    grid_size = int(np.sqrt(n))
    images = X[:n, :]

    fig, ax_array = plt.subplots(nrows=grid_size, ncols=grid_size,
                                    sharey=True, sharex=True, figsize=(8, 8))

    for idx in range(grid_size * grid_size):
        row, col = divmod(idx, grid_size)
        ax_array[row, col].imshow(images[idx].reshape((pic_size, pic_size)))
        # Hide the tick labels on every panel.
        plt.xticks(np.array([]))
        plt.yticks(np.array([]))
Example 20
Project: parasweep   Author: eviatarbach   File: sweepers.py    MIT License 6 votes vote down vote up
def mapping(self, sim_ids, sweep_id, save=True):
        """
        Return a labelled array which maps parameters to simulation IDs.

        The result is a multidimensional labelled array (using xarray) whose
        coordinates are the sweep parameters and whose values are the
        simulation IDs. With ``save=True`` it is also written to a netCDF
        file named ``sim_ids_{sweep_id}.nc``.

        See :func:`parasweep.sweepers.Sweep.mapping` for argument information.
        """
        import xarray
        import numpy

        id_grid = numpy.reshape(numpy.array(sim_ids), self.lengths)
        sim_ids_array = xarray.DataArray(id_grid, coords=self.values,
                                         dims=self.keys, name='sim_id')

        if save:
            sim_ids_array.to_netcdf(f'sim_ids_{sweep_id}.nc')

        return sim_ids_array
Example 21
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: detection.py    Apache License 2.0 6 votes vote down vote up
def _parse_label(self, label):
        """Parse a raw object-detection label into an (N, obj_width) array.

        Raw layout: [header_width, obj_width, ...header..., then repeated
        object records of obj_width values: id, xmin, ymin, xmax, ymax, ...].
        Raises RuntimeError for malformed labels or labels without any
        valid bounding box.
        """
        if isinstance(label, nd.NDArray):
            label = label.asnumpy()
        flat = label.ravel()
        if flat.size < 7:
            raise RuntimeError("Label shape is invalid: " + str(flat.shape))
        header_width = int(flat[0])
        obj_width = int(flat[1])
        if (flat.size - header_width) % obj_width != 0:
            msg = "Label shape %s inconsistent with annotation width %d." \
                %(str(flat.shape), obj_width)
            raise RuntimeError(msg)
        out = np.reshape(flat[header_width:], (-1, obj_width))
        # Keep only boxes with positive extent (xmax > xmin and ymax > ymin).
        valid = np.where(np.logical_and(out[:, 3] > out[:, 1], out[:, 4] > out[:, 2]))[0]
        if valid.size < 1:
            raise RuntimeError('Encounter sample with no valid label.')
        return out[valid, :]
Example 22
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: detection.py    Apache License 2.0 6 votes vote down vote up
def reshape(self, data_shape=None, label_shape=None):
        """Reshape iterator for data_shape or label_shape.

        Parameters
        ----------
        data_shape : tuple or None
            New per-sample data shape; validated, then prepended with the
            batch size in provide_data.
        label_shape : tuple or None
            New per-sample label shape; validated, then prepended with the
            batch size in provide_label.
        """
        if data_shape is not None:
            self.check_data_shape(data_shape)
            name = self.provide_data[0][0]
            self.provide_data = [(name, (self.batch_size,) + data_shape)]
        if label_shape is not None:
            self.check_label_shape(label_shape)
            name = self.provide_label[0][0]
            self.provide_label = [(name, (self.batch_size,) + label_shape)]
Example 23
Project: mtrl-auto-uav   Author: brunapearson   File: test_mtrl.py    MIT License 5 votes vote down vote up
def get_image(client):
    """Grab one scene frame from the AirSim client as a 224x224 BGR image.

    :param client: connected airsim client supporting simGetImages
    :return: numpy image of shape (224, 224, 3)
    """
    image_response = client.simGetImages([airsim.ImageRequest(0, airsim.ImageType.Scene, False, False)])[0]
    # np.frombuffer replaces np.fromstring (deprecated and later removed).
    image1d = np.frombuffer(image_response.image_data_uint8, dtype=np.uint8)
    image_rgba = image1d.reshape(image_response.height, image_response.width, 4)
    image_bgr = cv2.cvtColor(image_rgba, cv2.COLOR_RGBA2BGR)
    # Copy before resizing: frombuffer yields a read-only view of the bytes.
    image_buf = cv2.resize(image_bgr.copy(), (224, 224))

    return image_buf

################################################################################

# confirm connection to simulator 
Example 24
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: layer.py    MIT License 5 votes vote down vote up
def _shuffle_roidb_inds(self):
    """Randomly permute the training roidb."""
    # If the random flag is set, 
    # then the database is shuffled according to system time
    # Useful for the validation set
    if self._random:
      # Save the global RNG state so it can be restored afterwards; seed
      # from wall-clock millis (mod 2**32-1, numpy's legal seed range).
      st0 = np.random.get_state()
      millis = int(round(time.time() * 1000)) % 4294967295
      np.random.seed(millis)
    
    if cfg.TRAIN.ASPECT_GROUPING:
      raise NotImplementedError
      # NOTE(review): the string below is unreachable (it follows the raise);
      # it documents the intended aspect-grouped shuffle, kept as-is.
      '''
      widths = np.array([r['width'] for r in self._roidb])
      heights = np.array([r['height'] for r in self._roidb])
      horz = (widths >= heights)
      vert = np.logical_not(horz)
      horz_inds = np.where(horz)[0]
      vert_inds = np.where(vert)[0]
      inds = np.hstack((
          np.random.permutation(horz_inds),
          np.random.permutation(vert_inds)))
      inds = np.reshape(inds, (-1, 2))
      row_perm = np.random.permutation(np.arange(inds.shape[0]))
      inds = np.reshape(inds[row_perm, :], (-1,))
      self._perm = inds
      '''
    else:
      self._perm = np.random.permutation(np.arange(len(self._roidb)))
    # Restore the random state
    if self._random:
      np.random.set_state(st0)
      
    # Reset the epoch cursor to the start of the new permutation.
    self._cur = 0
Example 25
Project: xrft   Author: xgcm   File: xrft.py    MIT License 5 votes vote down vote up
def _stack_chunks(da, dim, suffix='_segment'):
    """Reshape a DataArray so there is only one chunk along dimension `dim`"""
    data = da.data
    attr = da.attrs
    dims_out = []
    coords_out = {}
    shape_out = []
    for name in da.dims:
        if name in dim:
            ax = da.get_axis_num(name)
            # The stacked axis must be evenly chunked.
            if np.diff(da.chunks[ax]).sum() != 0:
                raise ValueError("Chunk lengths need to be the same.")
            total = len(da[name])
            seg_len = da.chunks[ax][0]
            n_seg = int(total / seg_len)
            # Split the coordinate values into one row per segment.
            seg_coords = da[name].data.reshape((n_seg, int(seg_len)))
            dims_out += [name + suffix, name]
            shape_out += [n_seg, int(seg_len)]
            coords_out[name + suffix] = range(n_seg)
            coords_out[name] = seg_coords[0]
        else:
            dims_out.append(name)
            shape_out.append(len(da[name]))
            coords_out[name] = da[name].data

    return xr.DataArray(data.reshape(shape_out), dims=dims_out,
                        coords=coords_out, attrs=attr)
Example 26
Project: xrft   Author: xgcm   File: test_xrft.py    MIT License 5 votes vote down vote up
def numpy_detrend(da):
    """
    Detrend a 2D field by subtracting out the least-square plane fit.

    Parameters
    ----------
    da : `numpy.array`
        The 2D data to be detrended

    Returns
    -------
    `numpy.array`
        The detrended input data
    """
    ny, nx = da.shape

    # Design matrix with columns [1, row_index + 1, col_index + 1].
    G = np.ones((ny * nx, 3))
    for row in range(ny):
        sl = slice(nx * row, nx * row + nx)
        G[sl, 1] = row + 1
        G[sl, 2] = np.arange(1, nx + 1)

    # Normal-equation least-squares solution for the plane coefficients.
    obs = np.reshape(da.copy(), (ny * nx, 1))
    coeffs = np.dot(np.dot(spl.inv(np.dot(G.T, G)), G.T), obs)
    plane = np.reshape(np.dot(G, coeffs), (ny, nx))

    return da - plane
Example 27
Project: FRIDA   Author: LCAV   File: generators.py    MIT License 5 votes vote down vote up
def gen_sig_at_mic(sigmak2_k, phi_k, pos_mic_x,
                   pos_mic_y, omega_band, sound_speed,
                   SNR, Ns=256):
    """
    Generate the complex base-band signal received at the microphones.

    :param sigmak2_k: variances of the circular complex Gaussian signals
                emitted by the K sources
    :param phi_k: source locations (azimuths)
    :param pos_mic_x: microphone x coordinates
    :param pos_mic_y: microphone y coordinates
    :param omega_band: mid-band (ANGULAR) frequency [radian/sec]
    :param sound_speed: speed of sound
    :param SNR: SNR for the received signal at microphones
    :param Ns: number of snapshots used to estimate the covariance matrix
    :return: (noisy received signal, clean received signal)
    """
    num_mic = pos_mic_x.size
    K = sigmak2_k.size
    xk, yk = polar2cart(1, phi_k)  # source locations in cartesian coordinates

    # Reshape so microphone and source axes broadcast against each other.
    xk = np.reshape(xk, (1, -1), order='F')
    yk = np.reshape(yk, (1, -1), order='F')
    pos_mic_x = np.reshape(pos_mic_x, (-1, 1), order='F')
    pos_mic_y = np.reshape(pos_mic_y, (-1, 1), order='F')
    sigmak2_k = np.reshape(sigmak2_k, (-1, 1), order='F')
    t = np.reshape(np.linspace(0, 10 * np.pi, num=Ns), (1, -1), order='F')

    # Circular complex Gaussian source processes, one row per source (K x Ns).
    x_tilde_k = np.sqrt(sigmak2_k / 2.) * (np.random.randn(K, Ns) + 1j *
                                           np.random.randn(K, Ns))
    steering = np.exp(-1j * (xk * pos_mic_x + yk * pos_mic_y) /
                      (sound_speed / omega_band))
    y_mic = np.dot(steering, x_tilde_k * np.exp(1j * omega_band * t))

    # Scale white complex noise to hit the requested SNR.
    signal_energy = linalg.norm(y_mic, 'fro') ** 2
    noise_energy = signal_energy / 10 ** (SNR * 0.1)
    sigma2_noise = noise_energy / (Ns * num_mic)
    noise = np.sqrt(sigma2_noise / 2.) * (np.random.randn(*y_mic.shape) + 1j *
                                          np.random.randn(*y_mic.shape))
    return y_mic + noise, y_mic
Example 28
Project: FRIDA   Author: LCAV   File: doa.py    MIT License 5 votes vote down vote up
def polar_distance(x1, x2):
    """
    Pair up the entries of two angle vectors so that matched entries are as
    close as possible on the circle, and report the mean angular distance.

    :param x1: vector 1
    :param x2: vector 2
    :return: d: mean absolute angular distance over the matched pairs
             index: the pairing (rows of [index-into-x1, index-into-x2])
    """
    x1 = np.reshape(x1, (1, -1), order='F')
    x2 = np.reshape(x2, (1, -1), order='F')
    N1 = x1.size
    N2 = x2.size
    # Angular distance matrix: entry (i, j) = circle distance(x2[i], x1[j]).
    diffmat = np.arccos(np.cos(x1 - np.reshape(x2, (-1, 1), order='F')))
    num_pairs = np.min([N1, N2])
    index = np.zeros((num_pairs, 2), dtype=int)
    if num_pairs > 1:
        # Greedy matching: repeatedly take the globally closest remaining
        # pair, then retire its row and column.
        for k in range(num_pairs):
            col_min = np.min(diffmat, axis=0)
            col_argmin = np.argmin(diffmat, axis=0)
            best1 = np.argmin(col_min)
            best2 = col_argmin[best1]
            index[k, :] = [best1, best2]
            diffmat[best2, :] = float('inf')
            diffmat[:, best1] = float('inf')
        d = np.mean(np.arccos(np.cos(x1[:, index[:, 0]] - x2[:, index[:, 1]])))
    else:
        d = np.min(diffmat)
        index = np.argmin(diffmat)
        if N1 == 1:
            index = np.array([1, index])
        else:
            index = np.array([index, 1])
    return d, index
Example 29
Project: FRIDA   Author: LCAV   File: tools_fri_doa_plane.py    MIT License 5 votes vote down vote up
def mtx_updated_G_multiband(phi_recon, M, mtx_amp2visi_ri,
                            mtx_fri2visi_ri, num_bands):
    """
    Update the linear transformation matrix that links the FRI sequence to the
    visibilities by using the reconstructed Dirac locations.
    :param phi_recon: the reconstructed Dirac locations (azimuths)
    :param M: the Fourier series expansion is between -M to M
    :param p_mic_x: a vector that contains microphones' x-coordinates
    :param p_mic_y: a vector that contains microphones' y-coordinates
    :param mtx_freq2visi: the linear mapping from Fourier series to visibilities
    :return:
    """
    L = 2 * M + 1
    ms_half = np.reshape(np.arange(-M, 1, step=1), (-1, 1), order='F')
    phi_recon = np.reshape(phi_recon, (1, -1), order='F')
    mtx_amp2freq = np.exp(-1j * ms_half * phi_recon)  # size: (M + 1) x K
    mtx_amp2freq_ri = np.vstack((mtx_amp2freq.real, mtx_amp2freq.imag[:-1, :]))  # size: (2M + 1) x K
    mtx_fri2amp_ri = linalg.lstsq(mtx_amp2freq_ri, np.eye(L))[0]
    # projection mtx_freq2visi to the null space of mtx_fri2amp
    mtx_null_proj = np.eye(L) - np.dot(mtx_fri2amp_ri.T,
                                       linalg.lstsq(mtx_fri2amp_ri.T, np.eye(L))[0])
    G_updated = np.dot(mtx_amp2visi_ri,
                       linalg.block_diag(*([mtx_fri2amp_ri] * num_bands))
                       ) + \
                np.dot(mtx_fri2visi_ri,
                       linalg.block_diag(*([mtx_null_proj] * num_bands))
                       )
    return G_updated 
Example 30
Project: Stock-Price-Prediction   Author: dhingratul   File: helper.py    MIT License 5 votes vote down vote up
def load_data(filename, seq_len, norm_win):
    """
    Loads the data from a csv file into arrays

    Input: Filename, sequence Lenght, normalization window(True, False)
    Output: X_tr, Y_tr, X_te, Y_te

    Note: Normalization data using n_i = (p_i / p_0) - 1,
    denormalization using p_i = p_0(n_i + 1)

    Note: Run from timeSeriesPredict.py
    """
    fid = open(filename, 'r').read()
    data = fid.split('\n')
    sequence_length = seq_len + 1
    out = []
    for i in range(len(data) - sequence_length):
        out.append(data[i: i + sequence_length])
    if norm_win:
        out = normalize_windows(out)
    out = np.array(out)
    split_ratio = 0.9
    split = round(split_ratio * out.shape[0])
    train = out[:int(split), :]
    np.random.shuffle(train)
    X_tr = train[:, :-1]
    Y_tr = train[:, -1]
    X_te = out[int(split):, :-1]
    Y_te = out[int(split):, -1]
    X_tr = np.reshape(X_tr, (X_tr.shape[0], X_tr.shape[1], 1))
    X_te = np.reshape(X_te, (X_te.shape[0], X_te.shape[1], 1))
    return [X_tr, Y_tr, X_te, Y_te] 
Example 31
Project: Stock-Price-Prediction   Author: dhingratul   File: helper.py    MIT License 5 votes vote down vote up
def predict_pt_pt(model, data):
    """
    Predict a single timestep ahead.

    Input: keras model, testing data
    Output: predicted sequence, flattened to 1-D

    Note: Run from timeSeriesPredict.py
    """
    raw = model.predict(data)
    # Collapse the (n, 1) prediction matrix into a flat vector.
    return np.reshape(raw, (raw.size, ))
Example 32
Project: ddd-utils   Author: inconvergent   File: random.py    MIT License 5 votes vote down vote up
def random_unit_vec(num, scale):
  """Return `num` random 3-d vectors, each of length `scale`.

  Rows are drawn from an isotropic normal distribution and normalized,
  which yields directions uniformly distributed on the sphere.
  """
  from numpy.random import normal

  rnd = normal(size=(num, 3))
  # Normalize each row explicitly instead of relying on the ambiguous
  # module-level `norm`/`reshape` names (star-import style in the original).
  lengths = np.sqrt((rnd * rnd).sum(axis=1))
  rnd /= lengths.reshape((num, 1))
  return rnd * scale
Example 33
Project: VisualNN   Author: angelhunt   File: shapes.py    GNU General Public License v3.0 5 votes vote down vote up
def reshape(layer):
    """Return the output shape (reversed) of a Reshape layer.

    The target shape comes from the layer's comma-separated ``dim``
    parameter; the leading (batch) dimension is dropped.
    """
    temp = np.zeros(layer['shape']['input'])
    # `map` returns an iterator on Python 3, so it must be materialized
    # into a list before slicing off the batch dimension.
    shape = list(map(int, layer['params']['dim'].split(',')))[1:]
    temp = np.reshape(temp, shape)
    return list(temp.shape[::-1])
Example 34
Project: VisualNN   Author: angelhunt   File: shapes.py    GNU General Public License v3.0 5 votes vote down vote up
def get_layer_shape(layer):
    # Type checking is kept separate so this can be reused to get the
    # shape of a single layer, e.g. when a new layer is added to an
    # already drawn model.
    layer_type = layer['info']['type']
    data_layer_types = ['ImageData', 'Data', 'HDF5Data', 'Input',
                        'WindowData', 'MemoryData', 'DummyData']

    if layer_type in data_layer_types:
        return data(layer)
    if layer_type in ['Convolution', 'Pooling', 'Deconvolution', 'DepthwiseConv']:
        return filter(layer)
    if layer_type in ['InnerProduct', 'Recurrent', 'RNN', 'LSTM', 'Embed']:
        return output(layer)
    if layer_type == 'Flatten':
        return flatten(layer)
    if layer_type == 'Reshape':
        return reshape(layer)
    if layer_type == 'Upsample':
        return upsample(layer)
    if layer_type == 'RepeatVector':
        return repeat(layer)
    if layer_type in ['SPP', 'Crop']:
        raise Exception('Cannot determine shape of ' + layer_type + 'layer.')
    # Everything else passes its input shape straight through.
    return identity(layer)
Example 35
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: utils.py    MIT License 5 votes vote down vote up
def plot_log(filename, show=True):
    # Parse the CSV log: the first row fixes the column order (keys),
    # and every row contributes one float per column.
    keys = []
    values = []
    with open(filename, 'r') as f:
        for row in csv.DictReader(f):
            if not keys:
                for key, value in row.items():
                    keys.append(key)
                    values.append(float(value))
            else:
                for _, value in row.items():
                    values.append(float(value))

        values = np.reshape(values, newshape=(-1, len(keys)))
        values[:, 0] += 1  # epoch numbers are 1-based in the plots

    fig = plt.figure(figsize=(4, 6))
    fig.subplots_adjust(top=0.95, bottom=0.05, right=0.95)

    # Upper panel: training loss curves only (no validation losses).
    fig.add_subplot(211)
    for i, key in enumerate(keys):
        if key.find('loss') >= 0 and not key.find('val') >= 0:
            plt.plot(values[:, 0], values[:, i], label=key)
    plt.legend()
    plt.title('Training loss')

    # Lower panel: all accuracy curves (train and validation).
    fig.add_subplot(212)
    for i, key in enumerate(keys):
        if key.find('acc') >= 0:
            plt.plot(values[:, 0], values[:, i], label=key)
    plt.legend()
    plt.title('Training and validation accuracy')

    # fig.savefig('result/log.png')
    if show:
        plt.show()
Example 36
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def lid_adv_term(clean_logits, adv_logits, batch_size=100):
    """Calculate a LID (Local Intrinsic Dimensionality) loss term for a
    minibatch of adversarial logits.

    :param clean_logits: logits of the clean examples
    :param adv_logits: adversarial logits, same batch layout as the clean ones
    :param batch_size: number of examples in the minibatch
    :return: L2-normalized LID estimates, one scalar per example
    """
    # y_pred = tf.nn.softmax(logits)
    # Flatten each example's logits to a single row vector.
    c_pred = tf.reshape(clean_logits, (batch_size, -1))
    a_pred = tf.reshape(adv_logits, (batch_size, -1))

    # calculate pairwise distance
    r = tf.reduce_sum(c_pred * a_pred, 1)
    # turn r into column vector
    r1 = tf.reshape(r, [-1, 1])
    # NOTE(review): this resembles the ||a||^2 - 2ab + ||b||^2 expansion but
    # uses clean/adv cross terms on both ends plus a ones matrix — presumably
    # an intentional distance surrogate; confirm against the reference paper.
    D = r1 - 2 * tf.matmul(c_pred, tf.transpose(a_pred)) + tf.transpose(r1) + \
        tf.ones([batch_size, batch_size])

    # find the k nearest neighbor
    D1 = -tf.sqrt(D)  # negate so top_k picks the smallest distances
    D2, _ = tf.nn.top_k(D1, k=21, sorted=True)  # 21 = self + 20 neighbours
    D3 = -D2[:, 1:]  # drop column 0 (the self-distance), restore sign

    # Maximum-likelihood LID estimate over the k=20 neighbourhood:
    # ratios of each neighbour distance to the largest one.
    m = tf.transpose(tf.multiply(tf.transpose(D3), 1.0 / D3[:, -1]))
    v_log = tf.reduce_sum(tf.log(m + 1e-9), axis=1)  # to avoid nan
    lids = -20 / v_log

    ## batch normalize lids
    lids = tf.nn.l2_normalize(lids, dim=0, epsilon=1e-12)

    return lids
Example 37
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def mle_single(data, x, k=10):
    """Maximum-likelihood estimate of the local intrinsic dimensionality
    of a single query point `x` with respect to the reference set `data`."""
    data = np.asarray(data, dtype=np.float32)
    x = np.asarray(x, dtype=np.float32)
    if x.ndim == 1:
        # Promote the single point to a (1, dim) batch for cdist.
        x = x.reshape((-1, x.shape[0]))
    # dim = x.shape[1]

    k = min(k, len(data) - 1)
    # Distances to the k nearest neighbours, excluding the point itself
    # (column 0 after sorting).
    neighbours = np.sort(cdist(x, data), axis=1)[:, 1:k + 1]
    # MLE estimator: -k / sum(log(d_i / d_k)).
    log_ratios = np.log(neighbours / neighbours[:, -1:])
    estimates = -k / np.sum(log_ratios, axis=1)
    return estimates[0]

# lid of a batch of query points X 
Example 38
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def score_point(tup):
    """
    Score a single sample under a fitted kernel-density estimator.

    :param tup: (sample, kde) pair; the sample is flattened to one row
    :return: log-density of the sample under the kde
    """
    sample, kde = tup
    row = np.reshape(sample, (1, -1))
    return kde.score_samples(row)[0]
Example 39
Project: neural-fingerprinting   Author: StephanZheng   File: utils_cifar.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def read_CIFAR100(data_folder):
    """ Reads and parses examples from CIFAR100 python data files """

    def _load(file_list):
        # Each pickled file holds flat pixel rows plus fine-grained labels.
        imgs, labels = [], []
        for name in file_list:
            entry = unpickle(os.path.join(data_folder, name))
            imgs.append(entry["data"])
            labels.append(entry["fine_labels"])
        return np.concatenate(imgs), np.concatenate(labels)

    train_img, train_label = _load(["cifar-100-python/train"])
    test_img, test_label = _load(["cifar-100-python/test"])

    # Unflatten the raw pixel rows to [B, C, H, W].
    train_img = np.reshape(
        train_img, [NUM_TRAIN_IMG, NUM_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH])
    test_img = np.reshape(
        test_img, [NUM_TEST_IMG, NUM_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH])

    # change format from [B, C, H, W] to [B, H, W, C] for feeding to Tensorflow
    train_img = np.transpose(train_img, [0, 2, 3, 1])
    test_img = np.transpose(test_img, [0, 2, 3, 1])
    # Per-pixel mean over the union of train and test images.
    mean_img = np.mean(np.concatenate([train_img, test_img]), axis=0)

    # Mean-centre the images; labels pass through unchanged.
    return {
        "train_img": train_img - mean_img,
        "test_img": test_img - mean_img,
        "train_label": train_label,
        "test_label": test_label,
    }
Example 40
Project: neural-fingerprinting   Author: StephanZheng   File: attacks_tf.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def jacobian(sess, x, grads, target, X, nb_features, nb_classes, feed=None):
    """
    TensorFlow implementation of the forward derivative / Jacobian
    :param x: the input placeholder
    :param grads: the list of TF gradients returned by jacobian_graph()
    :param target: the target misclassification class
    :param X: numpy array with sample input
    :param nb_features: the number of features in the input
    :return: matrix of forward derivatives flattened into vectors
    """
    # One feed dictionary is shared by every gradient computation.
    feed_dict = {x: X}
    if feed is not None:
        feed_dict.update(feed)

    # One flattened gradient row per class.
    jacobian_val = np.zeros((nb_classes, nb_features), dtype=np_dtype)
    for class_ind, grad in enumerate(grads):
        jacobian_val[class_ind] = np.reshape(sess.run(grad, feed_dict),
                                             (1, nb_features))

    # Sum over all classes different from the target class to prepare for
    # saliency map computation in the next step of the attack
    other_classes = utils.other_classes(nb_classes, target)
    grad_others = np.sum(jacobian_val[other_classes, :], axis=0)

    return jacobian_val[target], grad_others
Example 41
Project: neural-fingerprinting   Author: StephanZheng   File: attacks_tf.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def _compute_gradients(self, loss_fn, x, unused_optim_state):
        """Compute gradient estimates using SPSA (simultaneous perturbation
        stochastic approximation).

        :param loss_fn: callable mapping a batch of perturbed inputs to
            per-sample loss values
        :param x: a list containing a single [1, H, W, C] image tensor
        :param unused_optim_state: ignored; kept for the optimizer interface
        :return: single-element list holding the averaged gradient estimate
        """
        # Assumes `x` is a list, containing a [1, H, W, C] image
        assert len(x) == 1 and x[0].get_shape().as_list()[0] == 1
        x = x[0]
        x_shape = x.get_shape().as_list()

        def body(i, grad_array):
            # Antithetic sampling: evaluate the loss at +delta_x and
            # -delta_x so the finite-difference estimate is symmetric.
            delta = self._delta
            delta_x = self._get_delta(x, delta)
            delta_x = tf.concat([delta_x, -delta_x], axis=0)
            loss_vals = tf.reshape(
                loss_fn(x + delta_x),
                [2 * self._num_samples] + [1] * (len(x_shape) - 1))
            # Loss-weighted average of the perturbations -> SPSA gradient.
            avg_grad = reduce_mean(loss_vals * delta_x, axis=0) / delta
            avg_grad = tf.expand_dims(avg_grad, axis=0)
            new_grad_array = grad_array.write(i, avg_grad)
            return i + 1, new_grad_array

        def cond(i, _):
            return i < self._num_iters

        # back_prop is disabled: gradients come from sampling, not autodiff.
        _, all_grads = tf.while_loop(
            cond,
            body,
            loop_vars=[
                0, tf.TensorArray(size=self._num_iters, dtype=tf_dtype)
            ],
            back_prop=False,
            parallel_iterations=1)
        # Sum the per-iteration estimates into a single gradient tensor.
        avg_grad = reduce_sum(all_grads.stack(), axis=0)
        return [avg_grad]
Example 42
Project: tensorflow-DeepFM   Author: ChenglongChen   File: DeepFM.py    MIT License 5 votes vote down vote up
def predict(self, Xi, Xv):
        """
        :param Xi: list of list of feature indices of each sample in the dataset
        :param Xv: list of list of feature values of each sample in the dataset
        :return: predicted probability of each sample
        """
        # dummy y, only needed to satisfy get_batch()'s interface
        dummy_y = [1] * len(Xi)
        batch_index = 0
        Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
        # Collect per-batch outputs and concatenate once at the end;
        # concatenating inside the loop is accidentally O(n^2).
        batch_preds = []
        while len(Xi_batch) > 0:
            num_batch = len(y_batch)
            # Dropout disabled (keep prob 1.0) and train_phase False: inference.
            feed_dict = {self.feat_index: Xi_batch,
                         self.feat_value: Xv_batch,
                         self.label: y_batch,
                         self.dropout_keep_fm: [1.0] * len(self.dropout_fm),
                         self.dropout_keep_deep: [1.0] * len(self.dropout_deep),
                         self.train_phase: False}
            batch_out = self.sess.run(self.out, feed_dict=feed_dict)
            batch_preds.append(np.reshape(batch_out, (num_batch,)))

            batch_index += 1
            Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)

        # Preserve the original contract: None when there were no samples.
        y_pred = np.concatenate(batch_preds) if batch_preds else None
        return y_pred
Example 43
Project: animal-tracking   Author: colinlaney   File: track.py    Creative Commons Zero v1.0 Universal 5 votes vote down vote up
def drawFloorCrop(event, x, y, flags, params):
    """Mouse callback that lets the user crop the floor region as a tetragon.

    Left-clicks add vertices, mouse movement drags the last vertex, and a
    right-click closes the preview window.  Once four vertices are fixed,
    the perspective transform for this floor is (re)computed.

    :param event: OpenCV mouse event code
    :param x: cursor x position in window coordinates
    :param y: cursor y position in window coordinates
    :param flags: OpenCV callback flags (unused)
    :param params: dict carrying 'imgFloorCorners' and 'croppingPolygons'
    """
    global perspectiveMatrix, name, RENEW_TETRAGON
    imgCroppingPolygon = np.zeros_like(params['imgFloorCorners'])
    if event == cv2.EVENT_RBUTTONUP:
        cv2.destroyWindow(f'Floor Corners for {name}')
    if len(params['croppingPolygons'][name]) > 4 and event == cv2.EVENT_LBUTTONUP:
        RENEW_TETRAGON = True
        h = params['imgFloorCorners'].shape[0]
        # delete 5th extra vertex of the floor cropping tetragon
        params['croppingPolygons'][name] = np.delete(params['croppingPolygons'][name], -1, 0)
        # shift vertices by the image height — presumably undoing a
        # side-by-side display offset; confirm against the caller.
        params['croppingPolygons'][name] = params['croppingPolygons'][name] - [h,0]
        
        # Sort cropping tetragon vertices counter-clockwise starting with top left
        params['croppingPolygons'][name] = counterclockwiseSort(params['croppingPolygons'][name])
        # Get the matrix of perspective transformation
        params['croppingPolygons'][name] = np.reshape(params['croppingPolygons'][name], (4,2))
        tetragonVertices = np.float32(params['croppingPolygons'][name])
        tetragonVerticesUpd = np.float32([[0,0], [0,h], [h,h], [h,0]])
        perspectiveMatrix[name] = cv2.getPerspectiveTransform(tetragonVertices, tetragonVerticesUpd)
    if event == cv2.EVENT_LBUTTONDOWN:
        # A fresh click after a completed tetragon starts a new polygon.
        if len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON:
            params['croppingPolygons'][name] = np.array([[0,0]])
            RENEW_TETRAGON = False
        if len(params['croppingPolygons'][name]) == 1:
            params['croppingPolygons'][name][0] = [x,y]
        params['croppingPolygons'][name] = np.append(params['croppingPolygons'][name], [[x,y]], axis=0)
    if event == cv2.EVENT_MOUSEMOVE and not (len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON):
        # Drag the most recent vertex with the cursor and preview the
        # polygon as a translucent green overlay.
        params['croppingPolygons'][name][-1] = [x,y]
        if len(params['croppingPolygons'][name]) > 1:
            cv2.fillPoly(
                imgCroppingPolygon,
                [np.reshape(
                    params['croppingPolygons'][name],
                    (len(params['croppingPolygons'][name]),2)
                )],
                BGR_COLOR['green'], cv2.LINE_AA)
            imgCroppingPolygon = cv2.addWeighted(params['imgFloorCorners'], 1.0, imgCroppingPolygon, 0.5, 0.)
            cv2.imshow(f'Floor Corners for {name}', imgCroppingPolygon)
Example 44
Project: Deep_VoiceChanger   Author: pstuvwx   File: gla_gpu.py    MIT License 5 votes vote down vote up
def save(path, bps, data):
        """Write `data` to `path` as a 16-bit wav file at `bps` samples/sec."""
        # wav.write expects int16 samples; convert and flatten first.
        if data.dtype != np.int16:
            data = data.astype(np.int16)
        wav.write(path, bps, np.reshape(data, -1))
Example 45
Project: Deep_VoiceChanger   Author: pstuvwx   File: gla_util.py    MIT License 5 votes vote down vote up
def save(path, bps, data):
        """Persist an audio buffer as 16-bit PCM at the given sample rate."""
        samples = data if data.dtype == np.int16 else data.astype(np.int16)
        samples = np.reshape(samples, -1)  # wav.write wants a flat 1-D array
        wav.write(path, bps, samples)
Example 46
Project: Deep_VoiceChanger   Author: pstuvwx   File: dataset.py    MIT License 5 votes vote down vote up
def save(path, bps, data):
    """Save `data` to a wav file, coercing samples to int16 first."""
    samples = np.reshape(data, -1)  # flatten to a 1-D sample stream
    if samples.dtype != np.int16:
        samples = samples.astype(np.int16)
    wav.write(path, bps, samples)
Example 47
Project: oslodatascience-rl   Author: Froskekongen   File: space_invaders4.py    MIT License 5 votes vote down vote up
def preprocess(observation):
    """Convert a raw Atari frame to an 84x84x1 binary grayscale input."""
    resized = cv2.resize(observation, (84, 110))
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    cropped = gray[26:110, :]  # drop the score area, keep the play field
    _, binary = cv2.threshold(cropped, 1, 255, cv2.THRESH_BINARY)
    return np.reshape(binary, (84, 84, 1))
Example 48
Project: oslodatascience-rl   Author: Froskekongen   File: space_invaders3.py    MIT License 5 votes vote down vote up
def preprocess(observation):
    """Resize, grayscale, crop and binarize one frame for the network."""
    frame = cv2.cvtColor(cv2.resize(observation, (84, 110)), cv2.COLOR_BGR2GRAY)
    frame = frame[26:110, :]  # crop away the score banner
    frame = cv2.threshold(frame, 1, 255, cv2.THRESH_BINARY)[1]
    return np.reshape(frame, (84, 84, 1))
Example 49
Project: skylab   Author: coenders   File: utils.py    GNU General Public License v3.0 5 votes vote down vote up
def skymap(plt, vals, **kwargs):
    """Plot a healpix map `vals` on an Aitoff all-sky projection.

    :param plt: matplotlib.pyplot module (passed in, not imported here)
    :param vals: healpix-ordered array of map values
    :param kwargs: forwarded to pcolormesh; the special key "colorbar"
        (a dict, optionally holding "title") configures the colorbar
    :return: (figure, axes)
    """
    fig, ax = plt.subplots(subplot_kw=dict(projection="aitoff"))

    gridsize = 1000

    # Longitude/co-latitude grid covering the whole sky.
    x = np.linspace(np.pi, -np.pi, 2 * gridsize)
    y = np.linspace(np.pi, 0., gridsize)

    X, Y = np.meshgrid(x, y)

    # Rotate by 180 degrees in longitude before the pixel lookup.
    r = hp.rotator.Rotator(rot=(-180., 0., 0.))

    YY, XX = r(Y.ravel(), X.ravel())

    # Map every grid point to its healpix pixel index.
    pix = hp.ang2pix(hp.npix2nside(len(vals)), YY, XX)

    Z = np.reshape(vals[pix], X.shape)

    # Convert to matplotlib's aitoff coordinates (lon reversed, lat from equator).
    lon = x[::-1]
    lat = np.pi /2.  - y

    # Colorbar options ride along in kwargs under the "colorbar" key.
    cb = kwargs.pop("colorbar", dict())
    cb.setdefault("orientation", "horizontal")
    cb.setdefault("fraction", 0.075)

    title = cb.pop("title", None)

    p = ax.pcolormesh(lon, lat, Z, **kwargs)

    cbar = fig.colorbar(p, **cb)

    # Avoid antialiasing seams between colorbar segments.
    cbar.solids.set_edgecolor("face")
    cbar.update_ticks()
    if title is not None:
        cbar.set_label(title)

    ax.xaxis.set_ticks([])

    return fig, ax
Example 50
Project: deep-learning-note   Author: wdxtub   File: 3_ptb_train.py    MIT License 5 votes vote down vote up
def make_batches(id_list, batch_size, num_step):
    """Slice a flat token-id list into (data, label) batch pairs.

    Each batch holds batch_size rows of num_step token ids; the labels are
    the same ids shifted one position to the right (next-word targets).
    """
    # Number of complete batches; each consumes batch_size * num_step ids.
    num_batches = (len(id_list) - 1) // (batch_size * num_step)

    # Inputs: reshape into [batch_size, num_batches * num_step] and split
    # along the time axis into num_batches pieces.
    data = np.reshape(np.array(id_list[: num_batches * batch_size * num_step]),
                      [batch_size, num_batches * num_step])
    data_batches = np.split(data, num_batches, axis=1)

    # Labels: identical layout, shifted right by one id.
    label = np.reshape(np.array(id_list[1 : num_batches * batch_size * num_step + 1]),
                       [batch_size, num_batches * num_step])
    label_batches = np.split(label, num_batches, axis=1)

    # A list of num_batches (data, label) matrix pairs.
    return list(zip(data_batches, label_batches))
Example 51
Project: mlearn   Author: materialsvirtuallab   File: nnp.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def load_weights(self, weights_filename):
        """
        Load weights file of trained Neural Network Potential.

        Args
            weights_filename (str): The weights file.
        """
        with open(weights_filename) as f:
            weights_lines = f.readlines()

        # Every non-comment line is one "value type index start_layer
        # start_neuron end_layer end_neuron" record.
        weight_param = pd.DataFrame([line.split() for line in weights_lines
                                     if "#" not in line])
        weight_param.columns = ['value', 'type', 'index', 'start_layer',
                                'start_neuron', 'end_layer', 'end_neuron']

        for layer_index in range(1, len(self.layer_sizes)):
            # Connection weights between layer_index-1 and layer_index.
            weights_group = weight_param[(weight_param['start_layer'] == str(layer_index - 1))
                                         & (weight_param['end_layer'] == str(layer_index))]

            # np.float was removed in NumPy 1.24; it was an alias of the
            # builtin float, so use that directly.
            weights = np.reshape(np.array(weights_group['value'], dtype=float),
                                 (self.layer_sizes[layer_index - 1],
                                  self.layer_sizes[layer_index]))
            self.weights.append(weights)

            # Bias terms attached to layer_index.
            bs_group = weight_param[(weight_param['type'] == 'b') &
                                    (weight_param['start_layer'] == str(layer_index))]
            bs = np.array(bs_group['value'], dtype=float)
            self.bs.append(bs)

        self.weight_param = weight_param
Example 52
Project: spqrel_tools   Author: LCAS   File: iswaving.py    MIT License 5 votes vote down vote up
def image_qi2np(dataImage):
    """Convert a qi (NAOqi ALImage-style) buffer to a numpy image array.

    :param dataImage: sequence where [1] and [0] give the first two output
        dimensions, [2] the channel count and [6] the raw pixel buffer
        (presumably width/height/channels per the ALImage layout — confirm),
        or None
    :return: (dataImage[1], dataImage[0], channels) uint8 array, or None
    """
    image = None
    # `!= None` invokes __ne__, which misbehaves on array/buffer-like
    # objects; identity comparison is the correct None check.
    if dataImage is not None:
        image = np.reshape(
            np.frombuffer(dataImage[6], dtype='%iuint8' % dataImage[2]),
            (dataImage[1], dataImage[0], dataImage[2]))
        # image = np.fromstring(str(alImage[6]), dtype=np.uint8).reshape( alImage[1],alImage[0], dataImage[2])
    return image
Example 53
Project: neural-pipeline   Author: toodef   File: img_segmentation.py    MIT License 5 votes vote down vote up
def dice(preds: torch.Tensor, trues: torch.Tensor) -> np.ndarray:
    """Per-sample Dice coefficient between prediction and target masks."""
    p = preds.data.cpu().numpy().copy()
    t = trues.data.cpu().numpy().copy()

    # Flatten everything except the batch dimension.
    p = np.reshape(p, (p.shape[0], p.size // p.shape[0]))
    t = np.reshape(t, (t.shape[0], t.size // t.shape[0]))

    overlap = (p * t).sum(1)
    # eps (module-level) keeps empty masks from dividing by zero.
    return (2. * overlap + eps) / (p.sum(1) + t.sum(1) + eps)
Example 54
Project: neural-pipeline   Author: toodef   File: img_segmentation.py    MIT License 5 votes vote down vote up
def jaccard(preds: torch.Tensor, trues: torch.Tensor):
    """Per-sample Jaccard (IoU) score between prediction and target masks."""
    p = preds.cpu().data.numpy().copy()
    t = trues.cpu().data.numpy().copy()

    # Flatten to (batch, features) before the set arithmetic.
    p = np.reshape(p, (p.shape[0], p.size // p.shape[0]))
    t = np.reshape(t, (t.shape[0], t.size // t.shape[0]))
    overlap = (p * t).sum(1)
    # union = |p| + |t| - |intersection|; eps guards the empty-mask case.
    return (overlap + eps) / ((p + t).sum(1) - overlap + eps)
Example 55
Project: rl_3d   Author: avdmitry   File: agent_a3c.py    MIT License 5 votes vote down vote up
def Preprocess(frame):
    """Convert a raw frame to the network's input resolution/channels."""
    if channels == 1:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # cv2.resize takes (width, height); resolution stores (height, width).
    resized = cv2.resize(frame, (resolution[1], resolution[0]))
    return np.reshape(resized, resolution)
Example 56
Project: rl_3d   Author: avdmitry   File: agent_dqn.py    MIT License 5 votes vote down vote up
def Preprocess(img):
    """Resize (and optionally grayscale) a frame for the DQN input."""
    #cv2.imshow("frame-train", img)
    #cv2.waitKey(20)
    frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if channels == 1 else img
    # cv2.resize takes (width, height); resolution stores (height, width).
    frame = cv2.resize(frame, (resolution[1], resolution[0]))
    #cv2.imshow("frame-train", img)
    #cv2.waitKey(200)
    return np.reshape(frame, resolution)
Example 57
Project: rl_3d   Author: avdmitry   File: agent_dqn.py    MIT License 5 votes vote down vote up
def GetAction(self, state):
        """Return the greedy action for `state` from the policy network."""
        batch = state.astype(np.float32).reshape([1] + list(resolution))
        return self.session.run(self.action, feed_dict={self.s_: batch})[0]
Example 58
Project: fuku-ml   Author: fukuball   File: Utility.py    MIT License 5 votes vote down vote up
def excute(self):
        """Run 10-fold cross-validation for every model and collect each
        model's average validation error."""
        for model in self.models:

            fold_size = int(math.ceil(len(model.train_Y) / 10))

            # Shuffle features and labels together as one matrix.
            model.train_Y = np.reshape(model.train_Y, (-1, 1))
            dataset = np.concatenate((model.train_X, model.train_Y), axis=1)
            np.random.shuffle(dataset)

            total_error = 0

            for fold in range(10):
                # Train on everything outside [fold*size, (fold+1)*size).
                model.train_X = np.concatenate(
                    (dataset[(fold + 1) * fold_size:, :-1],
                     dataset[:fold * fold_size, :-1]), axis=0)
                model.train_Y = np.concatenate(
                    (dataset[(fold + 1) * fold_size:, -1],
                     dataset[:fold * fold_size, -1]), axis=0)
                model.init_W()
                model.train()
                validate_X = dataset[fold * fold_size:(fold + 1) * fold_size, :-1]
                validate_Y = dataset[fold * fold_size:(fold + 1) * fold_size, -1]

                if hasattr(model, 'class_list'):
                    total_error += model.calculate_avg_error_all_class(validate_X, validate_Y, model.W)
                else:
                    total_error += model.calculate_avg_error(validate_X, validate_Y, model.W)

            # Restore the full (shuffled) training set on the model.
            model.train_X = dataset[:, :-1]
            model.train_Y = dataset[:, -1]

            dataset = None
            self.avg_errors.append(total_error / 10)

        return self.avg_errors
Example 59
Project: fuku-ml   Author: fukuball   File: DecisionTree.py    MIT License 5 votes vote down vote up
def divide_set(self, X, Y, column, value):
        """Split the labelled dataset on one column: numeric values split on
        >= value, string values split on equality.

        Returns a (matching_rows, non_matching_rows) pair of arrays, each
        row carrying its label in the last column."""
        labels = np.reshape(Y, (-1, 1))
        combined = np.concatenate((X, labels), axis=1)

        try:
            threshold = float(value)
            # for int and float values
            def matches(row):
                return float(row[column]) >= threshold
        except ValueError:
            # for strings
            def matches(row):
                return row[column] == value

        true_rows = np.array([row for row in combined if matches(row)])
        false_rows = np.array([row for row in combined if not matches(row)])

        return (true_rows, false_rows)
Example 60
Project: fuku-ml   Author: fukuball   File: ProbabilisticSVM.py    MIT License 5 votes vote down vote up
def train(self):
        """Train the probabilistic SVM: fit a binary SVM, then fit a
        logistic regression on the SVM scores (Platt-style scaling)."""
        if self.status != 'init':
            print("Please load train data and init W first.")
            return self.W

        self.status = 'train'

        # Stage 1: binary SVM on the raw features.
        self.svm_processor = svm.BinaryClassifier()
        self.svm_processor.load_train_data()
        self.svm_processor.train_X = self.train_X
        self.svm_processor.train_Y = self.train_Y
        self.svm_processor.set_param(svm_kernel=self.svm_kernel, gamma=self.gamma, C=self.C)
        self.svm_processor.init_W()
        self.svm_processor.train()

        # slow: score every training sample with the fitted SVM, then
        # prepend the bias column for the logistic regression.
        scores = np.reshape(np.apply_along_axis(self.svm_score, axis=1, arr=self.train_X), (-1, 1))
        bias_col = np.reshape(np.ones(self.data_num), (-1, 1))
        svm_transform_X = np.concatenate((bias_col, scores), axis=1)

        # Stage 2: logistic regression on the (bias, score) features.
        self.logistic_processor = logistic_regression.LogisticRegression()
        self.logistic_processor.load_train_data()
        self.logistic_processor.train_X = svm_transform_X
        self.logistic_processor.train_Y = self.train_Y
        self.logistic_processor.set_param(feed_mode=self.feed_mode, step_eta=self.step_eta, updates=self.updates)
        self.logistic_processor.init_W()
        self.logistic_processor.train()

        return self.W
Example 61
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: convert_data.py    Apache License 2.0 5 votes vote down vote up
def convert_mat_to_images(args):
    '''convert the caltech101 mat file to images
    Examples
    --------
    python convert_data.py --dataset /home/ubuntu/datasets/caltech101/data/caltech101_silhouettes_28.mat --save_path /home/ubuntu/datasets/caltech101/data/ --invert --height 32 --width 32
    '''
    dataset = scipy.io.loadmat("{}/{}".format(args.save_path, args.dataset))

    # image pixel data
    X = dataset['X']

    # image class labels (not used in this project)
    Y = dataset['Y']

    for idx in range(X.shape[0]):
        # Silhouettes are stored as flat 28x28 binary images.
        pixels = np.reshape(X[idx], (28, 28))
        if args.invert:
            pixels = (1 - pixels) * 255
        else:
            pixels = pixels * 255
        img = Image.fromarray(pixels, 'L')
        img = img.rotate(-90)  # images are stored rotated; undo it
        img = img.resize([args.height, args.width], Image.BILINEAR)
        img.save(args.save_path + '/img' + str(idx) + '.png')
Example 62
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: text_cnn.py    Apache License 2.0 5 votes vote down vote up
def data_iter(batch_size, num_embed, pre_trained_word2vec=False):
    """Build train/valid NDArray iterators for the text CNN."""
    print('Loading data...')
    if pre_trained_word2vec:
        word2vec = data_helpers.load_pretrained_word2vec('data/rt.vec')
        x, y = data_helpers.load_data_with_word2vec(word2vec)
        # reshape for convolution input: add a channel axis
        x = np.reshape(x, (x.shape[0], 1, x.shape[1], x.shape[2]))
        embed_size = x.shape[-1]
        sentence_size = x.shape[2]
        vocab_size = -1
    else:
        x, y, vocab, vocab_inv = data_helpers.load_data()
        embed_size = num_embed
        sentence_size = x.shape[1]
        vocab_size = len(vocab)

    # Shuffle samples with a fixed seed so the split is reproducible.
    np.random.seed(10)
    order = np.random.permutation(np.arange(len(y)))
    x_shuffled, y_shuffled = x[order], y[order]

    # Hold out the last 1000 shuffled samples for validation.
    x_train, x_dev = x_shuffled[:-1000], x_shuffled[-1000:]
    y_train, y_dev = y_shuffled[:-1000], y_shuffled[-1000:]
    print('Train/Valid split: %d/%d' % (len(y_train), len(y_dev)))
    print('train shape:', x_train.shape)
    print('valid shape:', x_dev.shape)
    print('sentence max words', sentence_size)
    print('embedding size', embed_size)
    print('vocab size', vocab_size)

    train = mx.io.NDArrayIter(
        x_train, y_train, batch_size, shuffle=True)
    valid = mx.io.NDArrayIter(
        x_dev, y_dev, batch_size)

    return (train, valid, sentence_size, embed_size, vocab_size)
Example 63
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: text_cnn.py    Apache License 2.0 5 votes vote down vote up
def data_iter(batch_size, num_embed, pre_trained_word2vec=False):
    """Build train/valid mx.io.NDArrayIter iterators for the text CNN.

    :param batch_size: minibatch size for both iterators
    :param num_embed: embedding size used when no pre-trained vectors
    :param pre_trained_word2vec: if True, load data through pre-trained
        word2vec vectors (vocab_size is then reported as -1)
    :return: (train_iter, valid_iter, sentence_size, embed_size, vocab_size)
    """
    logger.info('Loading data...')
    if pre_trained_word2vec:
        word2vec = data_helpers.load_pretrained_word2vec('data/rt.vec')
        x, y = data_helpers.load_data_with_word2vec(word2vec)
        # reshape for convolution input: add a channel axis
        x = np.reshape(x, (x.shape[0], 1, x.shape[1], x.shape[2]))
        embed_size = x.shape[-1]
        sentence_size = x.shape[2]
        vocab_size = -1
    else:
        x, y, vocab, vocab_inv = data_helpers.load_data()
        embed_size = num_embed
        sentence_size = x.shape[1]
        vocab_size = len(vocab)

    # randomly shuffle data (fixed seed keeps the split reproducible)
    np.random.seed(10)
    shuffle_indices = np.random.permutation(np.arange(len(y)))
    x_shuffled = x[shuffle_indices]
    y_shuffled = y[shuffle_indices]

    # split train/valid set: the last 1000 shuffled samples are held out
    x_train, x_dev = x_shuffled[:-1000], x_shuffled[-1000:]
    y_train, y_dev = y_shuffled[:-1000], y_shuffled[-1000:]
    logger.info('Train/Valid split: %d/%d' % (len(y_train), len(y_dev)))
    logger.info('train shape: %(shape)s', {'shape': x_train.shape})
    logger.info('valid shape: %(shape)s', {'shape': x_dev.shape})
    logger.info('sentence max words: %(shape)s', {'shape': sentence_size})
    logger.info('embedding size: %(msg)s', {'msg': embed_size})
    logger.info('vocab size: %(msg)s', {'msg': vocab_size})

    train = mx.io.NDArrayIter(
        x_train, y_train, batch_size, shuffle=True)
    valid = mx.io.NDArrayIter(
        x_dev, y_dev, batch_size)
    return (train, valid, sentence_size, embed_size, vocab_size)
Example 64
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: VAE.py    Apache License 2.0 5 votes vote down vote up
def encoder(model, x):
        """Run the VAE encoder on a batch `x` using trained parameters.

        :param model: mxnet module whose arg_params hold the encoder weights
        :param x: (batch, input_dim) array
        :return: (mu, logvar) of the approximate posterior, each (batch, latent_dim)
        """
        params = model.arg_params
        hidden_n = np.shape(params['encoder_h_bias'].asnumpy())[0]
        # Hidden pre-activation W x^T + b, one column per sample.
        pre_act = np.dot(params['encoder_h_weight'].asnumpy(), np.transpose(x)) \
                  + np.reshape(params['encoder_h_bias'].asnumpy(), (hidden_n, 1))
        hidden = np.tanh(pre_act)
        mu = np.transpose(np.dot(params['mu_weight'].asnumpy(), hidden)) + params['mu_bias'].asnumpy()
        logvar = np.transpose(np.dot(params['logvar_weight'].asnumpy(), hidden)) + params['logvar_bias'].asnumpy()
        return mu, logvar
Example 65
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_slice_channel():
    """Check mx.sym.SliceChannel forward/backward against a NumPy reference.

    Forward outputs must equal equal-sized slices of the input along `axis`
    (optionally squeezed when each slice has length 1); the backward pass must
    concatenate the output gradients back along the same axis.
    """
    def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
        if squeeze_axis:
            # axis length == num_outputs so each slice has length 1 and the
            # sliced axis can be squeezed away
            shape = np.random.randint(2, 5, data_ndim).tolist()
            shape[axis] = num_outputs
            out_ele_shape = [ele for ele in shape]
            del out_ele_shape[axis]
        else:
            shape = np.random.randint(1, 5, data_ndim).tolist()
            shape[axis] *= num_outputs
            out_ele_shape = [ele for ele in shape]
            out_ele_shape[axis] //= num_outputs
        data_npy = np.random.normal(size=shape)
        out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
        data = mx.sym.Variable('data')
        sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
        exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
        assert len(exe.outputs) == num_outputs
        outputs = exe.forward(is_train=True, data=data_npy)
        # shape[axis] is an exact multiple of num_outputs, so pure integer
        # arithmetic suffices; the old float division + astype(np.int)
        # depended on the np.int alias removed in NumPy 1.24.
        slice_len = shape[axis] // num_outputs
        for i in range(num_outputs):
            gt = data_npy.take(np.arange(i * slice_len, (i + 1) * slice_len), axis=axis)
            if squeeze_axis:
                assert_almost_equal(outputs[i].asnumpy(), gt.reshape(outputs[i].shape))
            else:
                assert_almost_equal(outputs[i].asnumpy(), gt)
        # test backward: gradients must concatenate back to the input shape
        exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
        if squeeze_axis:
            assert_almost_equal(exe.grad_arrays[0].asnumpy(),
                                np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
                                               axis=axis))
        else:
            assert_almost_equal(exe.grad_arrays[0].asnumpy(),
                                np.concatenate(out_grads_npy, axis=axis))
    check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
    check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
    check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
    check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
Example 66
Project: mtrl-auto-uav   Author: brunapearson   File: pfm.py    MIT License 4 votes vote down vote up
def read_pfm(file):
    """Read a PFM (Portable Float Map) image.

    :param file: path to the .pfm file
    :return: (data, scale) where data is a numpy array of shape
             (height, width, 3) for color ('PF') images or (height, width)
             for grayscale ('Pf'), and scale is the positive scale factor
             from the header
    :raises Exception: if the header magic or dimension line is malformed

    The file is now opened with a context manager: the original leaked the
    handle on every error path (it raised before reaching ``close()``).
    """
    with open(file, 'rb') as fh:
        # first header line: 'PF' = color, 'Pf' = grayscale
        header = str(bytes.decode(fh.readline().rstrip(), encoding='utf-8'))
        if header == 'PF':
            color = True
        elif header == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')

        # dimensions line: "<width> <height>" followed by whitespace; some
        # writers split it across two lines, so retry once with the next
        # line appended
        pattern = r'^(\d+)\s(\d+)\s$'
        temp_str = str(bytes.decode(fh.readline(), encoding='utf-8'))
        dim_match = re.match(pattern, temp_str)
        if dim_match is None:
            temp_str += str(bytes.decode(fh.readline(), encoding='utf-8'))
            dim_match = re.match(pattern, temp_str)
        if dim_match is None:
            raise Exception('Malformed PFM header: width, height cannot be found')
        width, height = map(int, dim_match.groups())

        # the sign of the scale encodes endianness of the float payload
        scale = float(fh.readline().rstrip())
        if scale < 0:  # little-endian
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian

        data = np.fromfile(fh, endian + 'f')

    shape = (height, width, 3) if color else (height, width)
    # NOTE(review): PFM stores scanlines bottom-to-top; this reader does not
    # flip the vertical axis — confirm callers expect that orientation.
    data = np.reshape(data, shape)

    return data, scale
Example 67
Project: xrft   Author: xgcm   File: xrft.py    MIT License 4 votes vote down vote up
def detrendn(da, axes=None):
    """
    Detrend by subtracting out the least-square plane or least-square cubic fit
    depending on the number of axis.

    Parameters
    ----------
    da : `dask.array`
        The data to be detrended
    axes : sequence of int
        Axes of `da` to fit the trend over: two axes fit a plane, three fit
        a linear trend in three index variables. More than three raises.

    Returns
    -------
    da : `numpy.array`
        The detrended input data
    """
    # sizes along the fitted axes
    N = [da.shape[n] for n in axes]
    # sizes of the remaining axes (NOTE(review): M is collected but never
    # used below)
    M = []
    for n in range(da.ndim):
        if n not in axes:
            M.append(da.shape[n])

    if len(N) == 2:
        # design matrix for a least-squares plane: columns are
        # [1, row index, column index], one row per grid point
        G = np.ones((N[0]*N[1],3))
        for i in range(N[0]):
            G[N[1]*i:N[1]*i+N[1], 1] = i+1
            G[N[1]*i:N[1]*i+N[1], 2] = np.arange(1, N[1]+1)
        if type(da) == xr.DataArray:
            d_obs = np.reshape(da.copy().values, (N[0]*N[1],1))
        else:
            d_obs = np.reshape(da.copy(), (N[0]*N[1],1))
    elif len(N) == 3:
        if type(da) == xr.DataArray:
            if da.ndim > 3:
                raise NotImplementedError("Cubic detrend is not implemented "
                                         "for 4-dimensional `xarray.DataArray`."
                                         " We suggest converting it to "
                                         "`dask.array`.")
            else:
                d_obs = np.reshape(da.copy().values, (N[0]*N[1]*N[2],1))
        else:
            d_obs = np.reshape(da.copy(), (N[0]*N[1]*N[2],1))

        # design matrix with columns [1, axis-0 index, axis-1 index,
        # axis-2 index]; the three loops/tiles lay out the indices in the
        # same C order as the flattened data
        G = np.ones((N[0]*N[1]*N[2],4))
        G[:,3] = np.tile(np.arange(1,N[2]+1), N[0]*N[1])
        ys = np.zeros(N[1]*N[2])
        for i in range(N[1]):
            ys[N[2]*i:N[2]*i+N[2]] = i+1
        G[:,2] = np.tile(ys, N[0])
        for i in range(N[0]):
            G[len(ys)*i:len(ys)*i+len(ys),1] = i+1
    else:
        raise NotImplementedError("Detrending over more than 4 axes is "
                                 "not implemented.")

    # normal equations: m = (G^T G)^-1 G^T d, then the fitted trend G m
    m_est = np.dot(np.dot(spl.inv(np.dot(G.T, G)), G.T), d_obs)
    d_est = np.dot(G, m_est)

    # reshape the fitted trend back to the input shape and subtract it
    lin_trend = np.reshape(d_est, da.shape)

    return da - lin_trend
Example 68
Project: xrft   Author: xgcm   File: test_xrft.py    MIT License 4 votes vote down vote up
def test_chunks_to_segments():
    """With `chunks_to_segments=True`, each dask chunk along a transformed
    dimension becomes an independent FFT segment and the result gains a
    `<dim>_segment` dimension; compare against reshaping the raw data by
    hand and FFT-ing along the per-segment axes."""
    N = 32
    da = xr.DataArray(np.random.rand(N,N,N),
                     dims=['time','y','x'],
                     coords={'time':range(N),'y':range(N),'x':range(N)}
                     )

    # uneven chunk sizes along the transformed dimension must be rejected
    with pytest.raises(ValueError):
        xrft.dft(da.chunk(chunks=((20,N,N),(N-20,N,N))), dim=['time'],
                detrend='linear', chunks_to_segments=True)

    # 1-D case: two 16-sample segments along 'time'
    ft = xrft.dft(da.chunk({'time':16}), dim=['time'], shift=False,
                 chunks_to_segments=True)
    assert ft.dims == ('time_segment','freq_time','y','x')
    data = da.chunk({'time':16}).data.reshape((2,16,N,N))
    npt.assert_almost_equal(ft.values, dsar.fft.fftn(data, axes=[1]),
                           decimal=7)
    # 2-D case: 2x2 segments of 16x16 along 'y' and 'x'
    ft = xrft.dft(da.chunk({'y':16,'x':16}), dim=['y','x'], shift=False,
                 chunks_to_segments=True)
    assert ft.dims == ('time','y_segment','freq_y','x_segment','freq_x')
    data = da.chunk({'y':16,'x':16}).data.reshape((N,2,16,2,16))
    npt.assert_almost_equal(ft.values, dsar.fft.fftn(data, axes=[2,4]),
                           decimal=7)
    # segmented power spectrum must equal |F|^2 of the segmented transform
    ps = xrft.power_spectrum(da.chunk({'y':16,'x':16}), dim=['y','x'],
                            shift=False, density=False,
                            chunks_to_segments=True)
    npt.assert_almost_equal(ps.values,
                           (ft*np.conj(ft)).real.values,
                           )
    # segmented cross spectrum of two fields must equal Re(F1 * conj(F2))
    da2 = xr.DataArray(np.random.rand(N,N,N),
                      dims=['time','y','x'],
                      coords={'time':range(N),'y':range(N),'x':range(N)}
                      )
    ft2 = xrft.dft(da2.chunk({'y':16,'x':16}), dim=['y','x'], shift=False,
                  chunks_to_segments=True)
    cs = xrft.cross_spectrum(da.chunk({'y':16,'x':16}),
                            da2.chunk({'y':16,'x':16}),
                            dim=['y','x'], shift=False, density=False,
                            chunks_to_segments=True)
    npt.assert_almost_equal(cs.values,
                           (ft*np.conj(ft2)).real.values,
                           )
Example 69
Project: FRIDA   Author: LCAV   File: utils.py    MIT License 4 votes vote down vote up
def polar_distance(x1, x2):
    """
    Given two arrays of numbers x1 and x2, pairs the cells that are the
    closest and provides the pairing matrix index: x1(index(1,:)) should be as
    close as possible to x2(index(2,:)). The function outputs the average of the
    absolute value of the differences abs(x1(index(1,:))-x2(index(2,:))).

    :param x1: vector 1
    :param x2: vector 2
    :return: d: minimum distance between d
             index: the permutation matrix

    Fix: the original used ``xrange``, which does not exist in Python 3;
    ``range`` is the equivalent here.
    """
    x1 = np.reshape(x1, (1, -1), order='F')
    x2 = np.reshape(x2, (1, -1), order='F')
    N1 = x1.size
    N2 = x2.size
    # pairwise angular distance on the circle: arccos(cos(.)) wraps the
    # difference into [0, pi]
    diffmat = np.arccos(np.cos(x1 - np.reshape(x2, (-1, 1), order='F')))
    min_N1_N2 = np.min([N1, N2])
    index = np.zeros((min_N1_N2, 2), dtype=int)
    if min_N1_N2 > 1:
        # greedy matching: repeatedly take the globally closest remaining
        # pair, then mask out its row and column
        for k in range(min_N1_N2):
            d2 = np.min(diffmat, axis=0)
            index2 = np.argmin(diffmat, axis=0)
            index1 = np.argmin(d2)
            index2 = index2[index1]
            index[k, :] = [index1, index2]
            diffmat[index2, :] = float('inf')
            diffmat[:, index1] = float('inf')
        d = np.mean(np.arccos(np.cos(x1[:, index[:, 0]] - x2[:, index[:, 1]])))
    else:
        # one of the inputs is a single element: take its closest partner
        d = np.min(diffmat)
        index = np.argmin(diffmat)
        if N1 == 1:
            index = np.array([1, index])
        else:
            index = np.array([index, 1])

    # sort to keep the order of the first vector
    if min_N1_N2 > 1:
        perm = np.argsort(index[:,0])
        index = index[perm,:]

    return d, index
Example 70
Project: FRIDA   Author: LCAV   File: generators.py    MIT License 4 votes vote down vote up
def gen_speech_at_mic_stft(phi_ks, source_signals, mic_array_coord, noise_power, fs, fft_size=1024):
    """
    generate microphone signals with short time Fourier transform
    :param phi_ks: azimuth of the acoustic sources
    :param source_signals: speech signals for each arrival angle, one per row
    :param mic_array_coord: x and y coordinates of the microphone array
    :param noise_power: the variance of the microphone noise signal
    :param fs: sampling frequency
    :param fft_size: number of FFT bins
    :return: y_hat_stft: received (complex) signal at microphones
             y_hat_stft_noiseless: the noiseless received (complex) signal at microphones

    Fixes: ``xrange`` (Python 2 only) replaced with ``range``; ``np.int``
    (alias removed in NumPy 1.24) replaced with the builtin ``int``.
    """
    # NOTE(review): fft_size / 1. gives a frame shift equal to the full frame
    # (no overlap) despite the original "half block overlap" comment — confirm.
    frame_shift_step = int(fft_size / 1.)
    K = source_signals.shape[0]  # number of point sources
    num_mic = mic_array_coord.shape[1]  # number of microphones

    # Generate the impulse responses for the array and source directions
    impulse_response = gen_far_field_ir(np.reshape(phi_ks, (1, -1), order='F'),
                                        mic_array_coord, fs)
    # Now generate all the microphone signals: convolve each source with its
    # impulse response and sum the contributions at every microphone
    y = np.zeros((num_mic, source_signals.shape[1] + impulse_response.shape[2] - 1), dtype=np.float32)
    for src in range(K):
        for mic in range(num_mic):
            y[mic] += fftconvolve(impulse_response[src, mic], source_signals[src])

    # Now do the short time Fourier transform
    # The resulting signal is M x fft_size/2+1 x number of frames
    y_hat_stft_noiseless = \
        np.array([pra.stft(signal, fft_size, frame_shift_step, transform=mkl_fft.rfft).T
                  for signal in y]) / np.sqrt(fft_size)

    # Add noise to the signals
    y_noisy = y + np.sqrt(noise_power) * np.array(np.random.randn(*y.shape), dtype=np.float32)
    # compute sources stft
    source_stft = \
        np.array([pra.stft(s_loop, fft_size, frame_shift_step, transform=mkl_fft.rfft).T
                  for s_loop in source_signals]) / np.sqrt(fft_size)

    y_hat_stft = \
        np.array([pra.stft(signal, fft_size, frame_shift_step, transform=mkl_fft.rfft).T
                  for signal in y_noisy]) / np.sqrt(fft_size)

    return y_hat_stft, y_hat_stft_noiseless, source_stft
Example 71
Project: FRIDA   Author: LCAV   File: generators.py    MIT License 4 votes vote down vote up
def gen_sig_at_mic_stft(phi_ks, alpha_ks, mic_array_coord, SNR, fs, fft_size=1024, Ns=256):
    """
    generate microphone signals with short time Fourier transform
    :param phi_ks: azimuth of the acoustic sources
    :param alpha_ks: power of the sources
    :param mic_array_coord: x and y coordinates of the microphone array
    :param SNR: signal to noise ratio at the microphone
    :param fs: sampling frequency
    :param fft_size: number of FFT bins
    :param Ns: number of time snapshots used to estimate covariance matrix
    :return: y_hat_stft: received (complex) signal at microphones
             y_hat_stft_noiseless: the noiseless received (complex) signal at microphones

    Fixes: ``xrange`` (Python 2 only) replaced with ``range``; ``np.int``
    (alias removed in NumPy 1.24) replaced with the builtin ``int``.
    """
    # NOTE(review): fft_size / 1. gives a frame shift equal to the full frame
    # (no overlap) despite the original "half block overlap" comment — confirm.
    frame_shift_step = int(fft_size / 1.)
    K = alpha_ks.shape[0]  # number of point sources
    num_mic = mic_array_coord.shape[1]  # number of microphones

    # Generate the impulse responses for the array and source directions
    impulse_response = gen_far_field_ir(np.reshape(phi_ks, (1, -1), order='F'),
                                        mic_array_coord, fs)

    # White-noise source signals scaled to the requested per-source powers
    # source_signal = np.random.randn(K, Ns * fft_size) * np.sqrt(alpha_ks[:, np.newaxis])
    source_signal = np.random.randn(K, fft_size + (Ns - 1) * frame_shift_step) * \
                    np.sqrt(np.reshape(alpha_ks, (-1, 1), order='F'))

    # Now generate all the microphone signals: convolve each source with its
    # impulse response and sum the contributions at every microphone
    y = np.zeros((num_mic, source_signal.shape[1] + impulse_response.shape[2] - 1), dtype=np.float32)
    for src in range(K):
        for mic in range(num_mic):
            y[mic] += fftconvolve(impulse_response[src, mic], source_signal[src])

    # Now do the short time Fourier transform
    # The resulting signal is M x fft_size/2+1 x number of frames
    y_hat_stft_noiseless = \
        np.array([pra.stft(signal, fft_size, frame_shift_step, transform=mkl_fft.rfft).T
                  for signal in y]) / np.sqrt(fft_size)

    # compute noise variance based on SNR
    signal_energy = linalg.norm(y_hat_stft_noiseless.flatten()) ** 2
    noise_energy = signal_energy / 10 ** (SNR * 0.1)
    sigma2_noise = noise_energy / y_hat_stft_noiseless.size

    # Add noise to the signals
    y_noisy = y + np.sqrt(sigma2_noise) * np.array(np.random.randn(*y.shape), dtype=np.float32)

    y_hat_stft = \
        np.array([pra.stft(signal, fft_size, frame_shift_step, transform=mkl_fft.rfft).T
                  for signal in y_noisy]) / np.sqrt(fft_size)

    return y_hat_stft, y_hat_stft_noiseless
Example 72
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: boxes_grid.py    MIT License 4 votes vote down vote up
def get_boxes_grid(image_height, image_width):
    """
    Return the boxes on image grid.

    :param image_height: input image height in pixels
    :param image_width: input image width in pixels
    :return: (boxes_grid, centers_x, centers_y) where boxes_grid is an
             (num_centers * num_aspect, 4) array of [x1, y1, x2, y2] boxes
    :raises ValueError: if cfg.NET_NAME is not a supported architecture

    Fix: the original error branch was ``assert (1), msg`` — asserting a
    truthy constant, so it could never fire and execution continued with
    undefined height/width (NameError). It now raises explicitly.
    """
    # height and width of the heatmap, mirroring each network's downsampling
    if cfg.NET_NAME == 'CaffeNet':
        height = np.floor((image_height * max(cfg.TRAIN.SCALES) - 1) / 4.0 + 1)
        height = np.floor((height - 1) / 2.0 + 1 + 0.5)
        height = np.floor((height - 1) / 2.0 + 1 + 0.5)

        width = np.floor((image_width * max(cfg.TRAIN.SCALES) - 1) / 4.0 + 1)
        width = np.floor((width - 1) / 2.0 + 1 + 0.5)
        width = np.floor((width - 1) / 2.0 + 1 + 0.5)
    elif cfg.NET_NAME == 'VGGnet':
        height = np.floor(image_height * max(cfg.TRAIN.SCALES) / 2.0 + 0.5)
        height = np.floor(height / 2.0 + 0.5)
        height = np.floor(height / 2.0 + 0.5)
        height = np.floor(height / 2.0 + 0.5)

        width = np.floor(image_width * max(cfg.TRAIN.SCALES) / 2.0 + 0.5)
        width = np.floor(width / 2.0 + 0.5)
        width = np.floor(width / 2.0 + 0.5)
        width = np.floor(width / 2.0 + 0.5)
    else:
        raise ValueError('The network architecture is not supported in utils.get_boxes_grid!')

    # compute the grid box centers
    h = np.arange(height)
    w = np.arange(width)
    y, x = np.meshgrid(h, w, indexing='ij')
    centers = np.dstack((x, y))
    centers = np.reshape(centers, (-1, 2))
    num = centers.shape[0]

    # compute width and height of grid box for each aspect ratio, keeping
    # the box area fixed
    area = cfg.TRAIN.KERNEL_SIZE * cfg.TRAIN.KERNEL_SIZE
    aspect = cfg.TRAIN.ASPECTS  # height / width
    num_aspect = len(aspect)
    widths = np.zeros((1, num_aspect), dtype=np.float32)
    heights = np.zeros((1, num_aspect), dtype=np.float32)
    for i in range(num_aspect):
        widths[0, i] = math.sqrt(area / aspect[i])
        heights[0, i] = widths[0, i] * aspect[i]

    # construct grid boxes centered at every heatmap location
    centers = np.repeat(centers, num_aspect, axis=0)
    widths = np.tile(widths, num).transpose()
    heights = np.tile(heights, num).transpose()

    x1 = np.reshape(centers[:, 0], (-1, 1)) - widths * 0.5
    x2 = np.reshape(centers[:, 0], (-1, 1)) + widths * 0.5
    y1 = np.reshape(centers[:, 1], (-1, 1)) - heights * 0.5
    y2 = np.reshape(centers[:, 1], (-1, 1)) + heights * 0.5

    # scale back from heatmap to input image coordinates
    boxes_grid = np.hstack((x1, y1, x2, y2)) / cfg.TRAIN.SPATIAL_SCALE

    return boxes_grid, centers[:, 0], centers[:, 1]
Example 73
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def get_data(dataset='mnist'):
    """
    Load the train/test split of one of three image datasets.

    Pixels are scaled into [-0.5, 0.5] (instead of [0, 1]), which suits the
    C&W attack and generally gives better performance.

    :param dataset: one of 'mnist', 'cifar' or 'svhn'
    :return: (X_train, Y_train, X_test, Y_test) with one-hot labels
    """
    assert dataset in ['mnist', 'cifar', 'svhn'], \
        "dataset parameter must be either 'mnist' 'cifar' or 'svhn'"

    if dataset == 'mnist':
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
        # restore the dropped channel axis: (n_samples, 28, 28, 1)
        X_train = X_train.reshape(-1, 28, 28, 1)
        X_test = X_test.reshape(-1, 28, 28, 1)
    elif dataset == 'cifar':
        (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    else:
        # NOTE(review): curl writes into ../data/ while loading reads from
        # PATH_DATA — presumably the same directory; confirm.
        if not os.path.isfile(os.path.join(PATH_DATA, "svhn_train.mat")):
            print('Downloading SVHN train set...')
            call(
                "curl -o ../data/svhn_train.mat "
                "http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
                shell=True
            )
        if not os.path.isfile(os.path.join(PATH_DATA, "svhn_test.mat")):
            print('Downloading SVHN test set...')
            call(
                "curl -o ../data/svhn_test.mat "
                "http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
                shell=True
            )
        train = sio.loadmat(os.path.join(PATH_DATA, 'svhn_train.mat'))
        test = sio.loadmat(os.path.join(PATH_DATA, 'svhn_test.mat'))
        # move the last axis (samples) to the front
        X_train = np.transpose(train['X'], axes=[3, 0, 1, 2])
        X_test = np.transpose(test['X'], axes=[3, 0, 1, 2])
        # labels come as 1-indexed (n_samples, 1); flatten and shift to 0-index
        y_train = np.reshape(train['y'], (-1,)) - 1
        y_test = np.reshape(test['y'], (-1,)) - 1

    # cast pixels to floats, then shift the [0, 1] range so the max is CLIP_MAX
    X_train = (X_train.astype('float32') / 255.0) - (1.0 - CLIP_MAX)
    X_test = (X_test.astype('float32') / 255.0) - (1.0 - CLIP_MAX)

    # one-hot-encode the labels
    Y_train = np_utils.to_categorical(y_train, 10)
    Y_test = np_utils.to_categorical(y_test, 10)

    for tag, arr in (("X_train:", X_train), ("Y_train:", Y_train), ("X_test:", X_test)):
        print(tag, arr.shape)
    print("Y_test", Y_test.shape)

    return X_train, Y_train, X_test, Y_test
Example 74
Project: YOLOv1_tensorflow_windows   Author: FatherRen   File: main.py    GNU General Public License v3.0 4 votes vote down vote up
def __init__(self, is_training=True):
        """Build the YOLO v1 graph: placeholders, network output and loss."""
        # Configuration parameters (see the Data module for details).
        self.classes = cfg.CLASSES
        self.num_class = len(self.classes)
        self.image_size = cfg.IMAGE_SIZE
        self.cell_size = cfg.CELL_SIZE
        self.boxes_per_cell = cfg.BOXES_PER_CELL
        # per cell: class scores plus (confidence, x, y, w, h) for each box
        self.output_size = (self.cell_size * self.cell_size) * (self.num_class + self.boxes_per_cell * 5)
        self.scale = 1.0 * self.image_size / self.cell_size
        # boundaries splitting the flat output vector into
        # [class scores | box confidences | box coordinates]
        self.boundary1 = self.cell_size * self.cell_size * self.num_class
        self.boundary2 = self.boundary1 + self.cell_size * self.cell_size * self.boxes_per_cell

        # Relative weights of the individual loss terms, used when building
        # the loss function to keep the losses balanced.
        self.object_scale = cfg.OBJECT_SCALE
        self.noobject_scale = cfg.NOOBJECT_SCALE
        self.class_scale = cfg.CLASS_SCALE
        self.coord_scale = cfg.COORD_SCALE

        # base learning rate
        self.learning_rate = cfg.LEARNING_RATE
        # batch size
        self.batch_size = cfg.BATCH_SIZE
        # alpha parameter of the leaky ReLU
        self.alpha = cfg.ALPHA

        # per-cell coordinate offsets, shape (cell_size, cell_size, boxes_per_cell)
        self.offset = np.transpose(np.reshape(np.array([np.arange(self.cell_size)] * self.cell_size * self.boxes_per_cell),
                                              (self.boxes_per_cell, self.cell_size, self.cell_size)), (1, 2, 0))

        # network input images
        self.images = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, 3], name='images')
        # network output, (batch_size, 1470)
        self.logits = self.build_network(self.images, num_outputs=self.output_size,
                                         alpha=self.alpha, is_training=is_training)

        # ground-truth labels
        self.labels = tf.placeholder(tf.float32, [None, self.cell_size, self.cell_size, 5 + self.num_class])
        # builds all the loss terms and gathers them in one place
        self.loss_layer(self.logits, self.labels)
        # obtain the total loss over everything registered above
        self.total_loss = tf.losses.get_total_loss()
        # record the total loss so it can be viewed in TensorBoard
        tf.summary.scalar('total_loss', self.total_loss)
Example 75
Project: YOLOv1_tensorflow_windows   Author: FatherRen   File: main.py    GNU General Public License v3.0 4 votes vote down vote up
def interpret_output(self, output):
        """Decode one flat YOLO output vector into final detections.

        :param output: 1-D array packing, per grid cell,
            [class probabilities | box confidences | box coordinates]
            (split by self.boundary1 / self.boundary2)
        :return: list of [class_name, x, y, w, h, prob] in image pixels
        """
        probs = np.zeros((self.cell_size, self.cell_size, self.boxes_per_cell, self.num_class))
        # unpack the three consecutive sections of the output vector
        class_probs = np.reshape(output[0:self.boundary1], (self.cell_size, self.cell_size, self.num_class))
        scales = np.reshape(output[self.boundary1:self.boundary2],
                            (self.cell_size, self.cell_size, self.boxes_per_cell))
        boxes = np.reshape(output[self.boundary2:], (self.cell_size, self.cell_size, self.boxes_per_cell, 4))
        # per-cell column offsets turning cell-relative x/y into grid coordinates
        offset = np.array([np.arange(self.cell_size)] * self.cell_size * self.boxes_per_cell)
        offset = np.transpose(np.reshape(offset, [self.boxes_per_cell, self.cell_size, self.cell_size]), (1, 2, 0))

        boxes[:, :, :, 0] += offset
        boxes[:, :, :, 1] += np.transpose(offset, (1, 0, 2))
        # normalize centers to [0, 1]; w/h are predicted as square roots,
        # so square them back
        boxes[:, :, :, :2] = 1.0 * boxes[:, :, :, 0:2] / self.cell_size
        boxes[:, :, :, 2:] = np.square(boxes[:, :, :, 2:])

        # scale normalized coordinates up to image pixels
        boxes *= self.image_size

        # class-conditional confidence = class probability * box confidence
        for i in range(self.boxes_per_cell):
            for j in range(self.num_class):
                probs[:, :, i, j] = np.multiply(class_probs[:, :, j], scales[:, :, i])

        # keep only detections above the confidence threshold
        filter_mat_probs = np.array(probs >= self.threshold, dtype='bool')
        filter_mat_boxes = np.nonzero(filter_mat_probs)
        boxes_filtered = boxes[filter_mat_boxes[0],
                               filter_mat_boxes[1], filter_mat_boxes[2]]
        probs_filtered = probs[filter_mat_probs]
        classes_num_filtered = np.argmax(filter_mat_probs,
                                         axis=3)[filter_mat_boxes[0], filter_mat_boxes[1], filter_mat_boxes[2]]

        # sort surviving detections by confidence, highest first
        argsort = np.array(np.argsort(probs_filtered))[::-1]
        boxes_filtered = boxes_filtered[argsort]
        probs_filtered = probs_filtered[argsort]
        classes_num_filtered = classes_num_filtered[argsort]

        # greedy non-maximum suppression: zero the confidence of any box that
        # overlaps a higher-confidence box beyond iou_threshold
        for i in range(len(boxes_filtered)):
            if probs_filtered[i] == 0:
                continue
            for j in range(i + 1, len(boxes_filtered)):
                if self.iou(boxes_filtered[i], boxes_filtered[j]) > self.iou_threshold:
                    probs_filtered[j] = 0.0

        # drop the suppressed boxes
        filter_iou = np.array(probs_filtered > 0.0, dtype='bool')
        boxes_filtered = boxes_filtered[filter_iou]
        probs_filtered = probs_filtered[filter_iou]
        classes_num_filtered = classes_num_filtered[filter_iou]

        result = []
        for i in range(len(boxes_filtered)):
            result.append([self.classes[classes_num_filtered[i]],
                           boxes_filtered[i][0],
                           boxes_filtered[i][1],
                           boxes_filtered[i][2],
                           boxes_filtered[i][3],
                           probs_filtered[i]])
        return result
Example 76
Project: deep-learning-note   Author: wdxtub   File: lenet_mnist_train.py    MIT License 4 votes vote down vote up
def train(mnist):
    """Train the MNIST convnet with moving averages and decaying LR.

    :param mnist: dataset object providing train.next_batch(batch_size)
        and train.num_examples
    """
    # inputs are fed as 4-D image tensors; labels as one-hot rows
    x = tf.placeholder(tf.float32, [
        BATCH_SIZE,
        mnist_inference.IMAGE_SIZE,
        mnist_inference.IMAGE_SIZE,
        mnist_inference.NUM_CHANNELS], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARAZATION_RATE)
    y = mnist_inference.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # maintain an exponential moving average of all trainable variables
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # total loss = cross entropy + regularization terms from 'losses' collection
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    # decay schedule tied to the number of batches per epoch
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    # run the optimizer step and the moving-average update as a single op
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # initialize the TF checkpoint saver
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            # reshape flat batch rows into (batch, height, width, channels)
            reshaped_xs = np.reshape(xs, (BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS))

            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})

            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                # save the current model checkpoint
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
Example 77
Project: AnisotropicMultiStreamCNN   Author: AnnekeMeyer   File: elastic_deformation.py    MIT License 4 votes vote down vote up
def elastic_transform(images, sigma=4, control_points=3):
    """
    Elastic image transformation according to the 3D U-net paper by Cicek.

    :param images: List of images. Image is a 3 (x, y, channel) or 4
        (x, y, z, channel) dimensional np.ndarray. The images should be of
        the same size.
    :param sigma: The deformation vectors at each control point location are
        drawn from a normal distribution with 0 mean and std dev given by
        sigma. Providing a tuple specifies sigma for each dimension.
    :param control_points: Count of control points of the deformation field.
        Providing a tuple specifies the count for each dimension.
    :return: List of deformed images.
    :raises ValueError: if the images are neither 3- nor 4-dimensional.

    Fix: uses the public ``scipy.ndimage.zoom`` / ``map_coordinates`` instead
    of the long-deprecated ``scipy.ndimage.interpolation.*`` names, which
    were removed in recent SciPy releases.
    """
    if not hasattr(sigma, '__len__'):
        sigma = (sigma,) * 3
    sigma = np.array(sigma)

    if not hasattr(control_points, '__len__'):
        control_points = (control_points,) * 3
    control_points = np.array(control_points)

    if not isinstance(images, list):
        images = [images]

    image_shape = images[0].shape

    if len(image_shape) == 3:  # 2D image: (x, y, channel)
        # a single control "plane" along the channel axis keeps channels
        # undeformed
        control_points = np.hstack((control_points[:2], np.array([1])))
        deformation_x = np.random.normal(0, sigma[0], control_points)
        deformation_y = np.random.normal(0, sigma[1], control_points)
        dc = np.zeros(control_points)
        # upsample the coarse deformation grid to full image resolution
        zoom_factors = image_shape[:3] / control_points.astype(np.float32)
        dx = scipy.ndimage.zoom(deformation_x, zoom_factors)
        dy = scipy.ndimage.zoom(deformation_y, zoom_factors)
        dc = scipy.ndimage.zoom(dc, zoom_factors)
        x, y, c = np.meshgrid(np.arange(image_shape[1]), np.arange(image_shape[0]), np.arange(image_shape[2]))
        indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(c + dc, (-1, 1))
    elif len(image_shape) == 4:  # 3D image: (x, y, z, channel)
        control_points = np.hstack((control_points, np.array([1])))
        deformation_x = np.random.normal(0, sigma[0], control_points)
        deformation_y = np.random.normal(0, sigma[1], control_points)
        deformation_z = np.random.normal(0, sigma[2], control_points)
        dc = np.zeros(control_points)
        zoom_factors = image_shape[:4] / control_points.astype(np.float32)
        dx = scipy.ndimage.zoom(deformation_x, zoom_factors)
        dy = scipy.ndimage.zoom(deformation_y, zoom_factors)
        dz = scipy.ndimage.zoom(deformation_z, zoom_factors)
        dc = scipy.ndimage.zoom(dc, zoom_factors)
        x, y, z, c = np.meshgrid(np.arange(image_shape[1]), np.arange(image_shape[0]), np.arange(image_shape[2]),
                                 np.arange(image_shape[3]))
        indices = (np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)),
                   np.reshape(z + dz, (-1, 1)), np.reshape(c + dc, (-1, 1)))
    else:
        # the original fell through to a NameError on `indices`; fail loudly
        raise ValueError('elastic_transform expects 3- or 4-dimensional images, '
                         'got shape %s' % (image_shape,))

    return list([
        scipy.ndimage.map_coordinates(img, indices, mode='reflect', order=1).reshape(image_shape)
        for img in images
    ])
Example 78
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: text8_data.py    Apache License 2.0 4 votes vote down vote up
def __iter__(self):
        """Yield SimpleBatch objects for training with negative sampling.

        Slides a window of ``num_label`` units over the corpus: the center
        word is the positive target, the surrounding units are the context,
        and ``num_label - 1`` negative samples are drawn per window.
        """
        logging.info('DataIter start.')
        batch_data = []
        batch_data_mask = []
        batch_label = []
        batch_label_mask = []
        batch_label_weight = []
        # random start offset so successive iterations do not always see
        # identical windows
        start = random.randint(0, self.num_label - 1)
        for i in range(start, len(self.swd.units) - self.num_label - start, self.num_label):
            # context = the window around the center position (center excluded)
            context_units = self.swd.units[i: i + self.num_label // 2] + \
                            self.swd.units[i + 1 + self.num_label // 2: i + self.num_label]
            context_mask = self.swd.weights[i: i + self.num_label // 2] + \
                           self.swd.weights[i + 1 + self.num_label // 2: i + self.num_label]
            # center word of the window is the positive target
            target_units = self.swd.units[i + self.num_label // 2]
            target_word = self.swd.data[i + self.num_label // 2]
            # skip rare center words
            if self.swd.freq[target_word] < self.min_count:
                continue
            indices = self.sample_ne_indices()
            # first label is the true target, the rest are negative samples
            target = [target_units] + [self.swd.negative_units[i] for i in indices]
            # weight 1.0 marks the positive label, 0.0 the negatives
            target_weight = [1.0] + [0.0 for _ in range(self.num_label - 1)]
            target_mask = [self.swd.weights[i + self.num_label // 2]] +\
                          [self.swd.negative_weights[i] for i in indices]

            batch_data.append(context_units)
            batch_data_mask.append(context_mask)
            batch_label.append(target)
            batch_label_mask.append(target_mask)
            batch_label_weight.append(target_weight)

            if len(batch_data) == self.batch_size:
                # reshape for broadcast_mul
                batch_data_mask = np.reshape(
                    batch_data_mask, (self.batch_size, self.num_label - 1, self.swd.max_len, 1))
                batch_label_mask = np.reshape(
                    batch_label_mask, (self.batch_size, self.num_label, self.swd.max_len, 1))
                data_all = [mx.nd.array(batch_data), mx.nd.array(batch_data_mask)]
                label_all = [
                    mx.nd.array(batch_label),
                    mx.nd.array(batch_label_weight),
                    mx.nd.array(batch_label_mask)
                ]
                data_names = ['data', 'mask']
                label_names = ['label', 'label_weight', 'label_mask']
                # clean up the accumulators for the next batch
                batch_data = []
                batch_data_mask = []
                batch_label = []
                batch_label_weight = []
                batch_label_mask = []
                yield SimpleBatch(data_names, data_all, label_names, label_all)
Example 79
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: detection.py    Apache License 2.0 4 votes vote down vote up
def sync_label_shape(self, it, verbose=False):
        """Synchronize label shape with another iterator.

        Pads whichever iterator has the smaller label count so that both
        report the same ``label_shape``. Useful when train/validation record
        files were packed with different per-image object padding.

        Parameters
        ----------
        it : ImageDetIter
            The other iterator to synchronize with this one.
        verbose : bool
            If True, log a message when a resize actually happened.

        Returns
        -------
        ImageDetIter
            ``it``, with its internal label shape updated in place.

        Examples
        --------
        >>> train_iter = mx.image.ImageDetIter(32, (3, 300, 300), path_imgrec='train.rec')
        >>> val_iter = mx.image.ImageDetIter(32, (3, 300, 300), path_imgrec='val.rec')
        >>> train_iter.label_shape
        (30, 6)
        >>> val_iter.label_shape
        (25, 6)
        >>> val_iter = train_iter.sync_label_shape(val_iter, verbose=False)
        >>> train_iter.label_shape
        (30, 6)
        >>> val_iter.label_shape
        (30, 6)
        """
        assert isinstance(it, ImageDetIter), 'Synchronize with invalid iterator.'
        this_shape = self.label_shape
        that_shape = it.label_shape
        # Both iterators must agree on the per-object record width.
        assert this_shape[1] == that_shape[1], "object width mismatch."
        target = max(this_shape[0], that_shape[0])
        # Grow only the smaller side; the larger side is already at `target`.
        if target > this_shape[0]:
            self.reshape(None, (target, this_shape[1]))
        if target > that_shape[0]:
            it.reshape(None, (target, that_shape[1]))
        if verbose and target > min(this_shape[0], that_shape[0]):
            logging.info('Resized label_shape to (%d, %d).', target, this_shape[1])
        return it
Example 80
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 4 votes vote down vote up
def test_regression():
    '''Test the regression output operators.

    Checks LogisticRegressionOutput, LinearRegressionOutput and
    MAERegressionOutput (with dense and CSR labels) against NumPy
    reference forward/backward implementations over labels of varying
    sparsity.
    '''
    def check_regression(symbol, forward, backward, shape, stype='default',
                         densities=(0, 0.5, 1)):
        # One small executor per operator; gradient is requested only for
        # `data` — labels take no gradient.
        # NOTE: `densities` default changed from a list to a tuple to avoid
        # the shared-mutable-default pitfall; call sites are unaffected.
        data = mx.symbol.Variable('data')
        label = mx.symbol.Variable('label', stype=stype)
        out = symbol(data, label)
        grad_req = {'data': 'write', 'label': 'null'}
        out_exec = out.simple_bind(default_context(), grad_req=grad_req,
            data=shape, label=shape)
        arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays))
        grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays))
        # Fill the data once; the label is re-randomized per density below.
        arr_data = mx.random.uniform(-1, 1, shape)
        arg_map["data"][:] = arr_data
        # init label based on density
        arr_label = arg_map["label"]
        atol = 1e-5
        for density in densities:
            arr_label[:] = rand_ndarray(shape, stype, density=density)
            out_exec.forward(is_train=True)
            out_exec.backward()
            np_out = forward(arr_data.asnumpy())
            # Reference gradient is scaled by 1/shape[1], matching the
            # operator's gradient normalization.
            out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1]
            assert_almost_equal(out_exec.outputs[0].asnumpy(), np_out, atol=atol)
            assert_almost_equal(grad_map["data"].asnumpy(), out_grad, atol=atol)

    shape = (50, 30)

    # Dense labels.
    check_regression(mx.symbol.LogisticRegressionOutput,
                     lambda x: 1.0 / (1.0 + np.exp(-x)),
                     lambda x, y : x - y,
                     shape)
    check_regression(mx.symbol.LinearRegressionOutput,
                     lambda x: x,
                     lambda x, y : x - y,
                     shape)
    check_regression(mx.symbol.MAERegressionOutput,
                     lambda x: x,
                     lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)),
                     shape)
    # Sparse (CSR) labels.
    check_regression(mx.symbol.LogisticRegressionOutput,
                     lambda x: 1.0 / (1.0 + np.exp(-x)),
                     lambda x, y : x - y,
                     shape, stype='csr')
    check_regression(mx.symbol.LinearRegressionOutput,
                     lambda x: x,
                     lambda x, y : x - y,
                     shape, stype='csr')