Python tensorflow.Session() Examples

The following are 30 code examples of tensorflow.Session(). They are taken from open-source projects; the original project and source file for each example are noted above it. You may also want to check out all available functions and classes of the tensorflow module.
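All of the examples use the TensorFlow 1.x graph/session API, where a computation graph is built first and then evaluated inside a session (on TensorFlow 2.x the same API lives under tf.compat.v1, with eager execution disabled). As a quick refresher, a minimal sketch:

import tensorflow as tf  # TensorFlow 1.x API

# Build a tiny graph
a = tf.constant(2.0)
b = tf.constant(3.0)
total = a + b

# Evaluate it inside a session
with tf.Session() as sess:
    print(sess.run(total))  # 5.0
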
Example #1
Source File: crossling_emb.py    From nlp-architect with Apache License 2.0
def run(self, sess, local_lr):
        """
        Runs whole GAN
        Arguments:
            sess(tf.session): Tensorflow Session
            local_lr(float): Learning rate
        """
        disc_cost_acc = []
        n_words_proc = 0
        tic = time.time()
        for iters in range(0, self.iters_epoch, self.batch_size):
            # 1.Run the discriminator
            for _ in range(self.disc_runs):
                disc_result = self.run_discriminator(sess, local_lr)
                disc_cost_acc.append(disc_result[0])
            # 2.Run the Generator
            n_words_proc += self.run_generator(sess, local_lr)
            # 3.Report the metrics
            self.report_metrics(iters, n_words_proc, disc_cost_acc, tic) 
Example #2
Source File: session.py    From faceswap with GNU General Public License v3.0
def predict(self, feed, batch_size=None):
        """ Get predictions from the model in the correct session.

        This method is a wrapper for :func:`keras.predict()`.

        Parameters
        ----------
        feed: numpy.ndarray or list
            The feed to be provided to the model as input. This should be a ``numpy.ndarray``
            for single inputs or a ``list`` of ``numpy.ndarrays`` for multiple inputs.
        batch_size: int, optional
            The batch size to run the prediction at. If ``None`` the default batch size
            is used. Default: ``None``
        """
        if self._session is None:
            if batch_size is None:
                return self._model.predict(feed)
            return self._amd_predict_with_optimized_batchsizes(feed, batch_size)

        with self._session.as_default():  # pylint: disable=not-context-manager
            with self._session.graph.as_default():
                return self._model.predict(feed, batch_size=batch_size) 
Example #3
Source File: session.py    From faceswap with GNU General Public License v3.0
def _set_session(self, allow_growth):
        """ Sets the session and graph.

        If the backend is AMD then this does nothing and the global ``Keras`` ``Session``
        is used
        """
        if get_backend() == "amd":
            return None

        self.graph = tf.Graph()
        config = tf.ConfigProto()
        if allow_growth and get_backend() == "nvidia":
            config.gpu_options.allow_growth = True
        try:
            session = tf.Session(graph=tf.Graph(), config=config)
        except tf_error.InternalError as err:
            if "driver version is insufficient" in str(err):
                msg = ("Your Nvidia Graphics Driver is insufficient for running Faceswap. "
                       "Please upgrade to the latest version.")
                raise FaceswapError(msg) from err
            raise err
        logger.debug("Created tf.session: (graph: %s, session: %s, config: %s)",
                     session.graph, session, config)
        return session 
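
The session returned here is later used by the other faceswap wrappers (predict, define_model and load_model below) together with its graph installed as the default. A minimal sketch of that per-graph session pattern, assuming plain TensorFlow 1.x:

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, shape=[None, 3], name="x")
    y = tf.reduce_sum(x, axis=1)

session = tf.Session(graph=graph)

# Ops that belong to `graph` are run with the session and its graph
# set as the defaults, mirroring the wrappers below:
with session.as_default():
    with session.graph.as_default():
        print(session.run(y, feed_dict={x: [[1.0, 2.0, 3.0]]}))  # [6.]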
Example #4
Source File: session.py    From faceswap with GNU General Public License v3.0
def define_model(self, function):
        """ Defines a given model in the correct session.

        This method acts as a wrapper for :class:`keras.models.Model()` to ensure that the model
        is defined within its own graph.

        Parameters
        ----------
        function: function
            A function that defines a :class:`keras.Model` and returns its ``inputs`` and
            ``outputs``. The function that generates these results should be passed in, NOT the
            results themselves, as the function needs to be executed within the correct context.
        """
        if self._session is None:
            self._model = Model(*function())
        else:
            with self._session.as_default():  # pylint: disable=not-context-manager
                with self._session.graph.as_default():
                    self._model = Model(*function()) 
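
A hypothetical usage sketch: the builder function is passed uncalled so that its Keras layers are created inside the wrapped graph (build_encoder, its layer sizes and the keras_session instance name are invented here for illustration):

from keras.layers import Dense, Input

def build_encoder():
    """ Return the (inputs, outputs) pair expected by define_model. """
    inputs = Input(shape=(64,))
    outputs = Dense(16, activation="relu")(inputs)
    return [inputs], [outputs]

# Pass the function itself, not build_encoder(), so the layers are
# registered in the session's own graph:
keras_session.define_model(build_encoder)  # keras_session: an instance of this class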
Example #5
Source File: embedding_intent_classifier.py    From rasa_nlu with Apache License 2.0
def _output_training_stat(self,
                              X: np.ndarray,
                              intents_for_X: np.ndarray,
                              is_training: 'tf.Tensor') -> np.ndarray:
        """Output training statistics"""

        n = self.evaluate_on_num_examples
        ids = np.random.permutation(len(X))[:n]
        all_Y = self._create_all_Y(X[ids].shape[0])

        train_sim = self.session.run(self.sim_op,
                                     feed_dict={self.a_in: X[ids],
                                                self.b_in: all_Y,
                                                is_training: False})

        train_acc = np.mean(np.argmax(train_sim, -1) == intents_for_X[ids])
        return train_acc 
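
The accuracy here is simply an argmax over the similarity matrix compared against the gold intent indices; a toy numpy illustration of that last line:

import numpy as np

# 3 examples x 4 candidate intents
sim = np.array([[0.9, 0.1, 0.0, 0.0],
                [0.2, 0.1, 0.6, 0.1],
                [0.3, 0.4, 0.2, 0.1]])
gold = np.array([0, 2, 3])

print(np.mean(np.argmax(sim, -1) == gold))  # 0.666..., two of three rows match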
Example #6
Source File: embedding_intent_classifier.py    From Rasa_NLU_Chi with Apache License 2.0
def _calculate_message_sim(self, X, all_Y):
        """Load tf graph and calculate message similarities"""

        a_in = self.embedding_placeholder
        b_in = self.intent_placeholder

        sim = self.similarity_op
        sess = self.session

        message_sim = sess.run(sim, feed_dict={a_in: X,
                                               b_in: all_Y})
        message_sim = message_sim.flatten()  # sim is a matrix

        intent_ids = message_sim.argsort()[::-1]
        message_sim[::-1].sort()

        # transform sim to python list for JSON serializing
        message_sim = message_sim.tolist()

        return intent_ids, message_sim 
Example #7
Source File: starspace_intent_classifier.py    From ai-chatbot-framework with MIT License
def _calculate_message_sim(self, X, all_Y):
        """Load tf graph and calculate message similarities"""

        a_in = self.embedding_placeholder
        b_in = self.intent_placeholder

        sim = self.similarity_op
        sess = self.session

        message_sim = sess.run(sim, feed_dict={a_in: X,
                                               b_in: all_Y})
        message_sim = message_sim.flatten()  # sim is a matrix

        intent_ids = message_sim.argsort()[::-1]
        message_sim[::-1].sort()

        # transform sim to python list for JSON serializing
        message_sim = message_sim.tolist()

        return intent_ids, message_sim 
Example #8
Source File: crossling_emb.py    From nlp-architect with Apache License 2.0
def run_generator(self, sess, local_lr):
        """
        Runs generator part of GAN
        Arguments:
            sess(tf.session): Tensorflow Session
            local_lr(float): Learning rate
        Returns:
            Returns number of words processed
        """
        # Generate random ids to look up
        src_ids = np.random.choice(self.vocab_size, self.batch_size, replace=False)
        tgt_ids = np.random.choice(self.vocab_size, self.batch_size, replace=False)
        train_dict = {
            self.generator.src_ph: src_ids,
            self.generator.tgt_ph: tgt_ids,
            self.discriminator.do_ph: 1.0,
            self.lr_ph: local_lr,
        }
        sess.run(self.generator.map_opt, feed_dict=train_dict)
        # Run orthogonalize
        sess.run(self.generator.assign_weight)
        return 2 * self.batch_size 
Example #9
Source File: crossling_emb.py    From nlp-architect with Apache License 2.0
def run_discriminator(self, sess, local_lr):
        """
        Runs discriminator part of GAN
        Arguments:
            sess(tf.session): Tensorflow Session
            local_lr(float): Learning rate
        """
        # Generate random ids to look up
        src_ids = np.random.choice(self.most_freq, self.batch_size, replace=False)
        tgt_ids = np.random.choice(self.most_freq, self.batch_size, replace=False)
        train_dict = {
            self.generator.src_ph: src_ids,
            self.generator.tgt_ph: tgt_ids,
            self.discriminator.do_ph: 0.9,
            self.lr_ph: local_lr,
        }
        return sess.run(
            [self.discriminator.disc_cost, self.discriminator.disc_opt], feed_dict=train_dict
        ) 
Example #10
Source File: embedding_intent_classifier.py    From rasa_nlu with Apache License 2.0
def _calculate_message_sim(self,
                               X: np.ndarray,
                               all_Y: np.ndarray
                               ) -> Tuple[np.ndarray, List[float]]:
        """Load tf graph and calculate message similarities"""

        message_sim = self.session.run(self.sim_op,
                                       feed_dict={self.a_in: X,
                                                  self.b_in: all_Y})
        message_sim = message_sim.flatten()  # sim is a matrix

        intent_ids = message_sim.argsort()[::-1]
        message_sim[::-1].sort()

        if self.similarity_type == 'cosine':
            # clip negative values to zero
            message_sim[message_sim < 0] = 0
        elif self.similarity_type == 'inner':
            # normalize result to [0, 1] with softmax
            message_sim = np.exp(message_sim)
            message_sim /= np.sum(message_sim)

        # transform sim to python list for JSON serializing
        return intent_ids, message_sim.tolist() 
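
The two normalization branches behave differently: cosine similarities are clipped at zero, while inner-product scores are turned into a probability distribution with a softmax. A small numpy sketch of both branches:

import numpy as np

sim = np.array([2.0, 0.5, -1.0])

# similarity_type == 'cosine': clip negative values to zero
cosine_conf = np.where(sim < 0, 0.0, sim)   # [2. , 0.5, 0. ]

# similarity_type == 'inner': softmax so the confidences sum to 1
inner_conf = np.exp(sim)
inner_conf /= np.sum(inner_conf)            # approx [0.79, 0.18, 0.04]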
Example #11
Source File: crossling_emb.py    From nlp-architect with Apache License 2.0
def apply_procrustes(self, sess, final_pairs):
        """
        Applies procrustes to W matrix for better mapping
        Arguments:
            sess(tf.session): Tensorflow Session
            final_pairs(ndarray): Array of pairs which are mutual neighbors
        """
        print("Applying solution of Procrustes problem to get better mapping...")
        proc_dict = {
            self.generator.src_ph: final_pairs[:, 0],
            self.generator.tgt_ph: final_pairs[:, 1],
        }
        A, B = sess.run([self.generator.src_emb, self.generator.tgt_emb], feed_dict=proc_dict)
        # pylint: disable=no-member
        R = scipy.linalg.orthogonal_procrustes(A, B)
        sess.run(tf.assign(self.generator.W, R[0])) 
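
Note that scipy.linalg.orthogonal_procrustes returns a (matrix, scale) tuple, which is why R[0] is the orthogonal map assigned to W above. A standalone numpy sketch of the same refinement step:

import numpy as np
import scipy.linalg

rng = np.random.RandomState(0)
A = rng.randn(100, 50)                        # source embeddings of the matched pairs
true_W = np.linalg.qr(rng.randn(50, 50))[0]   # some orthogonal mapping
B = A @ true_W                                # target embeddings of the matched pairs

W, scale = scipy.linalg.orthogonal_procrustes(A, B)
print(np.allclose(A @ W, B))                  # True: W recovers the mapping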
Example #12
Source File: evaluate.py    From nlp-architect with Apache License 2.0
def calc_nn_acc(self, sess, batch_size=512):
        """
        Evaluates accuracy of mapping using Nearest neighbors
        Arguments:
            sess(tf.session): Tensorflow Session
            batch_size(int): Size of batch
        """
        top_matches = []
        eval_size = len(self.src_ind)

        # Loop through all the eval dataset
        for i in range(0, eval_size, batch_size):
            src_ids = [self.src_ind[x] for x in range(i, min(i + batch_size, eval_size))]
            eval_dict = {self.src_ph: src_ids, self.tgt_ph: self.tgt_ids}
            matches = sess.run(self.eval_nn, feed_dict=eval_dict)
            top_matches.append(matches[1])
        top_matches = np.concatenate(top_matches)

        print("Accuracy using Nearest Neighbors is")
        self.calc_accuracy(top_matches) 
Example #13
Source File: DeeProtein.py    From AiGEM_TeamHeidelberg2017 with MIT License
def initialize_helpers(self):
        """Initialize the model and call all graph constructing ops.

        This function is a wrapper for all initialization ops in the model.
        """
        if self._opts._allowsoftplacement == 'True':
            config = tf.ConfigProto(allow_soft_placement=True)
        else:
            config = tf.ConfigProto(allow_soft_placement=False)

        # allow growth so GPU memory is allocated as needed rather than all at once
        config.gpu_options.allow_growth = True
        # open a session:
        self.session = tf.Session(config=config)

        self.log_file.write('Initialized Batch_Generator with MODE: %s\n' % self._opts._batchgenmode)
        self.batchgen = helpers.BatchGenerator(self._opts)

        self.log_file.write('Initialized ROC_tracker\n')
        self.ROCtracker = helpers.RocTracker(self._opts) 
Example #14
Source File: evaluate.py    From nlp-architect with Apache License 2.0
def calc_csls_score(self, sess, batch_size=512):
        """
        Calculates similarity score between two embeddings
        Arguments:
            sess(tf.session): Tensorflow Session
            batch_size(int): Size of batch to process
        Returns:
            Returns similarity score numpy array
        """
        score_val = []
        eval_size = len(self.src_ind)
        # Calculate scores
        for i in range(0, eval_size, batch_size):
            score_src_ids = [self.src_ind[x] for x in range(i, min(i + batch_size, eval_size))]
            eval_dict = {self.src_ph: score_src_ids, self.tgt_ph: self.tgt_ids}
            score_val.append(sess.run(self.csls_subgraphs["ScoreGraph"], feed_dict=eval_dict))
        score_val = np.concatenate(score_val)
        return score_val 
Example #15
Source File: evaluate.py    From nlp-architect with Apache License 2.0
def calc_avg_dist(self, sess, batch_size=512):
        """
        Calculates average distance between two embeddings
        Arguments:
            sess(tf.session): Tensorflow session
            batch_size(int): batch_size
        Returns:
            Returns numpy array of average values of size vocab_size
        """
        avg1_val = []
        avg2_val = []

        # Calculate Average
        for i in range(0, self.vocab_size, batch_size):
            avg_src_ids = [x for x in range(i, min(i + batch_size, self.vocab_size))]
            avg1_dict = {self.src_ph: avg_src_ids, self.tgt_ph: self.tgt_ids}
            avg1_val.append(sess.run(self.csls_subgraphs["Avg1S2T"], feed_dict=avg1_dict))
            avg2_val.append(sess.run(self.csls_subgraphs["Avg2S2T"], feed_dict=avg1_dict))
        avg1_val = np.concatenate(avg1_val)
        avg2_val = np.concatenate(avg2_val)
        return avg1_val, avg2_val 
Example #16
Source File: evaluate.py    From nlp-architect with Apache License 2.0
def run_csls_metrics(self, sess, batch_size=512):
        """
        Runs the whole CSLS metrics
        Arguments:
            sess(tf.session): Tensorflow Session
            batch_size(int): Batch Size
        """
        top_matches = []
        score = self.calc_csls_score(sess)
        avg1, avg2 = self.calc_avg_dist(sess)
        csls_scores = 2 * score - (avg1[self.src_ind][:, None] + avg2[None, :])
        # Calculate top matches
        for i in range(0, len(self.src_ind), batch_size):
            scores = [csls_scores[x] for x in range(i, min(i + batch_size, len(self.src_ind)))]
            top_matches_val = sess.run(
                self.csls_subgraphs["Top100"], feed_dict={self.score_ph: scores}
            )[1]
            top_matches.append(top_matches_val)
        top_matches = np.concatenate(top_matches)
        print("Accuracy using CSLS is")
        self.calc_accuracy(top_matches)
        self.calc_csls(sess) 
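
CSLS (cross-domain similarity local scaling) penalizes hub words that are close to many words in the other language: each similarity is doubled and the average similarity of both words to their nearest neighbours is subtracted, exactly as in the csls_scores line above. A toy numpy sketch, computing the averages from the k nearest neighbours directly:

import numpy as np

def csls(cos_sim, k=2):
    """ cos_sim: (n_src, n_tgt) matrix of cosine similarities. """
    # average similarity of each source word to its k nearest target words
    avg_src = np.mean(np.sort(cos_sim, axis=1)[:, -k:], axis=1)
    # average similarity of each target word to its k nearest source words
    avg_tgt = np.mean(np.sort(cos_sim, axis=0)[-k:, :], axis=0)
    # mirrors: 2 * score - (avg1[:, None] + avg2[None, :])
    return 2 * cos_sim - (avg_src[:, None] + avg_tgt[None, :])

sim = np.random.RandomState(0).rand(4, 5)
print(csls(sim).shape)  # (4, 5)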
Example #17
Source File: embedding_intent_classifier.py    From Rasa_NLU_Chi with Apache License 2.0
def process(self, message, **kwargs):
        # type: (Message, **Any) -> None
        """Return the most likely intent and its similarity to the input."""

        intent = {"name": None, "confidence": 0.0}
        intent_ranking = []

        if self.session is None:
            logger.error("There is no trained tf.session: "
                         "component is either not trained or "
                         "didn't receive enough training data")

        else:
            # get features (bag of words) for a message
            X = message.get("text_features").reshape(1, -1)

            # stack encoded_all_intents on top of each other
            # to create candidates for test examples
            all_Y = self._create_all_Y(X.shape[0])

            # load tf graph and session
            intent_ids, message_sim = self._calculate_message_sim(X, all_Y)

            if intent_ids.size > 0:
                intent = {"name": self.inv_intent_dict[intent_ids[0]],
                          "confidence": message_sim[0]}

                ranking = list(zip(list(intent_ids), message_sim))
                ranking = ranking[:INTENT_RANKING_LENGTH]
                intent_ranking = [{"name": self.inv_intent_dict[intent_idx],
                                   "confidence": score}
                                  for intent_idx, score in ranking]

        message.set("intent", intent, add_to_output=True)
        message.set("intent_ranking", intent_ranking, add_to_output=True) 
Example #18
Source File: starspace_intent_classifier.py    From ai-chatbot-framework with MIT License
def process(self, query, INTENT_RANKING_LENGTH=5):
        """Return the most likely intent and its similarity to the input."""

        message = self.transform(query)

        intent = {"name": None, "confidence": 0.0}
        intent_ranking = []

        if self.session is None:
            app.logger.error("There is no trained tf.session: "
                             "component is either not trained or "
                             "didn't receive enough training data")

        else:
            # get features (bag of words) for a message
            X = message.get("text_features").reshape(1, -1)

            # stack encoded_all_intents on top of each other
            # to create candidates for test examples
            all_Y = self._create_all_Y(X.shape[0])

            # load tf graph and session
            intent_ids, message_sim = self._calculate_message_sim(X, all_Y)

            if intent_ids.size > 0:
                intent = {"intent": self.inv_intent_dict[intent_ids[0]],
                          "confidence": message_sim[0]}

                ranking = list(zip(list(intent_ids), message_sim))

                ranking = ranking[:INTENT_RANKING_LENGTH]

                intent_ranking = [{"intent": self.inv_intent_dict[intent_idx],
                                   "confidence": score}
                                  for intent_idx, score in ranking]

        return intent, intent_ranking 
Example #19
Source File: inference.py    From fully-convolutional-point-network with MIT License
def run_model(sess, placeholders, pred_op, points, pointnet_locations, constant_features):
    """ Passes points through the model.

        Args:
        sess: tf.session
        placeholders: dict
        pred_op: tf.tensor
        points: np.array
        pointnet_locations: np.array
        constant_features: np.array

        Returns: np.array

    """

    points_and_pointnet_locations = np.expand_dims(np.concatenate([points, pointnet_locations], axis=0), axis=0)

    start_time = time.time()
    predictions = sess.run(pred_op, feed_dict={
        placeholders['is_training_pl']: False,
        placeholders['points_xyz_pl']: points_and_pointnet_locations,
        placeholders['points_features_pl']: constant_features
    })
    end_time = time.time()
    print('Prediction took: %ds' % (end_time - start_time))

    return predictions 
Example #20
Source File: inference.py    From uai-sdk with Apache License 2.0
def load_model(self):
        sess = tf.Session()
        x = tf.placeholder(dtype=tf.float32, shape=[1, 32, 100, 3], name='input')
        # assumed inference-phase flag for ShadowNet; defined here so the snippet is self-contained
        phase_tensor = tf.constant('test', dtype=tf.string)
        net = crnn_model.ShadowNet(phase=phase_tensor, hidden_nums=256, layers_nums=2,
                                   seq_length=15, num_classes=config.cfg.TRAIN.CLASSES_NUMS,
                                   rnn_cell_type='lstm')
        with tf.variable_scope('shadow'):
            net_out, tensor_dict = net.build_shadownet(inputdata=x)
        decodes, _ = tf.nn.ctc_beam_search_decoder(inputs=net_out,
                                                   sequence_length=20 * np.ones(1),
                                                   merge_repeated=False)

        saver = tf.train.Saver()
        params_file = tf.train.latest_checkpoint(self.model_dir)
        saver.restore(sess=sess, save_path=params_file)
        self.output['sess'] = sess
        self.output['x'] = x
        self.output['y'] = decodes 
Example #21
Source File: session.py    From faceswap with GNU General Public License v3.0
def load_model(self):
        """ Loads a model within the correct session.

        This method is a wrapper for :func:`keras.models.load_model()`. Loads a model and its
        weights from :attr:`model_path`. Any additional ``kwargs`` to be passed to
        :func:`keras.models.load_model()` should also be defined during initialization of the
        class.
        """
        logger.verbose("Initializing plugin model: %s", self._name)
        if self._session is None:
            self._model = k_load_model(self._model_path, **self._model_kwargs)
        else:
            with self._session.as_default():  # pylint: disable=not-context-manager
                with self._session.graph.as_default():
                    self._model = k_load_model(self._model_path, **self._model_kwargs) 
Example #22
Source File: session.py    From faceswap with GNU General Public License v3.0
def load_model_weights(self):
        """ Load model weights for a defined model inside the correct session.

        This method is a wrapper for :class:`keras.load_weights()`. Once a model has been defined
        in :func:`define_model()` this method can be called to load its weights in the correct
        graph from the :attr:`model_path` defined during initialization of this class.
        """
        logger.verbose("Initializing plugin model: %s", self._name)
        if self._session is None:
            self._model.load_weights(self._model_path)
        else:
            with self._session.as_default():  # pylint: disable=not-context-manager
                with self._session.graph.as_default():
                    self._model.load_weights(self._model_path) 
Example #23
Source File: DeeProtein.py    From AiGEM_TeamHeidelberg2017 with MIT License
def save_model(self, network, session, step, name='DeeProtein'):
        """Saves the model into .npz and ckpt files.
        Saves the model parameters to .npz files so the model can be reloaded. The model is saved in the
        checkpoints folder under the directory specified in the config dict under the
        summaries_dir key.

        Args:
          network: `tl.layerobj` holding the network.
          session: `tf.Session` object from which to save the model.
          step: `int32` the global step of the training process.
          name: `str` The name of the network-part that is to save.
        """
        # save model as dict:
        param_save_dir = os.path.join(self._opts._summariesdir,
                                      'checkpoint_saves/')
        # keep only the convolutional part (exclude the dense and output layers)
        conv_vars = [var for var in network.all_params
                     if 'dense' not in var.name and 'outlayer' not in var.name]

        if not os.path.exists(param_save_dir):
            os.makedirs(param_save_dir)
        if conv_vars:
            tl.files.save_npz_dict(conv_vars,
                                   name=os.path.join(param_save_dir,
                                                     '%s_conv_part.npz' % name),
                                   sess=session)
        tl.files.save_npz_dict(network.all_params,
                               name=os.path.join(param_save_dir,
                                                 '%s_complete.npz' % name),
                               sess=session)

        # save also as checkpoint
        ckpt_file_path = os.path.join(param_save_dir, '%s.ckpt' % name)
        self.saver.save(session, ckpt_file_path, global_step=step) 
Example #24
Source File: crossling_emb.py    From nlp-architect with Apache License 2.0
def save_model(self, save_model, sess):
        """
        Saves W in mapper as numpy array based on CSLS criterion
        Arguments:
            save_model(bool): Save model if True
            sess(tf.session): Tensorflow Session
        """
        if save_model:
            print("Saving model ....")
            model_W = sess.run(self.generator.W)
            path = os.path.join(self.save_dir, "W_best_mapping")
            np.save(path, model_W) 
Example #25
Source File: evaluate.py    From nlp-architect with Apache License 2.0
def calc_csls(self, sess):
        """
        Calculates the value of CSLS criterion
        Arguments:
            sess(tf.session): Tensorflow session
        """
        good_pairs = self.generate_dictionary(sess)
        eval_dict = {self.src_ph: good_pairs[0], self.tgt_ph: good_pairs[1]}
        cos_mean = sess.run(self.csls_subgraphs["CSLS_Criteria"], feed_dict=eval_dict)
        print("CSLS Score is " + str(cos_mean))

        # Drop LR only after the second drop in CSLS
        if cos_mean < self.best_cos_score:
            self.drop_lr = self.second_drop
            self.second_drop = True

        # Save model whenever cos score is better than saved score
        if cos_mean > self.best_cos_score:
            self.save_model = True
        else:
            self.save_model = False

        # Update best cos score
        if cos_mean > self.best_cos_score:
            self.best_cos_score = cos_mean
            self.drop_lr = False 
Example #26
Source File: evaluate.py    From nlp-architect with Apache License 2.0
def generate_dictionary(self, sess, dict_type="S2T"):
        """
        Generates best translation pairs
        Arguments:
             sess(tf.session): Tensorflow session
             dict_type(str): S2T-Source2Target, S2T&T2S (Both)
        Returns:
            Numpy array of max_dict x 2 or smaller
        """
        avg1, avg2 = self.calc_avg_dist(sess)
        s2t_dico = self.get_candidates(sess, avg1, avg2)
        print("Completed generating S2T dictionary of size " + str(len(s2t_dico)))
        if dict_type == "S2T":
            map_src_ind = np.asarray([s2t_dico[x][0] for x in range(len(s2t_dico))])
            tra_tgt_ind = np.asarray([s2t_dico[x][1] for x in range(len(s2t_dico))])
            return [map_src_ind, tra_tgt_ind]
        if dict_type == "S2T&T2S":
            # This case we are running Target 2 Source mappings
            t2s_dico = self.get_candidates(sess, avg2, avg1, swap_score=True)
            print("Completed generating T2S dictionary of size " + str(len(t2s_dico)))
            t2s_dico = np.concatenate([t2s_dico[:, 1:], t2s_dico[:, :1]], 1)
            # Find the common pairs between S2T and T2S
            s2t_candi = set([(a, b) for a, b in s2t_dico])
            t2s_candi = set([(a, b) for a, b in t2s_dico])
            final_pairs = s2t_candi & t2s_candi
            dico = np.asarray(list([[a, b] for (a, b) in final_pairs]))
            print("Completed generating final dictionary of size " + str(len(final_pairs)))
            return dico 
Example #27
Source File: DeeProtein.py    From AiGEM_TeamHeidelberg2017 with MIT License
def restore_model_from_checkpoint(self, ckptpath, session):
        """Restores the model from checkpoint in `Session`. This function is deprected, please
           use the regular restore ops from npz-files.

        Args:
          ckptpath: A `str`. The path to the checkpoint file.
          session: The `tf.session` in which to restore the model.
        """
        # Restore variables from disk.
        self.log_file.write('[*] Loading checkpoints from %s\n' % ckptpath)
        self.saver.restore(session, ckptpath)
        self.log_file.write('[*] Restored checkpoints!\n') 
Example #28
Source File: embedding_intent_classifier.py    From rasa_nlu with Apache License 2.0
def process(self, message: 'Message', **kwargs: Any) -> None:
        """Return the most likely intent and its similarity to the input."""

        intent = {"name": None, "confidence": 0.0}
        intent_ranking = []

        if self.session is None:
            logger.error("There is no trained tf.session: "
                         "component is either not trained or "
                         "didn't receive enough training data")

        else:
            # get features (bag of words) for a message
            # noinspection PyPep8Naming
            X = message.get("text_features").reshape(1, -1)

            # stack encoded_all_intents on top of each other
            # to create candidates for test examples
            # noinspection PyPep8Naming
            all_Y = self._create_all_Y(X.shape[0])

            # load tf graph and session
            intent_ids, message_sim = self._calculate_message_sim(X, all_Y)

            # if X contains all zeros do not predict some label
            if X.any() and intent_ids.size > 0:
                intent = {"name": self.inv_intent_dict[intent_ids[0]],
                          "confidence": message_sim[0]}

                ranking = list(zip(list(intent_ids), message_sim))
                ranking = ranking[:INTENT_RANKING_LENGTH]
                intent_ranking = [{"name": self.inv_intent_dict[intent_idx],
                                   "confidence": score}
                                  for intent_idx, score in ranking]

        message.set("intent", intent, add_to_output=True)
        message.set("intent_ranking", intent_ranking, add_to_output=True) 
Example #29
Source File: DeeProtein.py    From AiGEM_TeamHeidelberg2017 with MIT License
def load_conv_weights_npz(self, network, session, name='DeeProtein'):
        """Loads the model up to the last convolutional layer.
        Load the weights for the convolutional layers from a pretrained model.
        Automatically uses the path specified in the config dict under restore_path.

        Args:
          network: `tl.layer` Object holding the network.
          session: `tf.Session` the tensorflow session in which to restore the weights.
          name: `str`, name of the current network to load. Optional; if multiple
            models are restored, the files are identified by name.
        Returns:
          A tl.Layer object of same size as input, holding the updated weights.
        """
        # check if filepath exists:
        file = os.path.join(self._opts._restorepath, '%s_conv_part.npz' % name)
        self.log_file.write('[*] Loading %s\n' % file)
        if not tl.files.file_exists(file):
            self.log_file.write('[*] Loading %s FAILED. File not found.\n' % file)
            self.log_file.write('Trying to download weights from iGEM-HD-2017.\n')
            weights_dir = self.download_weights()
            file = os.path.join(weights_dir, '%s_conv_part.npz' % name)
            if not tl.files.file_exists(file):
                self.log_file.write('[*] Download weights from iGEM-HD-2017 FAILED. ABORTING.\n')
                exit(-1)
            else:
                self.log_file.write('Download successful.\n')
                pass
        # custom load_ckpt op:
        d = np.load(file)
        params = [val[1] for val in sorted(d.items(), key=lambda tup: int(tup[0]))]
        # params = [p for p in params if not 'outlayer' in p.name]
        # original OP:
        # params = tl.files.load_npz_dict(name=file)
        # if name == 'Classifier':
        #     params = [p for p in params[:-4]]
        tl.files.assign_params(session, params, network)
        self.log_file.write('[*] Restored conv weights!\n')
        return network 
Example #30
Source File: DeeProtein.py    From AiGEM_TeamHeidelberg2017 with MIT License
def load_model_weights(self, network, session, name='DeeProtein'):
        """Load the weights for the convolutional layers from a pretrained model.
        If include outlayer is set to True, the outlayers are restored as well,
        otherwise the network is restored without outlayers.

        Args:
          network: `tl.layer` Object holding the network.
          session: `tf.Session` the tensorflow session in which to restore the weights.
          name: `str`, name of the current network to load. Optional; if multiple
            models are restored, the files are identified by name.
        Returns:
          A tl.Layer object of same size as input, holding the updated weights.
        """
        # check if filepath exists:
        file = os.path.join(self._opts._restorepath, '%s_complete.npz' % name)
        if not tl.files.file_exists(file):
            self.log_file.write('[*] Loading %s FAILED. File not found.\n' % file)
            if self._opts._nclasses == 886:
                self.log_file.write('[*] Suitable weights found on iGEM-Servers.\n')
                self.log_file.write('Trying to download weights from iGEM-HD-2017.\n')
                weights_dir = self.download_weights()
                file = os.path.join(weights_dir, '%s_conv_part.npz' % name)
                if not tl.files.file_exists(file):
                    self.log_file.write('[*] Download weights from iGEM-HD-2017 FAILED. ABORTING.\n')
                    exit(-1)
                else:
                    self.log_file.write('Download successful.\n')
                    pass
            else:
                self.log_file.write('[*] No suitable weights on Servers. ABORTING.\n')
                exit(-1)

        # custom load_ckpt op:
        d = np.load(file)
        params = [val[1] for val in sorted(d.items(), key=lambda tup: int(tup[0]))]
        tl.files.assign_params(session, params, network)
        self.log_file.write('[*] Restored model weights!\n')
        print('[*] Restored model weights!\n')
        return network