Python hmmlearn.hmm.GaussianHMM() Examples

The following are 24 code examples showing how to use hmmlearn.hmm.GaussianHMM(). These examples are extracted from open source projects; the originating project, author, file, and license are listed with each example.

You may also want to check out all available functions/classes of the module hmmlearn.hmm.
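Before the examples, here is a minimal, self-contained sketch of the typical GaussianHMM workflow (construct, fit, decode); the data is random and purely illustrative, and the hyperparameters are arbitrary.

import numpy as np
from hmmlearn import hmm

# Toy data: 100 two-dimensional observations treated as a single sequence.
rng = np.random.RandomState(42)
X = rng.randn(100, 2)

# Build and fit a 3-state Gaussian HMM with diagonal covariances.
model = hmm.GaussianHMM(n_components=3, covariance_type="diag", n_iter=100)
model.fit(X)

# Most likely hidden-state sequence (Viterbi) and per-sample posteriors.
states = model.predict(X)
log_likelihood, posteriors = model.score_samples(X)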

Example 1
Project: Voice-based-gender-recognition   Author: SuperKogito   File: ModelsTrainer.py    License: MIT License
def process(self):
        females, males = self.get_file_paths(self.females_training_path,
                                             self.males_training_path)
        # collect voice features
        female_voice_features = self.collect_features(females)
        male_voice_features   = self.collect_features(males)
        # generate gaussian mixture models
        females_gmm = hmm.GaussianHMM(n_components=3)
        males_gmm   = hmm.GaussianHMM(n_components=3)
        ubm         = hmm.GaussianHMM(n_components=3)
        # fit features to models
        females_gmm.fit(female_voice_features)
        males_gmm.fit(male_voice_features)
        ubm.fit(np.vstack((female_voice_features, male_voice_features)))
        # save models
        self.save_gmm(females_gmm, "females")
        self.save_gmm(males_gmm,   "males")
        self.save_gmm(males_gmm,   "ubm") 
Example 2
Project: hmmlearn   Author: hmmlearn   File: test_gaussian_hmm.py    License: BSD 3-Clause "New" or "Revised" License
def test_score_samples_and_decode(self):
        h = hmm.GaussianHMM(self.n_components, self.covariance_type,
                            init_params="st")
        h.means_ = self.means
        h.covars_ = self.covars

        # Make sure the means are far apart so posteriors.argmax()
        # picks the actual component used to generate the observations.
        h.means_ = 20 * h.means_

        gaussidx = np.repeat(np.arange(self.n_components), 5)
        n_samples = len(gaussidx)
        X = self.prng.randn(n_samples, self.n_features) + h.means_[gaussidx]
        h._init(X)
        ll, posteriors = h.score_samples(X)

        assert posteriors.shape == (n_samples, self.n_components)
        assert np.allclose(posteriors.sum(axis=1), np.ones(n_samples))

        viterbi_ll, stateseq = h.decode(X)
        assert np.allclose(stateseq, gaussidx) 
Example 3
Project: hmmlearn   Author: hmmlearn   File: test_gaussian_hmm.py    License: BSD 3-Clause "New" or "Revised" License
def test_fit_zero_variance(self):
        # Example from issue #2 on GitHub.
        X = np.asarray([
            [7.15000000e+02, 5.85000000e+02, 0.00000000e+00, 0.00000000e+00],
            [7.15000000e+02, 5.20000000e+02, 1.04705811e+00, -6.03696289e+01],
            [7.15000000e+02, 4.55000000e+02, 7.20886230e-01, -5.27055664e+01],
            [7.15000000e+02, 3.90000000e+02, -4.57946777e-01, -7.80605469e+01],
            [7.15000000e+02, 3.25000000e+02, -6.43127441e+00, -5.59954834e+01],
            [7.15000000e+02, 2.60000000e+02, -2.90063477e+00, -7.80220947e+01],
            [7.15000000e+02, 1.95000000e+02, 8.45532227e+00, -7.03294373e+01],
            [7.15000000e+02, 1.30000000e+02, 4.09387207e+00, -5.83621216e+01],
            [7.15000000e+02, 6.50000000e+01, -1.21667480e+00, -4.48131409e+01]
        ])

        h = hmm.GaussianHMM(3, self.covariance_type)
        h.fit(X) 
Example 4
Project: Stock-Market-Trend-Analysis-Using-HMM-LSTM   Author: JINGEWU   File: GMM_HMM.py    License: MIT License
def GMM_HMM(O, lengths, n_states, verbose=False):
    # First step: initialize and fit a Gaussian HMM (the GMMHMM variant is
    # kept commented out below).
    # input:
    #     O, array, (n_samples, n_features), observations
    #     lengths, list, lengths of the individual sequences in O
    #     n_states, number of hidden states
    # output:
    #     S, the best (Viterbi) state sequence
    #     A, the transition probability matrix of the fitted HMM
    #     gamma, the posterior state probabilities for each sample

    # model = hmm.GMMHMM(n_components=n_states, n_mix=4, covariance_type="diag", n_iter=1000, verbose=verbose).fit(O, lengths)
    model = hmm.GaussianHMM(n_components=n_states, covariance_type='diag', n_iter=1000, verbose=verbose).fit(O, lengths)

    pi = model.startprob_
    A = model.transmat_
    _, S = model.decode(O, algorithm='viterbi')
    gamma = model.predict_proba(O)
    pickle.dump(model, open('C:/Users/Administrator/Desktop/HMM_program/save/GMM_HMM_model.pkl', 'wb'))

    return S, A, gamma 
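A hypothetical call to the GMM_HMM helper above with synthetic data (note that the function pickles the fitted model to a hard-coded Windows path, which would need to exist or be changed on another machine):

import numpy as np

# Two synthetic sequences of 2-D observations, stacked as hmmlearn expects.
rng = np.random.RandomState(0)
lengths = [60, 40]
O = rng.randn(sum(lengths), 2)

S, A, gamma = GMM_HMM(O, lengths, n_states=3)
print(S.shape, A.shape, gamma.shape)   # (100,), (3, 3), (100, 3)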
Example 5
Project: Python-Machine-Learning-Cookbook-Second-Edition   Author: PacktPublishing   File: speech_recognizer.py    License: MIT License
def __init__(self, model_name='GaussianHMM', n_components=4, cov_type='diag', n_iter=1000):
        self.model_name = model_name
        self.n_components = n_components
        self.cov_type = cov_type
        self.n_iter = n_iter
        self.models = []

        if self.model_name == 'GaussianHMM':
            self.model = hmm.GaussianHMM(n_components=self.n_components, 
                    covariance_type=self.cov_type, n_iter=self.n_iter)
        else:
            raise TypeError('Invalid model type')

    # X is a 2D numpy array where each row is 13D 
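The comment above belongs to a training method that is truncated in this excerpt; the following is a hypothetical sketch (random stand-in data, illustrative only) of fitting and scoring such a model on 13-dimensional MFCC-like features:

import numpy as np
from hmmlearn import hmm

# Stand-in for a batch of 13-dimensional MFCC frames from one recording.
X = np.random.randn(200, 13)

model = hmm.GaussianHMM(n_components=4, covariance_type='diag', n_iter=1000)
model.fit(X)              # train on the MFCC frames
score = model.score(X)    # log-likelihood, used later to rank candidate models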
Example 6
Project: Voice-based-gender-recognition   Author: SuperKogito   File: GenderIdentifier.py    License: MIT License
def process(self):
        files = self.get_file_paths(self.females_training_path, self.males_training_path)
        # read the test directory and get the list of test audio files
        for file in files:
            self.total_sample += 1
            print("%10s %8s %1s" % ("--> TESTING", ":", os.path.basename(file)))

            #self.ffmpeg_silence_eliminator(file, file.split('.')[0] + "_without_silence.wav")

            # extract MFCC & delta MFCC features from audio
            try: 
                # vector = self.features_extractor.extract_features(file.split('.')[0] + "_without_silence.wav")
                vector = self.features_extractor.extract_features(file)
                spk_gmm = hmm.GaussianHMM(n_components=16)      
                spk_gmm.fit(vector)
                self.spk_vec = spk_gmm.means_
                print(self.spk_vec.shape)
                prediction = list(self.model.predict_classes(self.spk_vec))
                print(prediction)
                if prediction.count(0) <= prediction.count(1) : sc = 1
                else                                          : sc = 0
                
                genders = {0: "female", 1: "male"}
                winner = genders[sc]
                expected_gender = file.split("/")[1][:-1]
                print(expected_gender)
                
                print("%10s %6s %1s" % ("+ EXPECTATION",":", expected_gender))
                print("%10s %3s %1s" %  ("+ IDENTIFICATION", ":", winner))
    
                if winner != expected_gender: self.error += 1
                print("----------------------------------------------------")
    

            except : print("Error")
            # os.remove(file.split('.')[0] + "_without_silence.wav")
            
            
        accuracy     = ( float(self.total_sample - self.error) / float(self.total_sample) ) * 100
        accuracy_msg = "*** Accuracy = " + str(round(accuracy, 3)) + "% ***"
        print(accuracy_msg) 
Example 7
Project: Voice-based-gender-recognition   Author: SuperKogito   File: ModelsTrainer.py    License: MIT License
def process(self):
        females, males = self.get_file_paths(self.females_training_path,
                                             self.males_training_path)
        files = females + males
        # collect voice features
        features = {"female" : np.asarray(()), "male" : np.asarray(())}
        
        for file in files:
            print("%10s %8s %1s" % ("--> TESTING", ":", os.path.basename(file)))
            print(features["female"].shape, features["male"].shape)
            # extract MFCC & delta MFCC features from audio
            try: 
                # vector = self.features_extractor.extract_features(file.split('.')[0] + "_without_silence.wav")
                vector  = self.features_extractor.extract_features(file)
                spk_gmm = hmm.GaussianHMM(n_components=16)      
                spk_gmm.fit(vector)
                spk_vec = spk_gmm.means_
                gender  = file.split("/")[1][:-1]
                print(gender)
                # stack super vectors
                if features[gender].size == 0:  features[gender] = spk_vec
                else                         :  features[gender] = np.vstack((features[gender], spk_vec))
            
            except:
                pass
        
        # save models
        self.save_gmm(features["female"], "females")
        self.save_gmm(features["male"],   "males") 
Example 8
Project: Voice-based-gender-recognition   Author: SuperKogito   File: ModelsTrainer.py    License: MIT License
def process(self):
        females, males = self.get_file_paths(self.females_training_path,
                                             self.males_training_path)
        files = females + males
        # collect voice features
        features = {"female" : np.asarray(()), "male" : np.asarray(())}
        
        for file in files:
            print("%10s %8s %1s" % ("--> TESTING", ":", os.path.basename(file)))
            print(features["female"].shape, features["male"].shape)
            # extract MFCC & delta MFCC features from audio
            try: 
                # vector = self.features_extractor.extract_features(file.split('.')[0] + "_without_silence.wav")
                vector  = self.features_extractor.extract_features(file)
                spk_gmm = hmm.GaussianHMM(n_components=16)      
                spk_gmm.fit(vector)
                spk_vec = spk_gmm.means_
                gender  = file.split("/")[1][:-1]
                print(gender)
                # stack super vectors
                if features[gender].size == 0:  features[gender] = spk_vec
                else                         :  features[gender] = np.vstack((features[gender], spk_vec))
            
            except:
                pass
        
        # save models
        self.save_gmm(features["female"], "females")
        self.save_gmm(features["male"],   "males") 
Example 9
Project: hmmlearn   Author: hmmlearn   File: test_gaussian_hmm.py    License: BSD 3-Clause "New" or "Revised" License
def test_bad_covariance_type(self):
        with pytest.raises(ValueError):
            h = hmm.GaussianHMM(20, covariance_type='badcovariance_type')
            h.means_ = self.means
            h.covars_ = []
            h.startprob_ = self.startprob
            h.transmat_ = self.transmat
            h._check() 
Example 10
Project: hmmlearn   Author: hmmlearn   File: test_gaussian_hmm.py    License: BSD 3-Clause "New" or "Revised" License
def test_sample(self, n=1000):
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.startprob_ = self.startprob
        h.transmat_ = self.transmat
        # Make sure the means are far apart so posteriors.argmax()
        # picks the actual component used to generate the observations.
        h.means_ = 20 * self.means
        h.covars_ = np.maximum(self.covars, 0.1)

        X, state_sequence = h.sample(n, random_state=self.prng)
        assert X.shape == (n, self.n_features)
        assert len(state_sequence) == n 
Example 11
Project: hmmlearn   Author: hmmlearn   File: test_gaussian_hmm.py    License: BSD 3-Clause "New" or "Revised" License
def test_fit(self, params='stmc', n_iter=5, **kwargs):
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.startprob_ = self.startprob
        h.transmat_ = normalized(
            self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
        h.means_ = 20 * self.means
        h.covars_ = self.covars

        lengths = [10] * 10
        X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)

        # Mess up the parameters and see if we can re-learn them.
        # TODO: change the params and uncomment the check
        h.fit(X, lengths=lengths)
        # assert log_likelihood_increasing(h, X, lengths, n_iter) 
Example 12
Project: hmmlearn   Author: hmmlearn   File: test_gaussian_hmm.py    License: BSD 3-Clause "New" or "Revised" License
def test_fit_ignored_init_warns(self, caplog):
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.startprob_ = self.startprob
        h.fit(np.random.randn(100, self.n_components))
        assert len(caplog.records) == 1
        assert "will be overwritten" in caplog.records[0].getMessage() 
Example 13
Project: hmmlearn   Author: hmmlearn   File: test_gaussian_hmm.py    License: BSD 3-Clause "New" or "Revised" License
def test_fit_sequences_of_different_length(self):
        lengths = [3, 4, 5]
        X = self.prng.rand(sum(lengths), self.n_features)

        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        # This shouldn't raise
        # ValueError: setting an array element with a sequence.
        h.fit(X, lengths=lengths) 
Example 14
Project: hmmlearn   Author: hmmlearn   File: test_gaussian_hmm.py    License: BSD 3-Clause "New" or "Revised" License
def test_fit_with_length_one_signal(self):
        lengths = [10, 8, 1]
        X = self.prng.rand(sum(lengths), self.n_features)

        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        # This shouldn't raise
        # ValueError: zero-size array to reduction operation maximum which
        #             has no identity
        h.fit(X, lengths=lengths) 
Example 15
Project: hmmlearn   Author: hmmlearn   File: test_gaussian_hmm.py    License: BSD 3-Clause "New" or "Revised" License
def test_covar_is_writeable(self):
        h = hmm.GaussianHMM(n_components=1, covariance_type="diag",
                            init_params="c")
        X = np.random.normal(size=(1000, 5))
        h._init(X)

        # np.diag returns a read-only view of the array in NumPy 1.9.X.
        # Make sure this doesn't prevent us from fitting an HMM with
        # diagonal covariance matrix. See PR#44 on GitHub for details
        # and discussion.
        assert h._covars_.flags["WRITEABLE"] 
Example 16
Project: Artificial-Intelligence-with-Python   Author: PacktPublishing   File: speech_recognizer.py    License: MIT License
def __init__(self, num_components=4, num_iter=1000):
        self.n_components = num_components
        self.n_iter = num_iter

        self.cov_type = 'diag' 
        self.model_name = 'GaussianHMM' 

        self.models = []

        self.model = hmm.GaussianHMM(n_components=self.n_components, 
                covariance_type=self.cov_type, n_iter=self.n_iter)

    # 'training_data' is a 2D numpy array where each row is 13-dimensional 
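As in the previous speech-recognizer example, the training method itself is truncated here; a hypothetical sketch of how several such per-word models could be compared at recognition time (the word labels and random features are illustrative only):

import numpy as np
from hmmlearn import hmm

# Hypothetical: one GaussianHMM per vocabulary word, trained on stand-in data.
models = {}
for word in ("apple", "banana"):
    m = hmm.GaussianHMM(n_components=4, covariance_type='diag', n_iter=1000)
    m.fit(np.random.randn(150, 13))
    models[word] = m

# Score unseen 13-D MFCC-like frames and pick the best-matching word.
test_features = np.random.randn(80, 13)
best_word = max(models, key=lambda w: models[w].score(test_features))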
Example 17
Project: intro_ds   Author: GenTang   File: stock_analysis.py    License: Apache License 2.0
def getHiddenStatus(data):
    """
    使用Gaussian HMM对数据进行建模,并得到预测值
    """
    cols = ["r_5", "r_20", "a_5", "a_20"]
    model = GaussianHMM(n_components=3, covariance_type="full", n_iter=1000,
        random_state=2010)
    model.fit(data[cols])
    hiddenStatus = model.predict(data[cols])
    return hiddenStatus 
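A hypothetical call to getHiddenStatus above, assuming a DataFrame with the r_5, r_20, a_5, a_20 feature columns the original project computes (random values used here purely for illustration):

import numpy as np
import pandas as pd

data = pd.DataFrame(np.random.randn(250, 4),
                    columns=["r_5", "r_20", "a_5", "a_20"])
hidden = getHiddenStatus(data)
print(hidden[:10])   # one hidden-state label per row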
Example 18
Project: abu   Author: bbfamily   File: ABuMLCreater.py    License: GNU General Public License v3.0
def hmm_gaussian(self, assign=True, **kwargs):
        """
        无监督学习器,实例化GMM,默认使用GMM(n_components=2),通过**kwargs即
        关键字参数透传GMM,即GMM(**kwargs)

        导入模块使用
            try:
                from hmmlearn.hmm import GaussianHMM as GMM
            except ImportError:
                from ..CoreBu.ABuFixes import GMM
        即优先选用hmmlearn中的GaussianHMM,没有安装的情况下使用sklearn中的GMM

        :param assign: 是否保存实例后的hmm对象,默认True,self.hmm = hmm
        :param kwargs: 有参数情况下初始化: GMM(**kwargs)
                       无参数情况下初始化: GMM(n_components=2)
        :return: 实例化的GMM对象
        """
        if kwargs is not None and len(kwargs) > 0:
            hmm = GMM(**kwargs)
        else:
            # default: only n_components=2, i.e. two components
            hmm = GMM(n_components=2)
        if assign:
            self.hmm = hmm
        return hmm

    # noinspection PyMethodMayBeStatic 
Example 19
Project: Hands-On-Markov-Models-with-Python   Author: PacktPublishing   File: analyse_data.py    License: MIT License
def __init__(self, company, test_size=0.33,
                 n_hidden_states=4, n_latency_days=10,
                 n_steps_frac_change=50, n_steps_frac_high=10,
                 n_steps_frac_low=10):
        self._init_logger()

        self.company = company
        self.n_latency_days = n_latency_days

        self.hmm = GaussianHMM(n_components=n_hidden_states)

        self._split_train_test_data(test_size)

        self._compute_all_possible_outcomes(
            n_steps_frac_change, n_steps_frac_high, n_steps_frac_low) 
Example 20
Project: AirTicketPredicting   Author: junlulocky   File: HmmClassifier.py    License: MIT License
def __init__(self, referenceSeqs, inputSeq):
        self.referenceSeqs = referenceSeqs
        self.inputSeq = inputSeq

        # feel free to change this model
        self.model = GaussianHMM(n_components=2, covariance_type="full", n_iter=2000) 
Example 21
Project: Stock-Market-Trend-Analysis-Using-HMM-LSTM   Author: JINGEWU   File: GMM_HMM.py    License: MIT License
def GMM_HMM(O, lengths, n_states, v_type, n_iter, verbose=True):

    # model = hmm.GMMHMM(n_components=n_states, covariance_type=v_type, n_mix=4, n_iter=n_iter, verbose=verbose).fit(O, lengths)
    model = hmm.GaussianHMM(n_components=n_states, covariance_type=v_type, n_iter=n_iter, verbose=verbose).fit(O, lengths)

    return model 
Example 22
Project: Stock-Market-Trend-Analysis-Using-HMM-LSTM   Author: JINGEWU   File: HMM_duoyinzi.py    License: MIT License
def form_model(X, lengths, n, v_type, n_iter, verbose=True):
    model = hmm.GaussianHMM(n_components=n, covariance_type=v_type, n_iter=n_iter, verbose=verbose).fit(X, lengths)
    return model 
Example 23
Project: Voice-based-gender-recognition   Author: SuperKogito   File: GenderIdentifier.py    License: MIT License
def process(self):
        files = self.get_file_paths(self.females_training_path, self.males_training_path)
        # read the test directory and get the list of test audio files
        for file in files:
            self.total_sample += 1
            print("%10s %8s %1s" % ("--> TESTING", ":", os.path.basename(file)))

            #self.ffmpeg_silence_eliminator(file, file.split('.')[0] + "_without_silence.wav")

            # extract MFCC & delta MFCC features from audio
            try: 
                # vector = self.features_extractor.extract_features(file.split('.')[0] + "_without_silence.wav")
                vector = self.features_extractor.extract_features(file)
                print("S1")
                # generate gaussian mixture models
                spk_gmm = hmm.GaussianHMM(n_components=16)      
                print("S2")

                # fit features to models
                spk_gmm.fit(vector)
                print("S3")
                
                self.spk_vec = spk_gmm.means_
                print(self.clf.predict(self.spk_vec))
                if sum(self.clf.predict(self.spk_vec)) > 0 : sc =  1
                else                                       : sc = -1
                genders = {-1: "female", 1: "male"}
                winner = genders[sc]
                expected_gender = file.split("/")[1][:-1]
                print(expected_gender)
                
                print("%10s %6s %1s" % ("+ EXPECTATION",":", expected_gender))
                print("%10s %3s %1s" %  ("+ IDENTIFICATION", ":", winner))

                if winner != expected_gender: self.error += 1
                print("----------------------------------------------------")

    
            except : print("Error")
            # os.remove(file.split('.')[0] + "_without_silence.wav")
            
            
        accuracy     = ( float(self.total_sample - self.error) / float(self.total_sample) ) * 100
        accuracy_msg = "*** Accuracy = " + str(round(accuracy, 3)) + "% ***"
        print(accuracy_msg) 
Example 24
Project: hmmlearn   Author: hmmlearn   File: test_gaussian_hmm.py    License: BSD 3-Clause "New" or "Revised" License
def test_fit_with_priors(self, params='stmc', n_iter=5):
        startprob_prior = 10 * self.startprob + 2.0
        transmat_prior = 10 * self.transmat + 2.0
        means_prior = self.means
        means_weight = 2.0
        covars_weight = 2.0
        if self.covariance_type in ('full', 'tied'):
            covars_weight += self.n_features
        covars_prior = self.covars

        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.startprob_ = self.startprob
        h.startprob_prior = startprob_prior
        h.transmat_ = normalized(
            self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
        h.transmat_prior = transmat_prior
        h.means_ = 20 * self.means
        h.means_prior = means_prior
        h.means_weight = means_weight
        h.covars_ = self.covars
        h.covars_prior = covars_prior
        h.covars_weight = covars_weight

        lengths = [200] * 10
        X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)

        # Re-initialize the parameters and check that we can converge to the
        # original parameter values.
        h_learn = hmm.GaussianHMM(self.n_components, self.covariance_type,
                                  params=params)
        h_learn.n_iter = 0
        h_learn.fit(X, lengths=lengths)

        assert log_likelihood_increasing(h_learn, X, lengths, n_iter)

        # Make sure we've converged to the right parameters.
        # a) means
        assert np.allclose(sorted(h.means_.tolist()),
                           sorted(h_learn.means_.tolist()),
                           0.01)
        # b) covars are hard to estimate precisely from a relatively small
        #    sample, thus the large threshold
        assert np.allclose(sorted(h._covars_.tolist()),
                           sorted(h_learn._covars_.tolist()),
                           10)