Python deap.creator.Individual() Examples
The following are 30 code examples of deap.creator.Individual().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module deap.creator, or try the search function.
Example #1
Source File: tpot_tests.py From tpot with GNU Lesser General Public License v3.0 | 6 votes |
def test_predict_proba():
    """Assert that TPOT's predict_proba returns a matrix shaped (rows, n_classes)."""
    tpot_obj = TPOTClassifier()
    tpot_obj._fit_init()
    # Fixed pipeline so the test is deterministic.
    pipeline_string = (
        'DecisionTreeClassifier('
        'input_matrix, '
        'DecisionTreeClassifier__criterion=gini, '
        'DecisionTreeClassifier__max_depth=8, '
        'DecisionTreeClassifier__min_samples_leaf=5, '
        'DecisionTreeClassifier__min_samples_split=5)'
    )
    tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
    tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
    tpot_obj.fitted_pipeline_.fit(training_features, training_target)

    probabilities = tpot_obj.predict_proba(testing_features)
    # Number of classes inferred from the largest label value.
    class_count = np.amax(testing_target) + 1
    assert probabilities.shape == (testing_features.shape[0], class_count)
Example #2
Source File: tpot_tests.py From tpot with GNU Lesser General Public License v3.0 | 6 votes |
def test_update_top_pipeline():
    """Assert that _update_top_pipeline recomputes the optimized pipeline."""
    tpot_obj = TPOTClassifier(
        random_state=42,
        population_size=1,
        offspring_size=2,
        generations=1,
        verbosity=0,
        config_dict='TPOT light'
    )
    tpot_obj.fit(training_features, training_target)

    # Wipe the cached results, then ask TPOT to restore them.
    tpot_obj._optimized_pipeline = None
    tpot_obj.fitted_pipeline_ = None
    tpot_obj._update_top_pipeline()

    assert isinstance(tpot_obj._optimized_pipeline, creator.Individual)
Example #3
Source File: tpot_tests.py From tpot with GNU Lesser General Public License v3.0 | 6 votes |
def test_memory():
    """Assert that fit runs normally with memory='auto' and cleans up its cache."""
    tpot_obj = TPOTClassifier(
        random_state=42,
        population_size=1,
        offspring_size=2,
        generations=1,
        config_dict='TPOT light',
        memory='auto',
        verbosity=0
    )
    tpot_obj.fit(training_features, training_target)

    assert isinstance(tpot_obj._optimized_pipeline, creator.Individual)
    assert tpot_obj._start_datetime is not None
    # memory='auto' keeps the public attribute but the private joblib Memory
    # object and its temporary cache directory must be gone after fit.
    assert tpot_obj.memory is not None
    assert tpot_obj._memory is None
    assert tpot_obj._cachedir is not None
    assert not os.path.isdir(tpot_obj._cachedir)
Example #4
Source File: tpot_tests.py From tpot with GNU Lesser General Public License v3.0 | 6 votes |
def test_evaluated_individuals_():
    """Assert that evaluated_individuals_ records pipelines with their CV scores."""
    tpot_obj = TPOTClassifier(
        random_state=42,
        population_size=2,
        offspring_size=4,
        generations=1,
        verbosity=0,
        config_dict='TPOT light'
    )
    tpot_obj.fit(training_features, training_target)
    assert isinstance(tpot_obj.evaluated_individuals_, dict)

    for pipeline_string in sorted(tpot_obj.evaluated_individuals_.keys()):
        individual = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
        sklearn_pipeline = tpot_obj._toolbox.compile(expr=individual)
        operator_count = tpot_obj._operator_count(individual)
        # Recompute the CV score independently; failed pipelines map to -inf,
        # mirroring TPOT's own evaluation.
        try:
            cv_scores = model_selection.cross_val_score(sklearn_pipeline, training_features, training_target, cv=5, scoring='accuracy', verbose=0)
            mean_cv_scores = np.mean(cv_scores)
        except Exception:
            mean_cv_scores = -float('inf')
        recorded = tpot_obj.evaluated_individuals_[pipeline_string]
        assert np.allclose(recorded['internal_cv_score'], mean_cv_scores)
        assert np.allclose(recorded['operator_count'], operator_count)
Example #5
Source File: tpot_tests.py From tpot with GNU Lesser General Public License v3.0 | 6 votes |
def test_fit_5():
    """Assert that fit with a tiny max_time_mins and warm_start=True optimizes and can be rerun."""
    tpot_obj = TPOTClassifier(
        random_state=42,
        population_size=2,
        generations=None,
        verbosity=0,
        max_time_mins=3/60.,
        config_dict='TPOT light',
        warm_start=True
    )
    tpot_obj._fit_init()
    # generations=None with a time budget defaults to a huge generation count.
    assert tpot_obj.generations == 1000000

    # reset generations to 20 just in case that the failed test may take too much time
    tpot_obj.generations = 20

    tpot_obj.fit(training_features, training_target)
    assert tpot_obj._pop != []
    assert isinstance(tpot_obj._optimized_pipeline, creator.Individual)
    assert tpot_obj._start_datetime is not None

    # rerun it: warm_start keeps the population alive across fits
    tpot_obj.fit(training_features, training_target)
    assert tpot_obj._pop != []
Example #6
Source File: tpot_tests.py From tpot with GNU Lesser General Public License v3.0 | 6 votes |
def test_fit_4():
    """Assert that fit with a tiny max_time_mins still yields an optimized pipeline."""
    tpot_obj = TPOTClassifier(
        random_state=42,
        population_size=2,
        generations=None,
        verbosity=0,
        max_time_mins=2/60.,
        config_dict='TPOT light'
    )
    tpot_obj._fit_init()
    # generations=None with a time budget defaults to a huge generation count.
    assert tpot_obj.generations == 1000000

    # reset generations to 20 just in case that the failed test may take too much time
    tpot_obj.generations = 20

    tpot_obj.fit(training_features, training_target)
    # Without warm_start the working population is discarded after fit.
    assert tpot_obj._pop == []
    assert isinstance(tpot_obj._optimized_pipeline, creator.Individual)
    assert tpot_obj._start_datetime is not None
Example #7
Source File: tpot_tests.py From tpot with GNU Lesser General Public License v3.0 | 6 votes |
def test_predict_proba_2():
    """Assert that predict_proba returns a matrix of valid probability floats."""
    tpot_obj = TPOTClassifier()
    tpot_obj._fit_init()
    pipeline_string = (
        'DecisionTreeClassifier('
        'input_matrix, '
        'DecisionTreeClassifier__criterion=gini, '
        'DecisionTreeClassifier__max_depth=8, '
        'DecisionTreeClassifier__min_samples_leaf=5, '
        'DecisionTreeClassifier__min_samples_split=5)'
    )
    tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
    tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
    tpot_obj.fitted_pipeline_.fit(training_features, training_target)

    result = tpot_obj.predict_proba(testing_features)
    n_rows, n_cols = result.shape
    # float_range validates each entry (presumably a probability in [0, 1] —
    # it is defined in the test module).
    for row in range(n_rows):
        for col in range(n_cols):
            float_range(result[row][col])
Example #8
Source File: tpot_tests.py From tpot with GNU Lesser General Public License v3.0 | 6 votes |
def test_predict_3():
    """Assert that predict works on a dataset containing NaN values."""
    tpot_obj = TPOTClassifier()
    tpot_obj._fit_init()
    pipeline_string = (
        'DecisionTreeClassifier('
        'input_matrix, '
        'DecisionTreeClassifier__criterion=gini, '
        'DecisionTreeClassifier__max_depth=8, '
        'DecisionTreeClassifier__min_samples_leaf=5, '
        'DecisionTreeClassifier__min_samples_split=5'
        ')'
    )
    tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
    tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
    tpot_obj.fitted_pipeline_.fit(training_features, training_target)

    # One prediction per row even when features contain NaN.
    predictions = tpot_obj.predict(features_with_nan)
    assert predictions.shape == (features_with_nan.shape[0],)
Example #9
Source File: tpot_tests.py From tpot with GNU Lesser General Public License v3.0 | 6 votes |
def test_predict_2():
    """Assert that predict returns a vector shaped (num_testing_rows,)."""
    tpot_obj = TPOTClassifier()
    tpot_obj._fit_init()
    pipeline_string = (
        'DecisionTreeClassifier('
        'input_matrix, '
        'DecisionTreeClassifier__criterion=gini, '
        'DecisionTreeClassifier__max_depth=8, '
        'DecisionTreeClassifier__min_samples_leaf=5, '
        'DecisionTreeClassifier__min_samples_split=5'
        ')'
    )
    tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
    tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
    tpot_obj.fitted_pipeline_.fit(training_features, training_target)

    predictions = tpot_obj.predict(testing_features)
    assert predictions.shape == (testing_features.shape[0],)
Example #10
Source File: cma_1+l_minfct.py From deap with GNU Lesser General Public License v3.0 | 6 votes |
def main():
    """Run the CMA-ES (1+lambda) strategy on the configured benchmark."""
    numpy.random.seed()

    # The One Plus Lambda variant starts from a single, already-evaluated parent.
    parent = creator.Individual((numpy.random.rand() * 5) - 1 for _ in range(N))
    parent.fitness.values = toolbox.evaluate(parent)

    strategy = cma.StrategyOnePlusLambda(parent, sigma=5.0, lambda_=10)
    toolbox.register("generate", strategy.generate, ind_init=creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    for label, reducer in (("avg", numpy.mean), ("std", numpy.std),
                           ("min", numpy.min), ("max", numpy.max)):
        stats.register(label, reducer)

    algorithms.eaGenerateUpdate(toolbox, ngen=200, halloffame=hof, stats=stats)
Example #11
Source File: tpot_tests.py From tpot with GNU Lesser General Public License v3.0 | 6 votes |
def test_score_2():
    """Assert that TPOTClassifier.score reproduces a known accuracy for a fixed pipeline."""
    tpot_obj = TPOTClassifier(random_state=34)
    tpot_obj._fit_init()
    known_score = 0.977777777778  # Assumes use of the TPOT accuracy function

    # Create a pipeline with a known score
    pipeline_string = (
        'KNeighborsClassifier('
        'input_matrix, '
        'KNeighborsClassifier__n_neighbors=10, '
        'KNeighborsClassifier__p=1, '
        'KNeighborsClassifier__weights=uniform'
        ')'
    )
    tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
    tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
    tpot_obj.fitted_pipeline_.fit(training_features, training_target)

    # Get score from TPOT
    score = tpot_obj.score(testing_features, testing_target)
    assert np.allclose(known_score, score)
Example #12
Source File: base.py From tpot with GNU Lesser General Public License v3.0 | 6 votes |
def clean_pipeline_string(self, individual):
    """Provide a string of the individual without the parameter prefixes.

    Parameters
    ----------
    individual: individual
        Individual which should be represented by a pretty string

    Returns
    -------
    A string like str(individual), but with parameter prefixes removed.
    """
    dirty_string = str(individual)
    # Parameter names in pipeline strings carry prefixes (e.g. ``, LinearSVC__C=...``)
    # solely to keep terminal names unique. Strip everything between ', ' and
    # '__' inclusive. A raw string is used here: the original pattern spelled
    # '\w' inside a plain string literal, which is an invalid escape sequence
    # (a SyntaxWarning in modern Python). re.sub replaces the manual
    # finditer + reversed-splice loop with one equivalent pass.
    return re.sub(r', \w+__', ', ', dirty_string)
Example #13
Source File: cma_minfct.py From deap with GNU Lesser General Public License v3.0 | 6 votes |
def main():
    """Run CMA-ES on the configured benchmark and return the best fitness found."""
    # The cma module relies on numpy's random number generator.
    numpy.random.seed(128)

    # The algorithm takes a population of one individual as argument; the
    # centroid starts at 5.0 in every dimension — see
    # http://www.lri.fr/~hansen/cmaes_inmatlab.html for details about the
    # rastrigin and other CMA-ES test settings.
    strategy = cma.Strategy(centroid=[5.0]*N, sigma=5.0, lambda_=20*N)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    for label, reducer in (("avg", numpy.mean), ("std", numpy.std),
                           ("min", numpy.min), ("max", numpy.max)):
        stats.register(label, reducer)

    # With these settings CMA-ES converges with good probability.
    algorithms.eaGenerateUpdate(toolbox, ngen=250, stats=stats, halloffame=hof)

    return hof[0].fitness.values[0]
Example #14
Source File: emna.py From deap with GNU Lesser General Public License v3.0 | 6 votes |
def main():
    """Run the EMNA strategy on the sphere benchmark and return the best fitness."""
    N, LAMBDA = 30, 1000
    MU = int(LAMBDA/4)
    strategy = EMNA(centroid=[5.0]*N, sigma=5.0, mu=MU, lambda_=LAMBDA)

    toolbox = base.Toolbox()
    toolbox.register("evaluate", benchmarks.sphere)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    # numpy's == compares element-wise and would raise inside HallOfFame's
    # similarity check; numpy.array_equal gives a single boolean instead.
    hof = tools.HallOfFame(1, similar=numpy.array_equal)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    for label, reducer in (("avg", numpy.mean), ("std", numpy.std),
                           ("min", numpy.min), ("max", numpy.max)):
        stats.register(label, reducer)

    algorithms.eaGenerateUpdate(toolbox, ngen=150, stats=stats, halloffame=hof)

    return hof[0].fitness.values[0]
Example #15
Source File: base.py From tpot with GNU Lesser General Public License v3.0 | 6 votes |
def _setup_toolbox(self):
    """Create the DEAP creators, toolbox and genetic operators for TPOT."""
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        # Multi-objective fitness: minimize operator count, maximize CV score.
        creator.create('FitnessMulti', base.Fitness, weights=(-1.0, 1.0))
        creator.create('Individual', gp.PrimitiveTree, fitness=creator.FitnessMulti, statistics=dict)

    self._toolbox = base.Toolbox()
    self._toolbox.register('expr', self._gen_grow_safe, pset=self._pset, min_=self._min, max_=self._max)
    self._toolbox.register('individual', tools.initIterate, creator.Individual, self._toolbox.expr)
    self._toolbox.register('population', tools.initRepeat, list, self._toolbox.individual)
    self._toolbox.register('compile', self._compile_to_sklearn)
    self._toolbox.register('select', tools.selNSGA2)
    self._toolbox.register('mate', self._mate_operator)
    # With tree_structure enabled, mutation may grow trees one level deeper.
    mutation_max = self._max + 1 if self.tree_structure else self._max
    self._toolbox.register('expr_mut', self._gen_grow_safe, min_=self._min, max_=mutation_max)
    self._toolbox.register('mutate', self._random_mutation_operator)
Example #16
Source File: 04-optimize-simionescu.py From Hands-On-Genetic-Algorithms-with-Python with MIT License | 5 votes |
def main():
    """Run the elitist GA on the Simionescu problem and plot fitness curves."""
    # Generation 0:
    population = toolbox.populationCreator(n=POPULATION_SIZE)

    # Track minimum and mean fitness per generation:
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", np.min)
    stats.register("avg", np.mean)

    hof = tools.HallOfFame(HALL_OF_FAME_SIZE)

    # Genetic Algorithm flow with elitism:
    population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
                                                      ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)

    # Report the best solution found:
    best = hof.items[0]
    print("-- Best Individual = ", best)
    print("-- Best Fitness = ", best.fitness.values[0])

    # Plot min/avg fitness over generations:
    min_curve, avg_curve = logbook.select("min", "avg")
    sns.set_style("whitegrid")
    plt.plot(min_curve, color='red')
    plt.plot(avg_curve, color='green')
    plt.xlabel('Generation')
    plt.ylabel('Min / Average Fitness')
    plt.title('Min and Average fitness over Generations')
    plt.show()
Example #17
Source File: 01-solve-friedman.py From Hands-On-Genetic-Algorithms-with-Python with MIT License | 5 votes |
def main():
    """Run the elitist GA on the Friedman problem and plot fitness curves."""
    # Generation 0:
    population = toolbox.populationCreator(n=POPULATION_SIZE)

    # Track minimum and mean fitness per generation:
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", numpy.min)
    stats.register("avg", numpy.mean)

    hof = tools.HallOfFame(HALL_OF_FAME_SIZE)

    # Genetic Algorithm flow with the hall-of-fame feature added:
    population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
                                                      ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)

    # Report the best solution found:
    best = hof.items[0]
    print("-- Best Ever Individual = ", best)
    print("-- Best Ever Fitness = ", best.fitness.values[0])

    # Plot min/avg fitness over generations:
    min_curve, avg_curve = logbook.select("min", "avg")
    sns.set_style("whitegrid")
    plt.plot(min_curve, color='red')
    plt.plot(avg_curve, color='green')
    plt.xlabel('Generation')
    plt.ylabel('Min / Average Fitness')
    plt.title('Min and Average fitness over Generations')
    plt.show()
Example #18
Source File: 05-optimize-simionescu-second.py From Hands-On-Genetic-Algorithms-with-Python with MIT License | 5 votes |
def main():
    """Run the elitist GA on the second Simionescu variant and plot fitness curves."""
    # Generation 0:
    population = toolbox.populationCreator(n=POPULATION_SIZE)

    # Track minimum and mean fitness per generation:
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", np.min)
    stats.register("avg", np.mean)

    hof = tools.HallOfFame(HALL_OF_FAME_SIZE)

    # Genetic Algorithm flow with elitism:
    population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
                                                      ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)

    # Report the best solution found:
    best = hof.items[0]
    print("-- Best Individual = ", best)
    print("-- Best Fitness = ", best.fitness.values[0])

    # Plot min/avg fitness over generations:
    min_curve, avg_curve = logbook.select("min", "avg")
    sns.set_style("whitegrid")
    plt.plot(min_curve, color='red')
    plt.plot(avg_curve, color='green')
    plt.xlabel('Generation')
    plt.ylabel('Min / Average Fitness')
    plt.title('Min and Average fitness over Generations')
    plt.show()
Example #19
Source File: 01-optimize-eggholder.py From Hands-On-Genetic-Algorithms-with-Python with MIT License | 5 votes |
def main():
    """Run the elitist GA on the Eggholder function and plot fitness curves."""
    # Generation 0:
    population = toolbox.populationCreator(n=POPULATION_SIZE)

    # Track minimum and mean fitness per generation:
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", np.min)
    stats.register("avg", np.mean)

    hof = tools.HallOfFame(HALL_OF_FAME_SIZE)

    # Genetic Algorithm flow with elitism:
    population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
                                                      ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)

    # Report the best solution found:
    best = hof.items[0]
    print("-- Best Individual = ", best)
    print("-- Best Fitness = ", best.fitness.values[0])

    # Plot min/avg fitness over generations:
    min_curve, avg_curve = logbook.select("min", "avg")
    sns.set_style("whitegrid")
    plt.plot(min_curve, color='red')
    plt.plot(avg_curve, color='green')
    plt.xlabel('Generation')
    plt.ylabel('Min / Average Fitness')
    plt.title('Min and Average fitness over Generations')
    plt.show()
Example #20
Source File: 03-solve-tsp.py From Hands-On-Genetic-Algorithms-with-Python with MIT License | 5 votes |
def main():
    """Run the elitist GA on the TSP instance, plot the best tour and fitness curves."""
    # Generation 0:
    population = toolbox.populationCreator(n=POPULATION_SIZE)

    # Track minimum and mean fitness per generation:
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", np.min)
    stats.register("avg", np.mean)

    hof = tools.HallOfFame(HALL_OF_FAME_SIZE)

    # Genetic Algorithm flow with the hall-of-fame feature added:
    population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
                                                      ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)

    # Report the best individual:
    best = hof.items[0]
    print("-- Best Ever Individual = ", best)
    print("-- Best Ever Fitness = ", best.fitness.values[0])

    # Figure 1: the best tour found.
    plt.figure(1)
    tsp.plotData(best)

    # Figure 2: min/avg fitness over generations.
    min_curve, avg_curve = logbook.select("min", "avg")
    plt.figure(2)
    sns.set_style("whitegrid")
    plt.plot(min_curve, color='red')
    plt.plot(avg_curve, color='green')
    plt.xlabel('Generation')
    plt.ylabel('Min / Average Fitness')
    plt.title('Min and Average fitness over Generations')

    # show both plots:
    plt.show()
Example #21
Source File: 02-solve-tsp-first-attempt.py From Hands-On-Genetic-Algorithms-with-Python with MIT License | 5 votes |
def main():
    """Run a plain eaSimple GA on the TSP instance, plot the best tour and fitness curves."""
    # Generation 0:
    population = toolbox.populationCreator(n=POPULATION_SIZE)

    # Track minimum and mean fitness per generation:
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", np.min)
    stats.register("avg", np.mean)

    hof = tools.HallOfFame(HALL_OF_FAME_SIZE)

    # Genetic Algorithm flow with the hall-of-fame feature added:
    population, logbook = algorithms.eaSimple(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
                                              ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)

    # Report the best individual:
    best = hof.items[0]
    print("-- Best Ever Individual = ", best)
    print("-- Best Ever Fitness = ", best.fitness.values[0])

    # Figure 1: the best tour found.
    plt.figure(1)
    tsp.plotData(best)

    # Figure 2: min/avg fitness over generations.
    min_curve, avg_curve = logbook.select("min", "avg")
    plt.figure(2)
    sns.set_style("whitegrid")
    plt.plot(min_curve, color='red')
    plt.plot(avg_curve, color='green')
    plt.xlabel('Generation')
    plt.ylabel('Min / Average Fitness')
    plt.title('Min and Average fitness over Generations')

    # show both plots:
    plt.show()
Example #22
Source File: tpot_tests.py From tpot with GNU Lesser General Public License v3.0 | 5 votes |
def test_mutNodeReplacement_2():
    """Assert that mutNodeReplacement() returns the correct type of mutation node in a complex pipeline."""
    tpot_obj = TPOTClassifier()
    tpot_obj._fit_init()
    # a pipeline with 4 operators
    pipeline_string = (
        "LogisticRegression("
        "KNeighborsClassifier(BernoulliNB(PolynomialFeatures"
        "(input_matrix, PolynomialFeatures__degree=2, PolynomialFeatures__include_bias=False, "
        "PolynomialFeatures__interaction_only=False), BernoulliNB__alpha=10.0, BernoulliNB__fit_prior=False), "
        "KNeighborsClassifier__n_neighbors=10, KNeighborsClassifier__p=1, KNeighborsClassifier__weights=uniform),"
        "LogisticRegression__C=10.0, LogisticRegression__dual=False, LogisticRegression__penalty=l2)"
    )
    pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
    pipeline[0].ret = Output_Array
    # Snapshot the return types and primitive nodes before mutation so the
    # mutated tree can be compared against them below.
    old_ret_type_list = [node.ret for node in pipeline]
    old_prims_list = [node for node in pipeline if node.arity != 0]
    # test 30 times
    for _ in range(30):
        # Mutate a clone so the original pipeline snapshot stays intact.
        mut_ind = mutNodeReplacement(tpot_obj._toolbox.clone(pipeline), pset=tpot_obj._pset)
        new_ret_type_list = [node.ret for node in mut_ind[0]]
        new_prims_list = [node for node in mut_ind[0] if node.arity != 0]
        if new_prims_list == old_prims_list:  # Terminal mutated
            # A terminal-only mutation must leave every node's return type unchanged.
            assert new_ret_type_list == old_ret_type_list
        else:  # Primitive mutated
            # A primitive replacement must keep the operator count at 4.
            Primitive_Count = 0
            for node in mut_ind[0]:
                if isinstance(node, gp.Primitive):
                    Primitive_Count += 1
            assert Primitive_Count == 4
            # The swapped-in and swapped-out primitives must share a return type.
            diff_prims = [x for x in new_prims_list if x not in old_prims_list]
            diff_prims += [x for x in old_prims_list if x not in new_prims_list]
            if len(diff_prims) > 1:  # Sometimes mutation randomly replaces an operator that already in the pipelines
                assert diff_prims[0].ret == diff_prims[1].ret
        # The tree root must always keep the Output_Array return type.
        assert mut_ind[0][0].ret == Output_Array
Example #23
Source File: 01-solve-knapsack.py From Hands-On-Genetic-Algorithms-with-Python with MIT License | 5 votes |
def main():
    """Run the GA on the knapsack problem and report/plot the results."""
    # Generation 0:
    population = toolbox.populationCreator(n=POPULATION_SIZE)

    # Track maximum and mean fitness per generation:
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("max", numpy.max)
    stats.register("avg", numpy.mean)

    hof = tools.HallOfFame(HALL_OF_FAME_SIZE)

    # Genetic Algorithm flow with the hall-of-fame feature added:
    population, logbook = algorithms.eaSimple(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
                                              ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)

    # Report the best solution and the items it selects:
    best = hof.items[0]
    print("-- Best Ever Individual = ", best)
    print("-- Best Ever Fitness = ", best.fitness.values[0])
    print("-- Knapsack Items = ")
    knapsack.printItems(best)

    # Plot max/avg fitness over generations:
    max_curve, avg_curve = logbook.select("max", "avg")
    sns.set_style("whitegrid")
    plt.plot(max_curve, color='red')
    plt.plot(avg_curve, color='green')
    plt.xlabel('Generation')
    plt.ylabel('Max / Average Fitness')
    plt.title('Max and Average fitness over Generations')
    plt.show()
Example #24
Source File: 03-OneMax-short-hof.py From Hands-On-Genetic-Algorithms-with-Python with MIT License | 5 votes |
def main():
    """Run the OneMax GA with a hall of fame and plot fitness curves."""
    # Generation 0:
    population = toolbox.populationCreator(n=POPULATION_SIZE)

    # Track maximum and mean fitness per generation:
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("max", numpy.max)
    stats.register("avg", numpy.mean)

    hof = tools.HallOfFame(HALL_OF_FAME_SIZE)

    # Genetic Algorithm flow with the hall-of-fame feature added:
    population, logbook = algorithms.eaSimple(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
                                              ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)

    # Report the hall-of-fame contents:
    print("Hall of Fame Individuals = ", *hof.items, sep="\n")
    print("Best Ever Individual = ", hof.items[0])

    # Plot max/avg fitness over generations:
    max_curve, avg_curve = logbook.select("max", "avg")
    sns.set_style("whitegrid")
    plt.plot(max_curve, color='red')
    plt.plot(avg_curve, color='green')
    plt.xlabel('Generation')
    plt.ylabel('Max / Average Fitness')
    plt.title('Max and Average Fitness over Generations')
    plt.show()
Example #25
Source File: create_toolbox.py From pyleecan with Apache License 2.0 | 5 votes |
def create_toolbox(self):
    """OptiGenAlgNsga2Deap method to create DEAP toolbox

    Parameters
    ----------
    self : OptiGenAlgNsga2Deap

    Returns
    -------
    self : OptiGenAlgNsga2Deap
        OptiGenAlgNsga2Deap with toolbox created
    """
    # Fresh toolbox for this optimization run
    self.toolbox = base.Toolbox()

    # Fitness minimizes every objective: one -1 weight per design variable
    creator.create(
        "FitnessMin", base.Fitness, weights=[-1 for _ in self.problem.design_var]
    )
    creator.create("Individual", list, typecode="d", fitness=creator.FitnessMin)
    self.toolbox.register("creator", creator.Individual)

    # Individual and population factories
    self.toolbox.register(
        "individual",
        create_indiv,
        self.toolbox.creator,
        self.problem.output,
        self.problem.design_var,
    )
    self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual)
Example #26
Source File: bit_counter.py From Artificial-Intelligence-with-Python with MIT License | 5 votes |
def create_toolbox(num_bits):
    """Build a DEAP toolbox for the bit-counting (one-max) problem."""
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()

    # Genes are random bits
    toolbox.register("attr_bool", random.randint, 0, 1)
    # Individuals hold num_bits genes; populations are lists of individuals
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_bool, num_bits)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # Genetic operators: evaluation, crossover, mutation, selection
    toolbox.register("evaluate", eval_func)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
    toolbox.register("select", tools.selTournament, tournsize=3)

    return toolbox
Example #27
Source File: symbol_regression.py From Artificial-Intelligence-with-Python with MIT License | 5 votes |
def create_toolbox():
    """Build the GP primitive set and toolbox for symbolic regression."""
    pset = gp.PrimitiveSet("MAIN", 1)
    # Arithmetic and trigonometric building blocks
    for primitive, arity in ((operator.add, 2), (operator.sub, 2),
                             (operator.mul, 2), (division_operator, 2),
                             (operator.neg, 1), (math.cos, 1), (math.sin, 1)):
        pset.addPrimitive(primitive, arity)
    # Random integer constants in {-1, 0, 1}
    pset.addEphemeralConstant("rand101", lambda: random.randint(-1,1))
    pset.renameArguments(ARG0='x')

    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMin)

    toolbox = base.Toolbox()
    toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=1, max_=2)
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("compile", gp.compile, pset=pset)
    # Fitness is evaluated on 20 evenly spaced points in [-1, 1)
    toolbox.register("evaluate", eval_func, points=[x/10. for x in range(-10,10)])
    toolbox.register("select", tools.selTournament, tournsize=3)
    toolbox.register("mate", gp.cxOnePoint)
    toolbox.register("expr_mut", gp.genFull, min_=0, max_=2)
    toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)

    # Cap tree height at 17 to keep bloat in check
    height_limit = gp.staticLimit(key=operator.attrgetter("height"), max_value=17)
    toolbox.decorate("mate", height_limit)
    toolbox.decorate("mutate", height_limit)

    return toolbox
Example #28
Source File: robot.py From Artificial-Intelligence-with-Python with MIT License | 5 votes |
def create_toolbox():
    """Build the GP primitive set and toolbox for the robot navigation task."""
    global robot, pset

    pset = gp.PrimitiveSet("MAIN", 0)
    # Control primitives: a conditional plus 2- and 3-step sequencers
    pset.addPrimitive(robot.if_target_ahead, 2)
    pset.addPrimitive(Prog().prog2, 2)
    pset.addPrimitive(Prog().prog3, 3)
    # Terminal actions the robot can take
    pset.addTerminal(robot.move_forward)
    pset.addTerminal(robot.turn_left)
    pset.addTerminal(robot.turn_right)

    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()
    # Attribute generator
    toolbox.register("expr_init", gp.genFull, pset=pset, min_=1, max_=2)
    # Structure initializers
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr_init)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # Genetic operators
    toolbox.register("evaluate", eval_func)
    toolbox.register("select", tools.selTournament, tournsize=7)
    toolbox.register("mate", gp.cxOnePoint)
    toolbox.register("expr_mut", gp.genFull, min_=0, max_=2)
    toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)

    return toolbox
Example #29
Source File: bbob.py From deap with GNU Lesser General Public License v3.0 | 5 votes |
def main(func, dim, maxfuncevals, ftarget=None):
    """(1+1) evolution strategy with a one-fifth-style rule on a BBOB function."""
    toolbox = base.Toolbox()
    toolbox.register("update", update)
    toolbox.register("evaluate", func)
    toolbox.decorate("evaluate", tupleize)

    # Desired optimal function value wrapped as a Fitness for later comparison
    opt = creator.FitnessMin((ftarget,))

    # Initialization interval and step-size adaptation constants
    interval = -5, 5
    sigma = (interval[1] - interval[0])/2.0
    alpha = 2.0**(1.0/dim)

    # Random initial best; worst starts as a zero-filled placeholder
    best = creator.Individual(random.uniform(interval[0], interval[1]) for _ in range(dim))
    worst = creator.Individual([0.0] * dim)

    # Evaluate the first individual
    best.fitness.values = toolbox.evaluate(best)

    # Evolve until ftarget is reached or the evaluation budget (maxfuncevals) is exhausted
    for g in range(1, maxfuncevals):
        toolbox.update(worst, best, sigma)
        worst.fitness.values = toolbox.evaluate(worst)
        if best.fitness <= worst.fitness:
            # Increase mutation strength and swap the individuals
            sigma = sigma * alpha
            best, worst = worst, best
        else:
            # Decrease mutation strength
            sigma = sigma * alpha**(-0.25)

        # ">" on fitness means better (not numerically greater); stop at the optimum
        if best.fitness > opt:
            return best

    return best
Example #30
Source File: onefifth.py From deap with GNU Lesser General Public License v3.0 | 5 votes |
def main():
    """Implements the One-Fifth rule algorithm as expressed in:
    Kern, S., S.D. Muller, N. Hansen, D. Buche, J. Ocenasek and
    P. Koumoutsakos (2004). Learning Probability Distributions in Continuous
    Evolutionary Algorithms - A Comparative Review. Natural Computing, 3(1),
    pp. 77-112.

    Instead of parent and offspring, the algorithm is expressed in terms of
    best and worst: best plays the parent and worst the offspring. Rather
    than producing a new individual each generation, ``update`` rewrites the
    worst individual in place around the best one (gaussian mean) with the
    current sigma as standard deviation.
    """
    random.seed(64)

    logbook = tools.Logbook()
    logbook.header = "gen", "fitness"

    interval = (-3,7)
    mu = (random.uniform(interval[0], interval[1]) for _ in range(IND_SIZE))
    sigma = (interval[1] - interval[0])/2.0
    alpha = 2.0**(1.0/IND_SIZE)

    best = creator.Individual(mu)
    best.fitness.values = toolbox.evaluate(best)
    worst = creator.Individual((0.0,)*IND_SIZE)

    NGEN = 1500
    for g in range(NGEN):
        toolbox.update(worst, best, sigma)
        worst.fitness.values = toolbox.evaluate(worst)
        if best.fitness <= worst.fitness:
            # Success: widen the search and promote the offspring
            sigma = sigma * alpha
            best, worst = worst, best
        else:
            # Failure: narrow the search
            sigma = sigma * alpha**(-0.25)

        logbook.record(gen=g, fitness=best.fitness.values)
        print(logbook.stream)

    return best