Python multiprocessing.freeze_support() Examples

The following are 12 code examples of multiprocessing.freeze_support(), drawn from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module multiprocessing, or try the search function.
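For reference, the placement the standard library documents is to call freeze_support() immediately after the if __name__ == '__main__' line of the main module; when the program is not a frozen Windows executable (for example, one built with PyInstaller or py2exe) the call has no effect. A minimal sketch of that documented pattern, using a hypothetical square worker:

import multiprocessing

def square(n):
    # Hypothetical worker used only for illustration.
    return n * n

if __name__ == '__main__':
    # Must run before the rest of the multiprocessing machinery
    # in case this script is frozen into a Windows executable.
    multiprocessing.freeze_support()
    with multiprocessing.Pool(2) as pool:
        print(pool.map(square, range(4)))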
Example #1
Source File: diag.py    From py-futu-api with Apache License 2.0
def print_sys_info(opend_ip=None, opend_port=None):
    if futu.IS_PY2:
        mp.freeze_support()
    opend_version = get_opend_version(opend_ip, opend_port)
    futu_path = os.path.abspath(os.path.realpath(futu.__file__))
    log_dir = _get_log_dir()

    print('Futu path: ',  futu_path)
    print('Futu version: ', futu.__version__)
    print('OpenD version:', opend_version)
    print('Python path: ', sys.executable)
    print('Python version: ', platform.python_version())
    print('OS: ', sys.platform)
    print('Platform: ', platform.platform())
    print('Arch: ', platform.architecture())
    print('Module search path: ', sys.path)
    print('Log dir: ', log_dir) 
Example #2
Source File: px.py    From px with MIT License
def main():
    multiprocessing.freeze_support()
    sys.excepthook = handle_exceptions

    parse_config()

    run_pool() 
Example #3
Source File: compute_idf.py    From sentence-similarity with MIT License
def parts(self):
    words = set(self.voc.keys())
    multiprocessing.freeze_support()
    cores = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(processes=cores - 2)
    result = pool.map(self.com_idf, words)
    idf_dict = dict()
    for r in result:
        # Each worker returns a single-entry {word: idf} dict.
        k = list(r.keys())[0]
        v = list(r.values())[0]
        idf_dict[k] = idf_dict.get(k, 0) + v
    with codecs.open(self.file_idf, 'w', encoding='utf-8') as f:
        f.write(json.dumps(idf_dict, ensure_ascii=False, indent=2, sort_keys=False))
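Two things are worth noting about this example: freeze_support() is called inside an instance method rather than directly under the if __name__ == '__main__' guard, which is where the standard library documentation says it needs to sit to take effect in a frozen executable; and the pool is never explicitly closed, whereas an explicit pool.close()/pool.join() pair, or a with multiprocessing.Pool(...) block (Python 3.3+), is the more robust pattern.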
Example #4
Source File: main_module.py    From ibeis with Apache License 2.0
def main_close(main_locals=None):
    #import utool as ut
    #if ut.VERBOSE:
    #    print('main_close')
    # _close_parallel()
    _reset_signals()


#if __name__ == '__main__':
#    multiprocessing.freeze_support() 
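Note that in this snippet the freeze_support() call is commented out; the disabled guard at the bottom nonetheless shows the canonical placement at the end of the main module.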
Example #5
Source File: plugin.py    From galaxy_blizzard_plugin with MIT License
def main():
    multiprocessing.freeze_support()
    create_and_run_plugin(BNetPlugin, sys.argv) 
Example #6
Source File: EventMonkey.py    From EventMonkey with Apache License 2.0
def Main():
    multiprocessing.freeze_support()
    Config.Config.ClearLogs()
    
    ###GET OPTIONS###
    arguments = GetArguements()
    options = arguments.parse_args()
    
    # Check if there is geodb if frozen
    if getattr(sys,'frozen',False):
        geodb_file = os.path.join(
            'geodb',
            'GeoLite2-City.mmdb'
        )
        
        if not os.path.isfile(geodb_file):
            if GetYesNo(("There is no geodb found, would you like to download it? "
                        "This is required for using basic Geo IP support within the "
                        "report queries. If you choose not to use this functionality "
                        "expect errors for templates that use custom functions calling "
                        "geoip functions.")):
                InitGeoDb(geodb_file)
        else:
            SqliteCustomFunctions.GEO_MANAGER.AttachGeoDbs('geodb')
    
    if options.subparser_name == "process":
        options.db_name = os.path.join(
            options.output_path,
            options.evidencename+'.db'
        )
        manager = WindowsEventManager.WindowsEventManager(
            options
        )
        manager.ProcessEvents()
        CreateReports(options)
    elif options.subparser_name == "report":
        CreateReports(options)
    else:
        raise Exception("Unknown subparser: {}".format(options.subparser_name))
Example #7
Source File: app.py    From rd-usb with GNU General Public License v3.0
def run_view():
    if len(sys.argv) > 1 and "fork" in sys.argv[1]:
        multiprocessing.freeze_support()
        exit(0)

    def callback():
        run(False)

    url = "http://%s:%s" % ("127.0.0.1", 5000)
    view = Webview(url)
    view.callback = callback
    view.title = "RD-USB"
    view.width = 1250
    view.height = 800
    view.start()
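The argv check at the top is how this frozen GUI binary distinguishes a multiprocessing child from a normal launch: when multiprocessing spawns a worker from a frozen Windows executable, it re-runs the same binary with a --multiprocessing-fork argument, so freeze_support() takes over and executes the worker code, and the process exits instead of opening another window. The substring test on sys.argv[1] is this project's shorthand for detecting that flag.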
Example #8
Source File: __main__.py    From randovania with GNU General Public License v3.0
def main():
    multiprocessing.freeze_support()
    cli.run_cli(sys.argv) 
Example #9
Source File: Pred.py    From AVEC2018 with MIT License
def main():
    # These two lines are needed for multiprocessing on Windows
    if __name__ == '__main__':
        multiprocessing.freeze_support()
        endOrNot = setup(False)
        if endOrNot == True:
            if len(sys.argv) > 1:
                arg = sys.argv[1]
                for i in range(len(sys.argv)):
                    if str(sys.argv[i]) == "--debug" or str(sys.argv[i]) == "debug":
                        v.debugMode = True
                    if str(sys.argv[i]) == "--full" or str(sys.argv[i]) == "full":
                        v.fullMode = True
                if v.fullMode == True:
                    print("Full mode: now doing predictions with linear regression.")
                if isInt(arg, len(v.desc)):
                    Pred(int(arg))
                elif str(arg) == "help":
                    print("For unimodal prediction, here is the correspondence")
                    for i in range(len(v.desc)):
                        print(i, v.nameMod[i])
                elif str(arg) == "--debug" or str(arg) == "debug" or str(arg) == "full" or str(arg) == "--full":
                    Pred(None)
                else:
                    print("Error on arguments")
                    print("For unimodal prediction, here is the correspondence")
                    for i in range(len(v.desc)):
                        print(i, v.nameMod[i])
                    print("For full mode (using linear regressions for predictions) type --full or full")
                    print("For debug mode, type --debug or debug")
            else:
                Pred(None)
        else:
            print("Error on setup, please check files")
Example #10
Source File: openroastapp.py    From Openroast with GNU General Public License v3.0
def main():
    #os.chdir(get_script_dir())
    os.chdir(os.path.dirname(sys.argv[0]))
    print("changing to folder %s" % os.path.dirname(sys.argv[0]))
    multiprocessing.freeze_support()
    app = OpenroastApp()
    app.run() 
Example #11
Source File: check_v2ray.py    From ss-ssr-v2ray-gadget with MIT License
def multi_proc(configs):
	global t_conf
	multiprocessing.freeze_support()
	proc = multiprocessing.Pool(16)

	proc_result = []
	if isinstance(configs, dict):
		t = []
		t.append(configs.copy())
		configs = t

	for i, ei in enumerate(configs):
		r = proc.apply_async(sub_proc, args=(i, ei, t_conf))
		proc_result.append(r)

	proc.close()
	proc.join()

	configs_all = []
	for k in proc_result:
		configs_all.append(k.get())
	
	info = []
	configs_good_temp = []
	configs_bad_temp = []
	configs_bad = []
	configs_good = []
	for j in configs_all:
		info.append((j[1],j[2]))
		if j[1] == 9:
			configs_bad_temp.append(j[0])
		else:
			configs_good_temp.append(j)

	if configs_good_temp:
		configs_good_temp.sort(key = lambda x:x[2])
		configs_good_temp.sort(key = lambda x:x[1])
		for i in configs_good_temp:
			r = re.match(r'^\d_\d\.\d{2}_(.*)', i[0].get('remarks'))
			if r:
				remarks = r.group(1)
			else:
				remarks = i[0].get('remarks')
			remarks = '{}_{}_{}'.format(i[1], i[2], remarks)
			i[0]['remarks'] = remarks[:60]
			configs_good.append(i[0])

	if configs_bad_temp:
		for k in configs_bad_temp:
			r = re.match(r'^\d_\d\.\d{2}_(.*)', k.get('remarks'))
			if r:
				remarks = r.group(1)
			else:
				remarks = k.get('remarks')
			remarks = '{}_{}_HCR_{}'.format('9', '9.99', remarks)
			k['remarks'] = remarks[:60]
			configs_bad.append(k)
	
	return configs_good, configs_bad, info 
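Example #11 shows the submit-then-collect flow for Pool.apply_async: queue every task, close the pool, join it, then call .get() on each AsyncResult. A stripped-down sketch of the same flow, with a hypothetical stand-in for the real connectivity-check worker:

import multiprocessing

def sub_proc(i, item):
    # Hypothetical worker: echoes its inputs back.
    return (i, item)

if __name__ == '__main__':
    multiprocessing.freeze_support()
    pool = multiprocessing.Pool(4)
    pending = [pool.apply_async(sub_proc, args=(i, x))
               for i, x in enumerate(['a', 'b', 'c'])]
    pool.close()   # no more task submissions
    pool.join()    # wait for all workers to finish
    print([p.get() for p in pending])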
Example #12
Source File: test_pipeline.py    From human-rl with MIT License
def train_classifier(test, blocker=False):
    
    number_train = 20
    number_valid = 30
    number_test = 25

    steps = 1000
    batch_size = 1024
    conv_layers = 3

    if test:
        number_train = 2
        number_valid = 2
        number_test = 2
        steps = 50
        batch_size = 20
        conv_layers = 2

    multiprocessing.freeze_support()

    episode_paths = frame.episode_paths(input_path)
    print('Found {} episodes'.format(len(episode_paths)))
    np.random.seed(seed=42)
    np.random.shuffle(episode_paths)

    if blocker:
        common_hparams = dict(use_action=True,  expected_positive_weight=0.05)
        labeller = humanrl.pong_catastrophe.PongBlockerLabeller()
    else:
        common_hparams = dict(use_action=False)
        labeller = humanrl.pong_catastrophe.PongClassifierLabeller()
        
    data_loader = DataLoader(labeller, TensorflowClassifierHparams(**common_hparams))
    datasets = data_loader.split_episodes(episode_paths,
                                          number_train, number_valid, number_test, use_all=False)


    hparams_list = [
        dict(image_crop_region=((34,34+160),(0,160)), #image_shape=[42, 42, 1], 
             convolution2d_stack_args=[(4, [3, 3], [2, 2])] * conv_layers, batch_size=batch_size, multiprocess=False,
             fully_connected_stack_args=[50,10],
             use_observation=False, use_image=True,
             verbose=True
         ) 
    ]

    start_experiment = time.time()
    print('Run experiment params: ', dict(number_train=number_train, number_valid=number_valid,
                                          number_test=number_test, steps=steps, batch_size=batch_size,
                                          conv_layers=conv_layers) )
    print('hparams', common_hparams, hparams_list[0])
    
    
    logdir = save_classifier_path
    run_experiments(
        logdir, data_loader, datasets, common_hparams, hparams_list, steps=steps, log_every=int(.1*steps))

    time_experiment = time.time() - start_experiment
    print('Steps: {}. Time in mins: {}'.format(steps, (1/60)*time_experiment))

    run_classifier_metrics() 