Python bson.json_util.dumps() Examples
The following are 30 code examples of bson.json_util.dumps().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
bson.json_util, or try the search function.
Example #1
Source File: app.py From timecop with Apache License 2.0 | 7 votes |
def result_list():
    """Return the ``name`` field of every document in a MongoDB collection.

    Expects a JSON request body with ``collection``, ``database`` and ``url``
    keys. Responds with the sanitized document list and HTTP 201.
    """
    # FIX: the original imported json_util twice and pulled in pandas,
    # pymongo, time and ObjectId without ever using them.
    import json

    from bson import json_util
    from pymongo import MongoClient

    timedata = request.get_json()
    collection_ts = timedata.get('collection', 'NA')
    database = timedata.get('database', 'NA')
    # NOTE(review): the connection URL (possibly containing credentials)
    # comes straight from the request body -- confirm this endpoint is
    # only reachable by trusted callers.
    url = timedata.get('url', 'NA')

    client = MongoClient(url)
    collection_data = client[database][collection_ts]

    # Round-trip through json_util so BSON types (ObjectId, dates) become
    # plain JSON-serializable values before handing them to jsonify().
    page_sanitized = json.loads(
        json_util.dumps(collection_data.find({}, {'name': 1})))
    return jsonify(page_sanitized), 201
Example #2
Source File: manager.py From manager with GNU General Public License v3.0 | 6 votes |
def get_device_group_info(device_group):
    """Assemble the full configuration of a device group.

    Returns a JSON dump of the group's apps and cron jobs (full objects
    plus name-only lists) with HTTP 200, or a 403 payload when the device
    group does not exist.
    """
    exists, group_json = mongo_connection.mongo_get_device_group(device_group)
    if exists is False:
        return jsonify({"device_group_exists": False}), 403

    config = {
        "apps": [],
        "apps_list": [],
        "prune_id": group_json["prune_id"],
        "cron_jobs": [],
        "cron_jobs_list": [],
        "device_group_id": group_json["device_group_id"],
    }

    # Resolve each app name to its full document; names that no longer
    # resolve are silently skipped.
    for app_name in group_json["apps"]:
        found, app_json = mongo_connection.mongo_get_app(app_name)
        if found is True:
            config["apps"].append(app_json)
            config["apps_list"].append(app_json["app_name"])

    # Same treatment for the group's cron jobs.
    for job_name in group_json["cron_jobs"]:
        found, cron_job_json = mongo_connection.mongo_get_cron_job(job_name)
        if found is True:
            config["cron_jobs"].append(cron_job_json)
            config["cron_jobs_list"].append(cron_job_json["cron_job_name"])

    return dumps(config), 200
# create device_group
Example #3
Source File: views.py From kobo-predict with BSD 2-Clause "Simplified" License | 6 votes |
def delete_substage(request, id):
    """Soft-delete the form attached to a sub-stage.

    Marks the sub-stage's form as deleted instead of removing the Stage row
    itself. Returns an empty 200 payload on success, or a 400 with the error
    text on any failure (missing stage, DB error, ...).
    """
    try:
        sub_stage = Stage.objects.get(pk=id)
        old_fsxf = sub_stage.stage_forms
        old_fsxf.is_deleted = True
        old_fsxf.save()
        return Response({}, status=status.HTTP_200_OK)
    except Exception as e:
        # BUG FIX: Exception.message was removed in Python 3; str(e) is the
        # portable way to get the error text.
        return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)
Example #4
Source File: export.py From codex-backend with MIT License | 6 votes |
def export_metadata():
    """Bottle handler: export metadata for the POSTed hashes as a zip file.

    Writes one pretty-printed JSON file per hash into a temporary folder,
    zips the folder, streams the archive back as a download, then removes
    both the folder and the zip.
    """
    mdc = MetaController()
    hashes = request.forms.dict.get("file_hash[]")
    random_id = id_generator()
    tmp_path = "/tmp/meta_export"
    tmp_folder = os.path.join(tmp_path, random_id)
    call_with_output(["mkdir", "-p", tmp_folder])
    for file_hash in hashes:
        # FIX: renamed loop variable -- the original shadowed the builtin
        # `hash`. Stray CRs from the form data are stripped before cleaning.
        file_hash = clean_hash(file_hash.replace('\r', ''))
        res = mdc.read(file_hash)
        dump = dumps(res, indent=4)
        file_name = os.path.join(tmp_folder, str(file_hash) + '.txt')
        # FIX: context manager guarantees the handle is closed even if
        # write() raises (the original leaked the descriptor on error).
        with open(file_name, "w") as fd:
            fd.write(dump)
    zip_path = os.path.join(tmp_path, random_id + '.zip')
    call_with_output(["zip", "-jr", zip_path, tmp_folder])
    resp = static_file(str(random_id) + '.zip', root=tmp_path, download=True)
    resp.set_cookie('fileDownload', 'true')
    # Clean up the on-disk artifacts; the response object already holds
    # the file content.
    shutil.rmtree(tmp_folder)
    os.remove(zip_path)
    return resp
Example #5
Source File: manager.py From manager with GNU General Public License v3.0 | 6 votes |
def update_user_group_fields(user_group):
    """PUT update of a subset of a user_group's fields.

    Returns the updated document (202-style manager convention uses 200
    here) or 403/400 payloads for a missing group / empty body.
    """
    # check user_group exists first
    user_group_exists = mongo_connection.mongo_check_user_group_exists(user_group)
    # BUG FIX: the original tested `user_group` (the name string, which is
    # always truthy) instead of the existence flag, so the 403 branch could
    # never fire and updates were attempted on nonexistent groups.
    if user_group_exists is False:
        return jsonify({"user_group_exists": False}), 403
    # check the request carries at least one update parameter
    try:
        app_json = request.json
        if len(app_json) == 0:
            return jsonify({"missing_parameters": True}), 400
    except:
        return jsonify({"missing_parameters": True}), 400
    # update db
    app_json = mongo_connection.mongo_update_user_group(user_group, request.json)
    return dumps(app_json), 200
# delete a user_group
Example #6
Source File: api2.py From codex-backend with MIT License | 6 votes |
def get_metadata():
    # Bottle handler: return the stored metadata for a file hash supplied
    # via the ``file_hash`` query parameter, as a bson.json_util JSON dump.
    if request.query.file_hash == '':
        response.status = 400
        return jsonize({'message': 'file_hash parameter is missing'})
    file_hash = clean_hash(request.query.file_hash)
    if not valid_hash(file_hash):
        response.status = 400
        return jsonize({'message': 'Invalid hash format (use MD5, SHA1 or SHA2)'})
    # Normalize MD5/SHA1/SHA2 variants to the canonical database id.
    file_hash = get_file_id(file_hash)
    if file_hash is None:
        response.status = 404
        return jsonize({'message': 'Metadata not found in the database'})
    mdc = MetaController()
    res = mdc.read(file_hash)
    if res is None:
        # NOTE(review): only the not-found case is logged, and the function
        # still falls through to dump ``None`` (serialized as "null") with
        # HTTP 200 -- confirm this is intentional rather than a missing 404.
        log_event("metadata", file_hash)
    return dumps(change_date_to_str(res))
Example #7
Source File: archive_raw.py From n6 with GNU Affero General Public License v3.0 | 6 votes |
def preparations_data(self, data):
    """
    Parse raw AMQP data, stamp its MD5 into the meta headers and write it.

    Args:
        `data`: data from AMQP.

    Raises:
        `n6QueueProcessingException` when except processing data.
    """
    try:
        self.raw = loads(data)
        # calculate md5, inplace its fastest
        # FIX: hashlib.md5 requires bytes on Python 3, while json_util.dumps
        # returns text; default ensure_ascii output is pure ASCII, so this
        # encode is safe on both Python 2 and 3.
        self.headers['meta'].update({
            'md5': hashlib.md5(
                dumps(self.raw, sort_keys=True).encode('ascii')).hexdigest()})
    except Exception as exc:
        LOGGER.error('exception when processing: %r %r %r (%r)',
                     self.dbm.currdb, self.dbm.currcoll, data, exc)
        raise
    else:
        # Only persist when parsing and hashing both succeeded.
        self.write()
Example #8
Source File: custom.py From mongoengine-goodjson with MIT License | 6 votes |
def to_json(self, *args, **kwargs) -> str:
    """Serialize this document to a JSON string.

    Keyword Args:
        use_db_field: Serialize field names as they appear in MongoDB
            (rather than the attribute names on this document).
            Defaults to True.
        raw: Set True to generate MongoDB Extended JSON.
    """
    use_db_field = kwargs.pop("use_db_field", True)
    raw = kwargs.pop("raw", False)
    if not raw:
        # Flag every field so its serializer emits plain-JSON mode output
        # instead of MongoDB Extended JSON.
        for fld in self._fields.values():
            setattr(fld, "$$mode$$", "json")
    return json_util.dumps(
        self.to_mongo(use_db_field, raw=raw), *args, **kwargs)
Example #9
Source File: view.py From xunfeng with GNU General Public License v3.0 | 6 votes |
def Getplugin():
    """List plugins, optionally filtered by type, risk level and name search."""
    plugin_type = request.form.get('type', '')
    risk = request.form.get('risk', '')
    search = request.form.get('search', '')
    query = {}
    if plugin_type:
        query['type'] = plugin_type
    if risk:
        query['level'] = risk
    if search:
        # Case-insensitive substring match on the URL-decoded plugin name.
        search = unquote(search)
        query['name'] = {"$regex": search, '$options': 'i'}
    rsp = [{'name': doc['name'], 'info': doc['info']}
           for doc in Mongo.coll['Plugin'].find(query)]
    return json.dumps(rsp)
# add a task asynchronously
Example #10
Source File: manager.py From manager with GNU General Public License v3.0 | 6 votes |
def prune_images_on_all_device_groups():
    """Bump the prune_id of every device group, triggering image pruning.

    Returns a mapping of device_group name -> new prune_id with HTTP 202,
    or a 403 payload if a group disappears mid-iteration.
    """
    device_groups = mongo_connection.mongo_list_device_groups()
    prune_ids = {}
    for device_group in device_groups:
        # guard against a group being deleted between listing and updating
        if mongo_connection.mongo_check_device_group_exists(device_group) is False:
            return jsonify({"app_exists": False}), 403
        updated = mongo_connection.mongo_increase_prune_id(device_group)
        prune_ids[device_group] = updated["prune_id"]
    return dumps({"prune_ids": prune_ids}), 202
# list reports
Example #11
Source File: manager.py From manager with GNU General Public License v3.0 | 5 votes |
def get_user_group(user_group):
    """Fetch a single user_group document, or a 403 payload when missing."""
    found, user_json = mongo_connection.mongo_get_user_group(user_group)
    if found is True:
        return dumps(user_json), 200
    elif found is False:
        return jsonify({"user_group_exists": False}), 403
# create cron_job
Example #12
Source File: util.py From sync-engine with GNU Affero General Public License v3.0 | 5 votes |
def process_result_value(self, value, dialect):
    # SQLAlchemy TypeDecorator hook: convert the 128-bit integer stored in
    # the DB column back to its base-36 string form for Python-side use.
    return int128_to_b36(value)
# http://bit.ly/1LbMnqu
# Can simply use this as is because though we use bson.json_util, loads()
# dumps() return standard Python dicts like the json.* equivalents
# (because these are simply called under the hood)
Example #13
Source File: manager.py From manager with GNU General Public License v3.0 | 5 votes |
def create_app(app_name):
    # Create a new app: 403 if it already exists, 400 if the mandatory
    # docker_image parameter is missing, 200 with the new document otherwise.
    # check app doesn't exist first
    app_exists = mongo_connection.mongo_check_app_exists(app_name)
    if app_exists is True:
        return jsonify({"app_exists": True}), 403
    else:
        # check the request is passed with all needed parameters
        try:
            app_json = request.json
        except:
            return json.dumps(find_missing_params({}, ["docker_image"])), 400
        try:
            # everything except docker_image falls back to a sane default
            starting_ports = return_sane_default_if_not_declared("starting_ports", app_json, [])
            containers_per = return_sane_default_if_not_declared("containers_per", app_json, {"server": 1})
            env_vars = return_sane_default_if_not_declared("env_vars", app_json, {})
            docker_image = app_json["docker_image"]
            running = return_sane_default_if_not_declared("running", app_json, True)
            networks = return_sane_default_if_not_declared("networks", app_json, ["nebula", "bridge"])
            volumes = return_sane_default_if_not_declared("volumes", app_json, [])
            devices = return_sane_default_if_not_declared("devices", app_json, [])
            privileged = return_sane_default_if_not_declared("privileged", app_json, False)
            rolling_restart = return_sane_default_if_not_declared("rolling_restart", app_json, False)
        except:
            # docker_image is the only truly required field
            return json.dumps(find_missing_params(app_json, ["docker_image"])), 400
        # check edge case of port being outside of possible port ranges
        ports_check_return_message, port_check_return_code = check_ports_valid_range(starting_ports)
        if port_check_return_code >= 300:
            return ports_check_return_message, port_check_return_code
        # update the db
        app_json = mongo_connection.mongo_add_app(app_name, starting_ports, containers_per, env_vars,
                                                  docker_image, running, networks, volumes, devices,
                                                  privileged, rolling_restart)
        return dumps(app_json), 200
# delete an app
Example #14
Source File: manager.py From manager with GNU General Public License v3.0 | 5 votes |
def restart_app(app_name):
    """Trigger a restart of a running app by bumping its app_id."""
    app_exists, app_json = mongo_connection.mongo_get_app(app_name)
    # the app must exist and already be running for a restart to make sense
    if app_exists is False:
        return jsonify({"app_exists": False}), 403
    if app_json["running"] is False:
        return jsonify({"running_before_restart": False}), 403
    refreshed = mongo_connection.mongo_increase_app_id(app_name)
    return dumps(refreshed), 202
# stop an app
Example #15
Source File: util.py From sync-engine with GNU Affero General Public License v3.0 | 5 votes |
def process_bind_param(self, value, dialect):
    # SQLAlchemy TypeDecorator hook: serialize the Python value to JSON
    # (via bson's json_util so ObjectIds/dates survive) before it is
    # written to the DB column; NULL passes through unchanged.
    return None if value is None else json_util.dumps(value)
Example #16
Source File: manager.py From manager with GNU General Public License v3.0 | 5 votes |
def get_user(user_name):
    """Fetch a single user document, or a 403 payload when missing."""
    found, user_json = mongo_connection.mongo_get_user(user_name)
    if found is True:
        return dumps(user_json), 200
    elif found is False:
        return jsonify({"user_exists": False}), 403
# delete a user
Example #17
Source File: manager.py From manager with GNU General Public License v3.0 | 5 votes |
def update_user(user_name):
    """PUT update of a user; secrets in the payload are hashed before storage.

    Returns the updated user document with 200, a 403 payload when the user
    does not exist, or a 400 payload for a missing/empty body.
    """
    # check user exists first
    user_exists = mongo_connection.mongo_check_user_exists(user_name)
    if user_exists is False:
        return jsonify({"user_name": False}), 403
    # check user got update parameters
    try:
        user_json = request.json
        if len(user_json) == 0:
            return jsonify({"missing_parameters": True}), 400
    except:
        return jsonify({"missing_parameters": True}), 400
    # FIX: the original wrapped each hashing step in a bare `try/except:
    # pass`, which also swallowed genuine hashing errors; explicit key
    # checks keep the best-effort semantics without hiding real failures.
    # if part of the update includes a token, hash it
    if "token" in user_json:
        request.json["token"] = hash_secret(request.json["token"])
    # if part of the update includes a password, hash it
    if "password" in user_json:
        request.json["password"] = hash_secret(request.json["password"])
    # update db
    user_json = mongo_connection.mongo_update_user(user_name, request.json)
    return dumps(user_json), 200
# refresh a user token
Example #18
Source File: manager.py From manager with GNU General Public License v3.0 | 5 votes |
def create_user_group(user_group):
    # Create a new user_group: 403 if it already exists, 400 on a bad body,
    # 200 with the created document otherwise.
    # check user_group doesn't exist first
    user_exists = mongo_connection.mongo_check_user_group_exists(user_group)
    if user_exists is True:
        return jsonify({"user_group_exists": True}), 403
    else:
        # check the request is passed with all needed parameters
        try:
            user_json = request.json
        except:
            return json.dumps(find_missing_params({}, ["user_group"])), 400
        try:
            # return the user_group parameters, anything not declared is by default not allowed
            group_members = return_sane_default_if_not_declared("group_members", user_json, [])
            pruning_allowed = return_sane_default_if_not_declared("pruning_allowed", user_json, False)
            apps = return_sane_default_if_not_declared("apps", user_json, {})
            device_groups = return_sane_default_if_not_declared("device_groups", user_json, {})
            admin = return_sane_default_if_not_declared("admin", user_json, False)
            cron_jobs = return_sane_default_if_not_declared("cron_jobs", user_json, {})
        except:
            return jsonify({"missing_parameters": True}), 400
        # update the db
        user_json = mongo_connection.mongo_add_user_group(user_group, group_members, pruning_allowed,
                                                          apps, device_groups, admin, cron_jobs)
        return dumps(user_json), 200
# PUT update some fields of a user_group
Example #19
Source File: manager.py From manager with GNU General Public License v3.0 | 5 votes |
def get_cron_job(cron_job):
    """Fetch a single cron_job document, or a 403 payload when missing."""
    found, cron_job_json = mongo_connection.mongo_get_cron_job(cron_job)
    if found is True:
        return dumps(cron_job_json), 200
    elif found is False:
        return jsonify({"cron_job_exists": False}), 403
# PUT update some fields of a cron job - params not given keep their current value
Example #20
Source File: manager.py From manager with GNU General Public License v3.0 | 5 votes |
def update_cron_job_all_fields(cron_job):
    # Full (POST-style) update: every field of the cron_job is replaced;
    # undeclared optional fields are reset to their defaults.
    # check cron_job exists first
    cron_job_exists = mongo_connection.mongo_check_cron_job_exists(cron_job)
    if cron_job_exists is False:
        return jsonify({"cron_job_exists": False}), 403
    # check cron_job got update parameters
    try:
        cron_job_json = request.json
        if len(cron_job_json) == 0:
            return jsonify({"missing_parameters": True}), 400
        # docker_image and schedule are mandatory for a full update; a
        # missing key raises KeyError and lands in the except below.
        if cron_job_json["docker_image"] is None or cron_job_json["schedule"] is None:
            return json.dumps(find_missing_params(cron_job_json, ["docker_image", "schedule"])), 400
    except:
        return jsonify({"missing_parameters": True}), 400
    # set default for undeclared params
    try:
        cron_job_json["env_vars"] = return_sane_default_if_not_declared("env_vars", cron_job_json, {})
        cron_job_json["running"] = return_sane_default_if_not_declared("running", cron_job_json, True)
        cron_job_json["volumes"] = return_sane_default_if_not_declared("volumes", cron_job_json, [])
        cron_job_json["devices"] = return_sane_default_if_not_declared("devices", cron_job_json, [])
        cron_job_json["privileged"] = return_sane_default_if_not_declared("privileged", cron_job_json, False)
        cron_job_json["networks"] = return_sane_default_if_not_declared("networks", cron_job_json, ["nebula", "bridge"])
    except:
        return json.dumps(find_missing_params(cron_job_json, ["docker_image", "schedule"])), 400
    # validate the cron schedule expression if one was provided
    try:
        schedule = request.json["schedule"]
        # check edge case where schedule is not valid
        if croniter.is_valid(schedule) is False:
            return jsonify({"schedule_valid": False}), 400
    except:
        pass
    # update db
    cron_job_json = mongo_connection.mongo_update_cron_job_fields(cron_job, request.json)
    return dumps(cron_job_json), 202
# delete a cron_job
Example #21
Source File: util.py From sync-engine with GNU Affero General Public License v3.0 | 5 votes |
def json_field_too_long(value):
    # True when the JSON serialization of `value` would not fit in the
    # DB text column (MAX_TEXT_CHARS is the module-level limit).
    serialized = json_util.dumps(value)
    return len(serialized) > MAX_TEXT_CHARS
Example #22
Source File: util.py From sync-engine with GNU Affero General Public License v3.0 | 5 votes |
def process_result_value(self, value, dialect):
    # SQLAlchemy TypeDecorator hook: deserialize the JSON text stored in
    # the DB column back into Python objects; empty/NULL columns -> None.
    if not value:
        return None
    # Unfortunately loads() is strict about invalid utf-8 whereas dumps()
    # is not. This can result in ValueErrors during decoding - we simply
    # log and return None for now.
    # http://bugs.python.org/issue11489
    try:
        return json_util.loads(value)
    except ValueError:
        # deliberately falls through to an implicit None return
        log.error('ValueError on decoding JSON', value=value)
Example #23
Source File: av_count.py From codex-backend with MIT License | 5 votes |
def av_count():
    """Report how many AV analysis documents the database holds."""
    return dumps({"count": db.av_analysis.count()})
Example #24
Source File: 057_consolidate_account_sync_status_columns.py From sync-engine with GNU Affero General Public License v3.0 | 5 votes |
def upgrade():
    # Alembic migration: fold the separate sync_start_time / sync_end_time
    # columns on `account` into a single JSON-encoded _sync_status column.
    from inbox.sqlalchemy_ext.util import JSON, MutableDict
    from inbox.ignition import main_engine
    engine = main_engine(pool_size=1, max_overflow=0)
    from inbox.models.session import session_scope
    from sqlalchemy.ext.declarative import declarative_base
    op.add_column('account', sa.Column('_sync_status',
                                       MutableDict.as_mutable(JSON()),
                                       default={}, nullable=True))
    # Reflect the live schema so the Account mapping matches the DB,
    # not the (possibly newer) application models.
    Base = declarative_base()
    Base.metadata.reflect(engine)

    class Account(Base):
        __table__ = Base.metadata.tables['account']

    # Copy the old column pair into the new JSON blob, row by row.
    with session_scope(versioned=False) \
            as db_session:
        for acct in db_session.query(Account):
            d = dict(sync_start_time=str(acct.sync_start_time),
                     sync_end_time=str(acct.sync_end_time))
            acct._sync_status = json_util.dumps(d)
        db_session.commit()
    # Old columns are only dropped after the data has been copied.
    op.drop_column('account', 'sync_start_time')
    op.drop_column('account', 'sync_end_time')
Example #25
Source File: mongo.py From vakt with Apache License 2.0 | 5 votes |
def down(self):
    # Migration rollback to the v1.1.0 rule-storage format: convert each
    # policy's jsonpickle-style rules back to the old {'type', 'contents'}
    # shape, raising Irreversible for anything v1.1.0 could not represent.
    def process(doc):
        """Processor for down"""
        doc_to_save = copy.deepcopy(doc)
        rules_to_save = {}
        for name, rule in doc['rules'].items():
            rule_type = rule[self._type_marker]
            rule_contents = rule.copy()
            del rule_contents[self._type_marker]
            rule_to_save = {'type': rule_type, 'contents': {}}
            # check if we are dealing with 3-rd party or custom rules
            if not rule_type.startswith('vakt.rules.'):
                for value in rule_contents.values():
                    # if rule has non-primitive data as its contents - we can't revert it to 1.1.0
                    if isinstance(value, (dict, Rule)) and jsonpickle.tags.RESERVED.intersection(value.keys()):
                        raise Irreversible('Custom rule class contains non-primitive data %s' % value)
            # vakt's own RegexMatchRule couldn't be stored in mongo because it has non-primitive data,
            # so it's impossible to put it to storage if we revert time back to 1.1.0
            elif rule_type == 'vakt.rules.string.RegexMatchRule':
                raise Irreversible('vakt.rules.string.RegexMatchRule could not be stored in v1.1.0')
            rule_to_save['contents'].update(rule_contents)
            rules_to_save[name] = b_json.dumps(rule_to_save, sort_keys=True)
        # report or save document
        doc_to_save['rules'] = rules_to_save
        return doc_to_save
    self._each_doc(processor=process)
Example #26
Source File: mongo.py From vakt with Apache License 2.0 | 5 votes |
def __prepare_from_doc(self, doc):
    """
    Build a Policy object from a raw MongoDB document, stripping the
    mongo-internal _id and the compiled-condition helper fields first.
    """
    # todo - add dict inheritance
    del doc['_id']
    for field in self.condition_fields:
        compiled_name = self.condition_field_compiled_name(field)
        # pop with a default removes the key only when it is present
        doc.pop(compiled_name, None)
    return Policy.from_json(b_json.dumps(doc))
Example #27
Source File: FolderIO.py From pax with BSD 3-Clause "New" or "Revised" License | 5 votes |
def startup(self):
    # Prepare the output directory for event writing: handle a pre-existing
    # directory (ignore / overwrite / confirm / error), write the run
    # metadata file, and point self.tempfile at the rolling temp output.
    self.events_per_file = self.config.get('events_per_file', 50)
    self.first_event_in_current_file = None
    self.last_event_written = None
    self.output_dir = self.config['output_name']
    if os.path.exists(self.output_dir):
        if not self.config.get('ignore_existing_dir', False):
            if self.config.get('overwrite_output', False):
                # 'confirm' asks interactively before wiping the directory
                if self.config['overwrite_output'] == 'confirm':
                    print("\n\nOutput dir %s already exists. Overwrite? [y/n]:" % self.output_dir)
                    if input().lower() not in ('y', 'yes'):
                        print("\nFine, Exiting pax...\n")
                        exit()
                self.log.info("Overwriting output directory %s" % self.output_dir)
                shutil.rmtree(self.output_dir)
                os.mkdir(self.output_dir)
            else:
                raise ValueError("Output directory %s already exists, can't write your %ss there!" % (
                    self.output_dir, self.file_extension))
    else:
        self.log.info("Creating output directory %s" % self.output_dir)
        try:
            os.mkdir(self.output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                # Rare race condition when the trigger creates the dir just after the os.path.exists check
                # The trigger creates the dir for the trigger monitor data file.
                pass
            else:
                raise
    # Write the metadata to JSON
    with open(os.path.join(self.output_dir, 'pax_info.json'), 'w') as outfile:
        outfile.write(json_util.dumps(self.processor.get_metadata(), sort_keys=True))
    # Start the temporary file. Events will first be written here, until events_per_file is reached
    self.tempfile = os.path.join(self.output_dir, 'temp.' + self.file_extension)
Example #28
Source File: controllers.py From ai-chatbot-framework with MIT License | 5 votes |
def read_entity(id):
    """
    Look up an entity by its ObjectId and return it as a JSON response.
    :param id: entity id string
    :return: Response (200, application/json)
    """
    entity = Entity.objects.get(id=ObjectId(id))
    payload = dumps(entity.to_mongo().to_dict())
    return Response(response=payload, status=200, mimetype="application/json")
Example #29
Source File: controllers.py From ai-chatbot-framework with MIT License | 5 votes |
def read_intent(id):
    """
    Look up an intent by its ObjectId and return it as a JSON response.
    :param id: intent id string
    :return: Response (200, application/json)
    """
    intent = Intent.objects.get(id=ObjectId(id))
    payload = dumps(intent.to_mongo().to_dict())
    return Response(response=payload, status=200, mimetype="application/json")
Example #30
Source File: queue_count.py From codex-backend with MIT License | 5 votes |
def task_finished():
    """Report the total number of jobs queued across all VT task queues."""
    queues = ('task_private_vt', 'task_public_vt', 'task_no_vt')
    total = sum(number_of_jobs_on_queue(q) for q in queues)
    return dumps({"count": total})