Python django.conf.settings.REDIS_PORT Examples

The following are 11 code examples showing how to use django.conf.settings.REDIS_PORT. The examples are extracted from open source projects; the project, author, source file, and license are listed above each example.


You may also want to check out the other available attributes of the module django.conf.settings.
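REDIS_PORT is not one of Django's built-in settings; it is a project-defined value that django.conf.settings exposes like any other attribute of the settings module. As a minimal sketch of the pattern (the values and key names below are illustrative, not taken from the projects above):

# settings.py -- project-defined Redis connection settings (illustrative values)
REDIS_HOST = "127.0.0.1"
REDIS_PORT = 6379
REDIS_DB = 0

# elsewhere in the project
import redis
from django.conf import settings

client = redis.StrictRedis(
    host=settings.REDIS_HOST,
    port=settings.REDIS_PORT,
    db=settings.REDIS_DB,
)
client.set("greeting", "hello")

The examples below vary mainly in which additional settings they pass (database number, password) and in whether they build the client directly or through a connection pool.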

Example 1
Project: DevOps   Author: YoLoveLife   File: dashboard.py   License: GNU General Public License v2.0
def get(self, request, *args, **kwargs):
        connect = redis.StrictRedis(
            host=settings.REDIS_HOST,
            port=settings.REDIS_PORT,
            db=settings.REDIS_SPACE,
            password=settings.REDIS_PASSWD
        )
        week_list = ['Mon', 'Tue', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun']
        # hgetall returns a dict of bytes keys/values, so look up with encoded keys
        TEMP = connect.hgetall('WORK')
        WORK = []
        for key in week_list:
            WORK.append({
                'time': key,
                '执行次数': TEMP[key.encode('utf-8')]  # "execution count"
            })
        return Response(
            {'title': '一周内工单执行', 'dataset': WORK},  # "work orders executed this week"
            status=status.HTTP_200_OK
        )
Example 2
Project: DevOps   Author: YoLoveLife   File: dashboard.py   License: GNU General Public License v2.0
def get(self, request, *args, **kwargs):
        connect = redis.StrictRedis(
            host=settings.REDIS_HOST,
            port=settings.REDIS_PORT,
            db=settings.REDIS_SPACE,
            password=settings.REDIS_PASSWD
        )
        # hgetall returns bytes keys/values; decode keys and cast counts to int
        TEMP = connect.hgetall('GROUP')
        GROUP = [
            ['主机数目', 'count'],  # "number of hosts"
        ]
        for key in TEMP:
            GROUP.append([str(key, encoding='utf-8'), int(TEMP[key])])
        return Response(
            {'title': '主机统计', 'dataset': GROUP},  # "host statistics"
            status=status.HTTP_200_OK
        )
Example 3
Project: koku   Author: project-koku   File: cache.py   License: GNU Affero General Public License v3.0
def invalidate_view_cache_for_tenant_and_cache_key(schema_name, cache_key_prefix=None):
    """Invalidate our view cache for a specific tenant and source type.

    If cache_key_prefix is None, all views will be invalidated.
    """
    cache = caches["default"]
    if isinstance(cache, RedisCache):
        # Use redis-py directly here, since the Django cache API does not expose key listing
        cache = Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB)
        all_keys = cache.keys("*")
        all_keys = [key.decode("utf-8") for key in all_keys]
    elif isinstance(cache, LocMemCache):
        all_keys = list(locmem._caches.get(settings.TEST_CACHE_LOCATION).keys())
        all_keys = [key.split(":", 2)[-1] for key in all_keys]
    else:
        msg = "Using an unsupported caching backend!"
        raise KokuCacheError(msg)

    all_keys = all_keys if all_keys is not None else []

    if cache_key_prefix:
        keys_to_invalidate = [key for key in all_keys if (schema_name in key and cache_key_prefix in key)]
    else:
        # Invalidate all cached views for the tenant
        keys_to_invalidate = [key for key in all_keys if schema_name in key]

    for key in keys_to_invalidate:
        cache.delete(key)

    msg = f"Invalidated request cache for\n\ttenant: {schema_name}\n\tcache_key_prefix: {cache_key_prefix}"
    LOG.info(msg) 
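A usage sketch for the function above; the tenant schema name and cache key prefix are hypothetical values for illustration:

# Invalidate cached views for one tenant, limited to keys containing a prefix
invalidate_view_cache_for_tenant_and_cache_key("acct10001", cache_key_prefix="aws")

# With no prefix, every cached view for the tenant is invalidated
invalidate_view_cache_for_tenant_and_cache_key("acct10001")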
Example 4
Project: KortURL   Author: yandenghong   File: client.py   License: MIT License
def __init__(self):
        self._pool = redis.ConnectionPool(host=settings.REDIS_HOST, port=settings.REDIS_PORT,
                                          decode_responses=True, db=settings.MAP_CACHE_DB,
                                          password=settings.REDIS_PASSWORD)
        self.client = redis.Redis(connection_pool=self._pool) 
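Since the pool is created once per instance, every command issued through self.client reuses its connections. A usage sketch, assuming the wrapper class is named RedisClient (the excerpt does not show the class definition) and a made-up short-URL key:

store = RedisClient()  # hypothetical class name
store.client.set("short:abc123", "https://example.com/long/target")
# decode_responses=True makes get() return str rather than bytes
print(store.client.get("short:abc123"))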
Example 5
Project: zulip   Author: zulip   File: redis_utils.py   License: Apache License 2.0
def get_redis_client() -> redis.StrictRedis:
    return redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT,
                             password=settings.REDIS_PASSWORD, db=0) 
Example 6
def __init__(self, verbose=False):
        self.r = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=0)
        self.verbose = verbose 
Example 7
Project: DevOps   Author: YoLoveLife   File: dashboard.py   License: GNU General Public License v2.0
def get(self, request, *args, **kwargs):
        connect = redis.StrictRedis(
            host=settings.REDIS_HOST,
            port=settings.REDIS_PORT,
            db=settings.REDIS_SPACE,
            password=settings.REDIS_PASSWD
        )
        # hgetall returns a dict with bytes keys; decode them for the JSON response
        TEMP = connect.hgetall('COUNT')
        COUNT = {}
        for key in TEMP:
            COUNT[str(key, encoding='utf-8')] = TEMP[key]
        return Response(COUNT, status=status.HTTP_200_OK)
Example 8
Project: canvas   Author: canvasnetworks   File: channels.py   License: BSD 3-Clause "New" or "Revised" License
def connect(self):
        # yield makes this a generator; it is presumably driven by Twisted's
        # @defer.inlineCallbacks in the original source
        cc = lambda *args: protocol.ClientCreator(reactor, *args)  # unused in this excerpt

        self.redis_sub = RedisDispatch(settings.REDIS_HOST, settings.REDIS_PORT)
        redis_factory = RedisServiceRegisteringFactory(self)
        reactor.connectTCP(settings.REDIS_HOST, settings.REDIS_PORT, redis_factory)
        yield redis_factory.deferred
Example 9
Project: freeipa-password-reset   Author: larrabee   File: pwdmanager.py   License: GNU General Public License v3.0
def __init__(self):
        if self.__kerberos_has_ticket() is False:
            self.__kerberos_init()
        if api.isdone('finalize') is False:
            api.bootstrap_with_global_options(context='api')
            api.finalize()
        api.Backend.rpcclient.connect()
        self.redis = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, password=settings.REDIS_PASSWORD) 
Example 10
Project: cognitive   Author: CiscoSystems   File: results_storm.py   License: Apache License 2.0
def __init__(self, thread_id, name, experiment, component_id, max_results, cache_results):
        threading.Thread.__init__(self)
        self.threadID = thread_id
        self.name = name
        self.experiment = experiment
        self.comp_id = component_id
        self.result = {}
        self.max_results = max_results
        self.cache_results = cache_results
        print("Submitting topology to storm. End component", self.comp_id)
        exp = Experiment.objects.get(pk=self.experiment)
        graph = exp.workflow.graph_data
        graph_data = {}
        print(graph)
        # graph_data is serialized as comma-separated "a:b" pairs, where node b
        # depends on node a; build the mapping toposort expects (node -> dependencies)
        tmp = graph.split(',')
        for elem in tmp:
            first_node = elem.split(":")[0]
            second_node = elem.split(":")[1]
            if second_node in graph_data:
                depend_nodes = graph_data[second_node]
                depend_nodes.add(first_node)
            else:
                graph_data[second_node] = set()
                graph_data[second_node].add(first_node)
        topological_graph = toposort_flatten(graph_data)
        print("Graph after topological sort", topological_graph)
        message = {
            'exp_id': self.experiment, 'result': self.comp_id,
            'graph': topological_graph, 'components': defaultdict()}

        for data in topological_graph:
            component_id = int(data)
            comp = Component.objects.get(pk=component_id)
            if comp.operation_type.function_type == 'Create':
                if comp.operation_type.function_arg == 'Table':
                    filename = comp.operation_type.function_subtype_arg
                    input_data = read_csv(filename)
                    message['input'] = {}
                    for elem in list(input_data.columns):
                        message['input'][elem] = list(input_data[elem])
                    message['cols'] = list(input_data.columns)
                    # message['input'] = input_data.to_dict()

            serialized_obj = serializers.serialize('json', [comp.operation_type, ])
            print("Component_id", component_id, " ", comp.operation_type)
            message['components'][data] = serialized_obj

        print("Message ", message)
        # Subscribe to a per-experiment channel for results, then publish the
        # serialized workflow to the 'workflow' channel
        r = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=0)
        self.pubsub = r.pubsub(ignore_subscribe_messages=True)
        self.pubsub.subscribe("Exp " + str(self.experiment))
        ret = r.publish('workflow', json.dumps(message))
        print("return", ret)
Example 11
Project: canvas   Author: canvasnetworks   File: profile_redis.py   License: BSD 3-Clause "New" or "Revised" License
def handle(self, sample='10000', host='ip-10-203-46-218.ec2.internal', *args, **options):
        slave_redis = CanvasRedis(host=host, port=settings.REDIS_PORT, db=settings.REDIS_DB_MAIN)
        slave_cache = CanvasRedis(host=host, port=settings.REDIS_PORT, db=settings.REDIS_DB_CACHE)

        if sample != "*":
            sample = int(sample)

        def human(size):
            # Multiply size * 3 to roughly account for the difference in RDB vs in-memory size.
            return "%.1f MB" % (size * 3 / 1000000.0)

        for client in (slave_redis, slave_cache):
            dbsize = client.dbsize()
            if sample == "*":
                print("Summarizing total memory usage for db %s" % client.connection.db)
                key_names = client.keys("*")
            else:
                groups = collections.defaultdict(lambda: 0)
                scalar = 1.0 * dbsize / sample
                print("Sampling %s random keys (of %s) from db %s" % (sample, dbsize, client.connection.db))
                pipeline = client.pipeline()
                for i in range(sample):
                    pipeline.randomkey()
                key_names = pipeline.execute()

            # Fetch per-key serialized sizes from the server in chunks
            chunksize = 10000
            cursor = 0
            key_sizes = []
            while cursor < len(key_names):
                pipeline = client.pipeline()
                for result in key_names[cursor:cursor+chunksize]:
                    pipeline.execute_command("DEBUG", "OBJECT", result)
                debug_chunk = pipeline.execute()
                for i, result in enumerate(debug_chunk):
                    debug_dict = dict([kv.split(':') for kv in ('type:' + result).split()])
                    key = key_names[cursor + i]
                    keysize = int(debug_dict['serializedlength']) + len(key)
                    key_sizes.append(keysize)
                cursor += chunksize

            if sample == "*":
                print(human(sum(key_sizes)))
                continue

            # TODO: msg_backlogs look big, figure out how to group these (probably show biggest 25 keys too)
            # Collapse key names like "user:123:score" into "user:#:score" so that
            # related keys aggregate into one group
            for key, keysize in zip(key_names, key_sizes):
                keygroup = re.sub(r"(?<=[:\.]).+(?=[:\.])", "#", key)
                groups[keygroup] += keysize

            print("== TOP 10 RESULTS ==")
            for k in sorted(groups, key=lambda k: -groups[k])[:10]:
                size = groups[k]
                print(k, human(size * scalar))
            avg = 1.0 * sum(key_sizes) / len(key_sizes)
            print("Average key size: %s (%s estimated total)" % (avg, human(avg * dbsize)))
            print("")