Python boto3.client() Examples

The following are 30 code examples of boto3.client(), taken from open-source projects. The source file, project, and license for each example are noted above it. You may also want to check out the other available functions and classes of the boto3 module.
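The examples all follow the same basic pattern: create a client for a service with boto3.client(), then call API operations on it. Here is a minimal sketch of that pattern (the region name is only a placeholder; boto3 can also pick it up from your AWS configuration):

import boto3

# Create a low-level client for a service; credentials and region are resolved
# from the environment, shared config files, or an attached IAM role.
s3 = boto3.client("s3", region_name="us-east-1")  # region shown as a placeholder

# API operations return plain Python dicts.
for bucket in s3.list_buckets()["Buckets"]:
    print(bucket["Name"])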
Example #1
Source File: lambda_handler.py    From aws-auto-remediate with GNU General Public License v3.0
def get_settings(self):
        """Return the DynamoDB aws-auto-remediate-settings table in a Python dict format
        
        Returns:
            dict -- aws-auto-remediate-settings table
        """
        settings = {}
        try:
            for record in boto3.client("dynamodb").scan(
                TableName=os.environ["SETTINGSTABLE"]
            )["Items"]:
                record_json = dynamodb_json.loads(record, True)
                settings[record_json["key"]] = record_json["value"]
        except:
            self.logging.error(
                f"Could not read DynamoDB table '{os.environ['SETTINGSTABLE']}'."
            )
            self.logging.error(sys.exc_info()[1])

        return settings 
Example #2
Source File: fire.py    From fireprox with GNU General Public License v3.0
def __init__(self, arguments: argparse.Namespace, help_text: str):
        self.profile_name = arguments.profile_name
        self.access_key = arguments.access_key
        self.secret_access_key = arguments.secret_access_key
        self.session_token = arguments.session_token
        self.region = arguments.region
        self.command = arguments.command
        self.api_id = arguments.api_id
        self.url = arguments.url
        self.api_list = []
        self.client = None
        self.help = help_text

        if self.access_key and self.secret_access_key:
            if not self.region:
                self.error('Please provide a region with AWS credentials')

        if not self.load_creds():
            self.error('Unable to load AWS credentials')

        if not self.command:
            self.error('Please provide a valid command') 
Example #3
Source File: fire.py    From fireprox with GNU General Public License v3.0
def update_api(self, api_id, url):
        if not any([api_id, url]):
            self.error('Please provide a valid API ID and URL end-point')

        if url[-1] == '/':
            url = url[:-1]

        resource_id = self.get_resource(api_id)
        if resource_id:
            print(f'Found resource {resource_id} for {api_id}!')
            response = self.client.update_integration(
                restApiId=api_id,
                resourceId=resource_id,
                httpMethod='ANY',
                patchOperations=[
                    {
                        'op': 'replace',
                        'path': '/uri',
                        'value': '{}/{}'.format(url, r'{proxy}'),
                    },
                ]
            )
            return response['uri'].replace('/{proxy}', '') == url
        else:
            self.error(f'Unable to update, no valid resource for {api_id}') 
Example #4
Source File: aws_service.py    From aws-ops-automator with Apache License 2.0
def _map_describe_function_parameters(self, resources, args):
        """
        Maps the parameter names passed to the service class describe call to the names used to make the call to the
        boto service client describe call
        :param resources: Name of the resource type
        :param args: parameters to be mapped
        :return: mapped parameters
        """

        if len(self._mapped) == 0:
            return args

        mapped_args = args.copy()
        for arg in self._mapped:
            if arg in mapped_args:
                mapped_args[self._mapped[arg]] = args[arg]
                del mapped_args[arg]

        return mapped_args 
Example #5
Source File: add_me_to_ops_automator_role.py    From aws-ops-automator with Apache License 2.0
def add_me_to_role(stack, principal):
    role_resource = boto3.client("cloudformation").describe_stack_resource(
        StackName=stack, LogicalResourceId="OpsAutomatorLambdaRole").get("StackResourceDetail", None)

    role_name = role_resource["PhysicalResourceId"]

    role = boto3.client("iam").get_role(RoleName=role_name).get("Role", {})
    assume_role_policy_document = role.get("AssumeRolePolicyDocument", {})
    statement = assume_role_policy_document.get("Statement", [])

    for s in statement:
        if s["Principal"].get("AWS", "") == principal:
            break
    else:
        statement.append({"Action": "sts:AssumeRole", "Effect": "Allow", "Principal": {"AWS": principal}})
        boto3.client("iam").update_assume_role_policy(
            RoleName=role_name,
            PolicyDocument=json.dumps(assume_role_policy_document)
        )
        print(("Principal {} can now assume role {}".format(principal, role_name))) 
Example #6
Source File: forward-events.py    From aws-ops-automator with Apache License 2.0
def lambda_handler(event, _):
    print("Ops Automator Events Forwarder (version %version%)")
    destination_region = os.getenv("OPS_AUTOMATOR_REGION", "")
    destination_account = os.getenv("OPS_AUTOMATOR_ACCOUNT")
    source = event.get("source", "")
    detail_type = event.get("detail-type", "")
    if ((event.get("region", "") != destination_region) or (event.get("account", "") != destination_account)) and \
            detail_type in FORWARDED_EVENTS.get(source, []):

        destination_region_sns_client = boto3.client("sns", region_name=destination_region)

        try:
            topic = os.getenv("OPS_AUTOMATOR_TOPIC_ARN")
            destination_region_sns_client.publish(TopicArn=topic, Message=json.dumps(event))
            print(INF_FORWARDED.format(source, detail_type, destination_region, destination_account, topic, str(event)))
            return "OK"
        except Exception as ex:
            raise Exception(ERR_FAILED_FORWARD, str(event), ex)

    else:
        print(INF_EVENT_ALREADY_IN_REGION.format(source, detail_type, destination_region))
Example #7
Source File: cloudwatch_queue_handler_lambda.py    From aws-ops-automator with Apache License 2.0
def __init__(self):

        self.log_group = os.environ[ENV_LOG_GROUP]

        max_put_calls_per_account = int(os.getenv(ENV_CWL_LIMIT_PUT_CALLS_PER_ACCOUNT, DEFAULT_CWL_LIMIT_PUT_CALLS_PER_ACCOUNT))
        self._max_put_call_account_throttling = Throttle(max_put_calls_per_account)

        self.max_put_calls_per_stream = int(os.getenv(ENV_CWL_LIMIT_PUT_CALLS_PER_STREAM, DEFAULT_CWL_LIMIT_PUT_CALLS_PER_STREAM))
        self._max_put_call_stream_throttling = {}

        max_api_calls = int(os.getenv(ENV_CWL_LIMIT_API_CALLS, DEFAULT_CWL_LIMIT_API_CALLS))
        self._max_cwl_api_calls = Throttle(max_api_calls)

        self._log_client = boto3.client("logs")

        self._stream_tokens = {}

        self._buffer = collections.OrderedDict()
        self._buffer_size = 0
        self.fifo = None 
Example #8
Source File: creation_certificates.py    From harmony-ops with MIT License
def get_existing_certs(region, dn):

    acm_client = boto3.client(service_name='acm', region_name=region)
    dict_exist_sslcerts.clear()
    try:
        resp = acm_client.list_certificates(
            CertificateStatuses=['ISSUED', 'PENDING_VALIDATION'],
            MaxItems=1000,
        )
        for cert in resp['CertificateSummaryList']:
            if dn == cert['DomainName']:
                dict_exist_sslcerts[cert['DomainName']].append(cert['CertificateArn'])

        # pp.pprint(dict_exist_sslcerts)
        return dict_exist_sslcerts
    except Exception as e:
        print("[ERROR] Unexpected error to get exist certificates: %s" % e) 
Example #9
Source File: __init__.py    From aws-ops-automator with Apache License 2.0
def set_dynamodb_tags(ddb_client, resource_arns, tags, can_delete=True, logger=None):
    def create_tags(client, resources, created_tags):
        for arn in resources:
            client.tag_resource_with_retries(ResourceArn=arn, Tags=created_tags)

    def delete_tags(client, resources, deleted_tags):
        for arn in resources:
            client.untag_resource_with_retries(ResourceArn=arn, TagKeys=deleted_tags)

    _set_resource_tags(client=ddb_client,
                       resources=resource_arns,
                       tags=tags,
                       create_func=create_tags,
                       delete_func=delete_tags,
                       logger=logger,
                       can_delete=can_delete) 
Example #10
Source File: __init__.py    From aws-ops-automator with Apache License 2.0
def set_storagegateway_tags(sgw_client, resource_arns, tags, can_delete=True, logger=None):
    def create_tags(client, resources, created_tags):
        for arn in resources:
            client.add_tags_to_resource_with_retries(ResourceARN=arn, Tags=created_tags)

    def delete_tags(client, resources, deleted_tags):
        for arn in resources:
            client.remove_tags_from_resource_with_retries(ResourceARN=arn, TagKeys=deleted_tags)

    _set_resource_tags(client=sgw_client,
                       resources=resource_arns,
                       tags=tags,
                       create_func=create_tags,
                       delete_func=delete_tags,
                       logger=logger,
                       can_delete=can_delete) 
Example #11
Source File: __init__.py    From aws-ops-automator with Apache License 2.0
def _set_resource_tags(client, resources, tags, create_func, delete_func, can_delete=True, logger=None):
    tag_set = copy.deepcopy(tags)

    resource_list = resources if isinstance(resources, list) else [resources]

    tags_to_delete = [t for t in tags if tag_set[t] == TAG_DELETE]
    if len(tags_to_delete) > 0:
        if can_delete:
            for t in tags_to_delete:
                del tag_set[t]
            delete_func(client, resource_list, tags_to_delete)

        else:
            if logger is not None:
                logger.warning(WARN_TAGS_CANNOT_BE_DELETED, ",".join(tags_to_delete))

    if len(tag_set) > 0:
        create_func(client, resource_list, tag_key_value_list(tag_set)) 
Example #12
Source File: __init__.py    From aws-ops-automator with Apache License 2.0
def get_session(role_arn=None, sts_client=None, logger=None):
    if role_arn not in [None, ""]:
        sts = sts_client if sts_client is not None else boto3.client("sts")
        account = account_from_role_arn(role_arn)
        try:
            token = sts.assume_role(RoleArn=role_arn, RoleSessionName="{}-{}".format(account, str(uuid.uuid4())))
        except botocore.exceptions.ClientError as ex:
            if logger is not None:
                logger.error(ERR_ASSUME_ROLE_FOR_ARN, role_arn, ex)
            raise ex
        credentials = token["Credentials"]
        return boto3.Session(aws_access_key_id=credentials["AccessKeyId"],
                             aws_secret_access_key=credentials["SecretAccessKey"],
                             aws_session_token=credentials["SessionToken"])
    else:
        role = os.getenv(ENV_ROLE_ARN)
        if role is not None:
            return get_session(role, sts_client)
        return boto3.Session() 
Example #13
Source File: lambda_handler.py    From aws-auto-remediate with GNU General Public License v3.0
def send_to_missing_remediation_topic(self, config_rule_name, config_payload):
        """Publishes a message onto the missing remediation SNS Topic. The topic should be subscribed to
        by administrators to be aware when their security remediations are not fully covered.
        
        Arguments:
            config_rule_name {string} -- AWS Config Rule name
            config_payload {dictionary} -- AWS Config Rule payload
        """
        client = boto3.client("sns")
        topic_arn = os.environ["MISSINGREMEDIATIONTOPIC"]

        try:
            client.publish(
                TopicArn=topic_arn,
                Message=json.dumps(config_payload),
                Subject=f"No remediation available for Config Rule '{config_rule_name}'",
            )
        except:
            self.logging.error(f"Could not publish to SNS Topic 'topic_arn'.") 
Example #14
Source File: environment.py    From sqs-s3-logger with Apache License 2.0
def _schedule_function(self, function_arn, schedule):
        LOGGER.info('Scheduling function {} to {}'.format(self._function_name, schedule))
        events_client = boto3.client('events')
        trigger_name = '{}-trigger'.format(self._function_name)

        rule_response = events_client.put_rule(
            Name=trigger_name,
            ScheduleExpression=schedule,
            State='ENABLED',
        )
        self._lambda_client.add_permission(
            FunctionName=self._function_name,
            StatementId="{0}-Event".format(trigger_name),
            Action='lambda:InvokeFunction',
            Principal='events.amazonaws.com',
            SourceArn=rule_response['RuleArn'],
        )
        events_client.put_targets(
            Rule=trigger_name,
            Targets=[{'Id': "1", 'Arn': function_arn}]
        ) 
Example #15
Source File: creation_certificates.py    From harmony-ops with MIT License
def request_ssl_certificates(region, dn):
    """
    Notes:
        * idempotent ops
        * store CertificateArn to dict_region_sslcerts
    """
    acm_client = boto3.client(service_name='acm', region_name=region)
    try:
        resp = acm_client.request_certificate(
            DomainName=dn,
            ValidationMethod='DNS',
            IdempotencyToken='112358',
        )
        dict_region_sslcerts[region].append(resp['CertificateArn'])
        print("[INFO] creating ssl certificate in region " + region + " for domain name " + dn)
        print(dn + ': ' + resp['CertificateArn'])
    except Exception as e:
        print("[ERROR] Unexpected error to request certificates: %s" % e) 
Example #16
Source File: log-parser.py    From aws-waf-security-automations with Apache License 2.0
def write_output(bucket_name, key_name, output_key_name, outstanding_requesters):
    logging.getLogger().debug('[write_output] Start')

    try:
        current_data = '/tmp/' + key_name.split('/')[-1] + '_LOCAL.json'
        with open(current_data, 'w') as outfile:
            json.dump(outstanding_requesters, outfile)

        s3 = boto3.client('s3')
        s3.upload_file(current_data, bucket_name, output_key_name, ExtraArgs={'ContentType': "application/json"})
        remove(current_data)

    except Exception as e:
        logging.getLogger().error("[write_output] \tError to write output file")
        logging.getLogger().error(e)

    logging.getLogger().debug('[write_output] End') 
Example #17
Source File: ssh.py    From aegea with Apache License 2.0
def get_kms_auth_token(session, bless_config, lambda_regional_config):
    logger.info("Requesting new KMS auth token in %s", lambda_regional_config["aws_region"])
    token_not_before = datetime.datetime.utcnow() - datetime.timedelta(minutes=1)
    token_not_after = token_not_before + datetime.timedelta(hours=1)
    token = dict(not_before=token_not_before.strftime("%Y%m%dT%H%M%SZ"),
                 not_after=token_not_after.strftime("%Y%m%dT%H%M%SZ"))
    encryption_context = {
        "from": session.resource("iam").CurrentUser().user_name,
        "to": bless_config["lambda_config"]["function_name"],
        "user_type": "user"
    }
    kms = session.client('kms', region_name=lambda_regional_config["aws_region"])
    res = kms.encrypt(KeyId=lambda_regional_config["kms_auth_key_id"],
                      Plaintext=json.dumps(token),
                      EncryptionContext=encryption_context)
    return base64.b64encode(res["CiphertextBlob"]).decode() 
Example #18
Source File: fire.py    From fireprox with GNU General Public License v3.0
def create_api(self, url):
        if not url:
            self.error('Please provide a valid URL end-point')

        print(f'Creating => {url}...')

        template = self.get_template()
        response = self.client.import_rest_api(
            parameters={
                'endpointConfigurationTypes': 'REGIONAL'
            },
            body=template
        )
        resource_id, proxy_url = self.create_deployment(response['id'])
        self.store_api(
            response['id'],
            response['name'],
            response['createdDate'],
            response['version'],
            url,
            resource_id,
            proxy_url
        ) 
Example #19
Source File: ami.py    From cloudformation-ami with MIT License
def create_ami(instance_id, image_params):
    client = boto3.client('ec2')
    # stop the instance so we don't get charged for the template instance running time after the AMI is created
    client.stop_instances(InstanceIds=[instance_id])
    waiter = client.get_waiter('instance_stopped')
    waiter.wait(InstanceIds=[instance_id])

    for forbidden_param in ['InstanceId', 'NoReboot', 'DryRun']:
        if forbidden_param in image_params:
            del image_params[forbidden_param]

    response = client.create_image(
        InstanceId=instance_id,
        **image_params
    )

    ami_id = response['ImageId']

    return ami_id 
Example #20
Source File: fire.py    From fireprox with GNU General Public License v3.0
def _try_instance_profile(self) -> bool:
        """Try instance profile credentials

        :return:
        """
        try:
            if not self.region:
                self.client = boto3.client('apigateway')
            else:
                self.client = boto3.client(
                    'apigateway',
                    region_name=self.region
                )
            self.client.get_account()
            self.region = self.client._client_config.region_name
            return True
        except:
            return False 
Example #21
Source File: PortChange_Generatr.py    From ChaoSlingr with Apache License 2.0
def lambda_handler(event, context):
    print(event)
    slingSG_List = []
    optintag = ""
    if "TagName" in event:
        optintag = event['TagName']
        slingSG_List = getSGList(optintag)
        if len(slingSG_List) >= 1:
            sgidnum = changeR(slingSG_List)
            PortChange = addport(sgidnum)
            Package = json.dumps(PortChange)
            print(sgidnum + ' selected for slinging.')
            print(Package)
            # 'client' is a boto3 Lambda client (boto3.client('lambda')) created at module level in the original source file
            response = client.invoke(
                FunctionName='PortChange_Slingr',
                InvocationType='Event',
                Payload=Package
            )
        else:
            print('No security groups with opt-in tags found.  Doing nothing.')
    else:
        print("No opt-in tag specified.  Doing nothing.")
    #getSecGroupIPPermissions(sgidnum) 
Example #22
Source File: ami.py    From cloudformation-ami with MIT License
def status_is_ok(instance_id):
    response = boto3.client('ec2').describe_instance_status(
        InstanceIds=[
            instance_id,
        ]
    )

    print('status response:', response)

    instance_statuses = response['InstanceStatuses']
    instance_statuses = list(filter(lambda s: s['InstanceId'] == instance_id, instance_statuses))
    assert len(instance_statuses) <= 1

    if not instance_statuses:
        return False

    instance_status = instance_statuses[0]

    return instance_status['InstanceStatus']['Status'] == 'ok' and instance_status['SystemStatus']['Status'] == 'ok' 
Example #23
Source File: pricing.py    From aegea with Apache License 2.0
def describe_services():
    client = boto3.client("pricing", region_name="us-east-1")
    return paginate(client.get_paginator("describe_services")) 
Example #24
Source File: custom-resource.py    From aws-waf-security-automations with Apache License 2.0
def add_athena_partitions(add_athena_partition_lambda_function, resource_type,
                          glue_database, access_log_bucket, glue_access_log_table,
                          glue_waf_log_table, waf_log_bucket, athena_work_group):
    logging.getLogger().info("[add_athena_partitions] Start")

    lambda_client = boto3.client('lambda')
    response = lambda_client.invoke(
        FunctionName=add_athena_partition_lambda_function.rsplit(":",1)[-1],
        Payload="""{
                "resourceType":"%s",
                "glueAccessLogsDatabase":"%s",
                "accessLogBucket":"%s",
                "glueAppAccessLogsTable":"%s",
                "glueWafAccessLogsTable":"%s",
                "wafLogBucket":"%s",
                "athenaWorkGroup":"%s"
            }"""%(resource_type, glue_database, access_log_bucket,
                  glue_access_log_table, glue_waf_log_table,
                  waf_log_bucket, athena_work_group)
    )
    logging.getLogger().info("[add_athena_partitions] Lambda invocation response:\n%s"%response)
    logging.getLogger().info("[add_athena_partitions] End")

#======================================================================================================================
# Auxiliary Functions
#====================================================================================================================== 
Example #25
Source File: environment.py    From sqs-s3-logger with Apache License 2.0
def _bucket_exists(self, name):
        try:
            self._s3.meta.client.head_bucket(Bucket=name)
            return True
        except ClientError as e:
            if e.response['Error']['Code'] == '404':
                return False
            else:
                raise e 
Example #26
Source File: ami.py    From cloudformation-ami with MIT License
def create_instance(instance_params):
    ec2 = boto3.resource('ec2')

    for forbidden_param in ['MaxCount', 'MinCount', 'DryRun']:
        if forbidden_param in instance_params:
            del instance_params[forbidden_param]

    # cast Volume sizes to int
    for idx, block_device_mapping in enumerate(instance_params.get('BlockDeviceMappings', [])):
        instance_params['BlockDeviceMappings'][idx]['Ebs']['VolumeSize'] = int(
            instance_params['BlockDeviceMappings'][idx]['Ebs']['VolumeSize']
        )

    instance_id = ec2.create_instances(
        MinCount=1,
        MaxCount=1,
        **instance_params,
    )[0].id

    # just make sure the instance exists before adding tags
    boto3.client('ec2').get_waiter('instance_exists').wait(
        InstanceIds=[instance_id],
    )

    # now we can add tags
    ec2.create_tags(
        Resources=[instance_id],
        Tags=[
            {
                'Key': "UserDataFinished",  # This tag will be set to "true" when the User Data finishes executing
                'Value': 'false'
            },
        ]
    )

    return instance_id 
Example #27
Source File: ami.py    From cloudformation-ami with MIT License
def ensure_ami_exists(ami_id):
    client = boto3.client('ec2')
    resp = client.describe_images(
        ImageIds=[ami_id],
    )
    print('Describe Images response:', resp)
    image_ids = [image['ImageId'] for image in resp['Images']]
    if ami_id not in image_ids:
        raise UnexistingAMIError(f'Unexisting AMI: {ami_id}') 
Example #28
Source File: add_athena_partitions.py    From aws-waf-security-automations with Apache License 2.0
def execute_athena_query(log, log_bucket, database_name, table_name,
                         athena_client, athena_work_group):
    """
    This function executes the alter table athena query to
    add partition to athena table.

    Args:
        log: object. logging object
        log_bucket: s3 bucket for logs(cloudfront, alb or waf logs)
        database_name: string. The Athena/Glue database name
        table_name: string. The Athena/Glue table name
        athena_client: object. Athena client object
        athena_work_group: string. The Athena WorkGroup used to run the query

    Returns:
        None
    """

    s3_output = "s3://%s/athena_results/"%log_bucket

    query_string = build_athena_query(log, database_name, table_name)

    log.info("[execute_athena_query] Query string:\n%s  \
              \nAthena S3 Output Bucket: %s\n"%(query_string, s3_output))

    response = athena_client.start_query_execution(
        QueryString=query_string,
        QueryExecutionContext={'Database': database_name},
        ResultConfiguration={'OutputLocation': s3_output,
                'EncryptionConfiguration': {
                    'EncryptionOption': 'SSE_S3'
                }
            },
        WorkGroup=athena_work_group
    )

    log.info("[execute_athena_query] Query execution response:\n%s"%response) 
Example #29
Source File: environment.py    From sqs-s3-logger with Apache License 2.0
def __init__(self, queue_name, bucket_name, function_name, cron_schedule='rate(1 day)'):
        self._queue_name = queue_name
        self._bucket_name = bucket_name
        self._function_name = function_name
        self._cron_schedule = cron_schedule
        self._s3 = boto3.resource('s3')
        self._sqs = boto3.resource('sqs')
        self._lambda_client = boto3.client('lambda')
        self._iam_client = boto3.client('iam')
        self._queue = None
        self._bucket = None 
Example #30
Source File: ami.py    From cloudformation-ami with MIT License 5 votes vote down vote up
def create_new_ami_from_instance_params(event, resource_properties):
    ensure_ami_with_name_does_not_exist(image_name=resource_properties['Image']['Name'])

    sfn_client = boto3.client('stepfunctions')
    sfn_client.start_execution(
        stateMachineArn=os.environ['STATE_MACHINE_ARN'],
        input=json.dumps({
            'cfn_event': event,
            'instance_params': resource_properties['TemplateInstance'],
        })
    )