Python subprocess.check_call() Examples

The following are code examples for showing how to use subprocess.check_call(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: fs_image   Author: facebookincubator   File: test_unshare.py    MIT License 6 votes vote down vote up
def test_pid_namespace_dead_keepalive(self):
        """Once the PID namespace's keepalive is killed, commands can no
        longer be entered into the namespace.

        Relies on test-suite helpers (Unshare, nsenter_as_user,
        _popen_sleep_forever, _kill_keepalive) defined elsewhere.
        """
        with Unshare([Namespace.PID]) as unshare:
            self._check_ns_diff(unshare, {'pid'})

            good_echo = nsenter_as_user(unshare, 'echo')
            subprocess.check_call(good_echo)  # Will fail once the NS is dead

            proc, _ = self._popen_sleep_forever(unshare)
            time.sleep(2)  # Leave some time for `sleep` to exit erroneously
            self.assertEqual(None, proc.poll())  # Sleeps forever

            self._kill_keepalive(unshare)

            # The sleeping child is reaped with SIGKILL when the NS dies.
            self.assertEqual(-signal.SIGKILL, proc.wait())  # The NS is dead

            # The `echo` command that worked above no longer works.
            with self.assertRaises(subprocess.CalledProcessError):
                subprocess.check_call(good_echo)
Example 2
Project: fs_image   Author: facebookincubator   File: test_procfs_serde.py    MIT License 6 votes vote down vote up
def _check_serialize_deserialize_idempotence(
        self, subvol, orig_dir, name_with_ext
    ):
        """Deserialize `orig_dir/name_with_ext` from `subvol`, re-serialize
        it into a fresh directory, and assert that the two directories are
        identical -- both rendered metadata and raw file contents.
        """
        data = deserialize_untyped(
            subvol, os.path.join(orig_dir, name_with_ext),
        )
        new_dir = self._next_dir()
        serialize(data, subvol, os.path.join(new_dir, name_with_ext))
        rendered = _render_subvol(subvol)[1]
        # Ensure that the metadata of the once-serialized and
        # twice-serialized directories are identical.
        self.assertEqual(rendered[orig_dir], rendered[new_dir])
        # Also compare the file contents match (diff exits non-zero, and
        # thus check_call raises, on any difference).
        subprocess.check_call([
            'diff', '--recursive', subvol.path(orig_dir), subvol.path(new_dir),
        ])
Example 3
Project: godot-mono-builds   Author: godotengine   File: os_utils.py    MIT License 6 votes vote down vote up
def run_command(command, args=None, cwd=None, env=None, name='command'):
    """Run an external command, raising BuildError on non-zero exit.

    Args:
        command: The executable to run (must be a str).
        args: Optional list of arguments appended after the command.
        cwd: Optional working directory for the child process.
        env: Optional environment mapping for the child process.
        name: Human-readable label used in log and error messages.

    Raises:
        BuildError: If the command exits with a non-zero return code.
    """
    # FIX: the original used a mutable default (`args=[]`), which is shared
    # across calls; use None as the sentinel instead.  Also removed the
    # unused inner helper `cmd_args_to_str` (printing already goes through
    # subprocess.list2cmdline).
    if args is None:
        args = []
    assert isinstance(command, str) and isinstance(args, list)
    args = [command] + args

    check_call_args = {}
    if cwd is not None:
        check_call_args['cwd'] = cwd
    if env is not None:
        check_call_args['env'] = env

    import subprocess
    try:
        print('Running command \'%s\': %s' % (name, subprocess.list2cmdline(args)))
        subprocess.check_call(args, **check_call_args)
        print('Command \'%s\' completed successfully' % name)
    except subprocess.CalledProcessError as e:
        raise BuildError('\'%s\' exited with error code: %s' % (name, e.returncode))
Example 4
Project: arm_now   Author: nongiach   File: options.py    MIT License 6 votes vote down vote up
def sync_upload(rootfs, src, dest):
    """Tar the host's current directory into the rootfs image, and install
    a guest init script that unpacks it at boot, plus a /sbin/save script.

    Note: `src` is accepted but unused here -- presumably kept for
    signature parity with sync_download; confirm against callers.
    """
    fs = Filesystem(rootfs)
    if not fs.implemented():
        return
    print("Adding current directory to the filesystem..")
    with tempfile.TemporaryDirectory() as tmpdirname:
        # Skip the arm_now working dir and anything starting with '-'
        # (which tar would otherwise parse as an option).
        files = [i for i in os.listdir(".") if i != "arm_now" and not i.startswith("-")]
        if files:
            tar = tmpdirname + "/current_directory.tar"
            subprocess.check_call(["tar", "cf", tar] + files)
            # e2cp copies into the ext2 image; -G 0 -O 0 makes root the owner.
            subprocess.check_call("e2cp -G 0 -O 0".split(' ') + [tar, rootfs + ":/"])
            # NB: 'diretory' is misspelled, but consistently so (the script
            # removes itself under the same name) -- do not "fix" one side only.
            fs.create("/etc/init.d/S95_sync_current_diretory", """
                        cd {dest}
                        tar xf /current_directory.tar
                        rm /current_directory.tar
                        rm /etc/init.d/S95_sync_current_diretory
                        """.format(dest=dest), right=555)

    # TODO: check rootfs fs against parameter injection
    fs.create("/sbin/save", """
                cd {dest}
                tar cf /root.tar *
                sync
                """.format(dest=dest), right=555)
Example 5
Project: BASS   Author: Cisco-Talos   File: ida_service.py    GNU General Public License v2.0 6 votes vote down vote up
def run_ida(db, is_64_bit, timeout, script, *args):
    """Run an IDA Pro batch analysis of `db` with the given script,
    raising TimeoutError if the child is killed for exceeding `timeout`.

    :param db: path of the sample/database passed to IDA via -B
    :param is_64_bit: use the 64-bit IDA binary (suffix "64") when True
    :param timeout: seconds allowed for the analysis (None = unlimited)
    :param script: script path passed to IDA's -S switch
    :param args: extra arguments forwarded to the script (each quoted)
    """
    if os.path.exists(os.path.join(IDA_DIR, "idat")):
        # This is IDA >= 7.0
        IDA_EXECUTABLE = os.path.join(IDA_DIR, "idat")
    else:
        # Older releases ship the text-mode binary as "idal".
        IDA_EXECUTABLE = os.path.join(IDA_DIR, "idal")

    if is_64_bit:
        ida = "{}64".format(IDA_EXECUTABLE)
    else:
        ida = IDA_EXECUTABLE

    # -S runs the script after auto-analysis; -B enables batch mode.
    cmd = (ida, "-S{} {}".format(script, " ".join("\"{}\"".format(x) for x in args)), "-B", db)
    env = os.environ.copy()
    env["TVHEADLESS"] = "true"  # keep IDA's text UI fully headless
    env["IDALOG"] = os.path.join(LOGS_DIR, datetime.datetime.strftime(datetime.datetime.now(), "ida_%Y-%m-%d_%H-%M-%S.%f.log"))
    logger.info("Executing command %s, log output is in '%s'", " ".join("'%s'" % x for x in cmd), env["IDALOG"])
    try:
        # NOTE: this is the project's own check_call wrapper (supports
        # `timeout`), not subprocess.check_call.
        check_call(cmd, timeout = timeout, env = env)
    except OSError as err:
        if err.errno == -9:
            # -9: the child was SIGKILLed, i.e. the timeout watchdog fired.
            raise TimeoutError(err.errno, "Program execution timed out")
        else:
            raise err
Example 6
Project: twonicorn   Author: UnblockedByOps   File: deploy.py    Apache License 2.0 6 votes vote down vote up
def sync_artifact_war(tmp_dir_id, deploy_path, artifact_file):
    """Explode a .war artifact and rsync the exploded tree into place."""
    tmp_artifact_path_current = tmp_dir_id + '/current'

    # Unpack the archive into the temporary "current" directory.
    logging.debug('Expanding artifact : %s in %s'
                  % (artifact_file, tmp_artifact_path_current))
    unzip(tmp_dir_id + '/' + artifact_file, tmp_artifact_path_current)

    # Mirror the exploded tree to the deployment path.
    logging.debug('Rsyncing %s to %s'
                  % (tmp_artifact_path_current, deploy_path))
    # TODO: need to ensure no trailing / ?
    rsync_cmd = ["rsync", "-ra", "--delete",
                 tmp_artifact_path_current, deploy_path]
    subprocess.check_call(rsync_cmd)
Example 7
Project: gluster-integration   Author: Tendrl   File: setup.py    GNU Lesser General Public License v2.1 6 votes vote down vote up
def run(self):

        """Interactive release bump: rewrite the version in version.py and
        the RPM spec file, then create a git commit recording the change.

        Reads `metadata['version']` (the old version) and `self.version`
        (the new one); exits via SystemExit if the user aborts with Ctrl-C.
        """
        print('old version: %s  new version: %s' %
              (metadata['version'], self.version))
        try:
            input('Press enter to confirm, or ctrl-c to exit >')
        except KeyboardInterrupt:
            raise SystemExit("\nNot proceeding")

        # Textual find/replace of the version assignment in version.py.
        old = "__version__ = '%s'" % metadata['version']
        new = "__version__ = '%s'" % self.version
        module_file = read_module_contents()
        with open('version.py', 'w') as fileh:
            fileh.write(module_file.replace(old, new))

        # Same replacement for the RPM spec's Version: tag.
        old = 'Version: %s' % metadata['version']
        new = 'Version: %s' % self.version
        spec_file = read_spec_contents()
        with open('tendrl-gluster-integration.spec', 'w') as fileh:
            fileh.write(spec_file.replace(old, new))

        # Commit everything with a standard commit message
        cmd = ['git', 'commit', '-a', '-m', 'version %s' % self.version]
        print(' '.join(cmd))
        subprocess.check_call(cmd)
Example 8
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: docker_cache.py    Apache License 2.0 6 votes vote down vote up
def delete_local_docker_cache(docker_tag):
    """
    Delete the local docker cache for the entire docker image chain
    :param docker_tag: Docker tag
    :return: None
    """
    history_cmd = ['docker', 'history', '-q', docker_tag]

    try:
        image_ids_b = subprocess.check_output(history_cmd)
        image_ids_str = image_ids_b.decode('utf-8').strip()
        # FIX: the loop variable previously shadowed the builtin `id`;
        # use a descriptive name instead.
        layer_ids = [
            layer_id.strip()
            for layer_id in image_ids_str.split('\n')
            if layer_id != '<missing>'
        ]

        delete_cmd = ['docker', 'image', 'rm', '--force']
        delete_cmd.extend(layer_ids)
        subprocess.check_call(delete_cmd)
    except subprocess.CalledProcessError as error:
        # Could be caused by the image not being present
        logging.debug('Error during local cache deletion %s', error)
Example 9
Project: o2g   Author: hiposfer   File: test_validation.py    MIT License 6 votes vote down vote up
def test_validation(dummy_zipfeed):
    """Run transitfeed over the generated feed."""
    # transitfeed is a python2 application. So we need to run it outside
    # python3 process. Moreover, it is not available in pip repository.
    # Therefore we have to clone it from git and eventually run it in a
    # process of its own. Finally we parse the standard output and look
    # for errors. We ignore the warnings for now.
    if not os.path.exists('transitfeed'):
        subprocess.check_call(
            ['git', 'clone', '-b', '1.2.16', '--single-branch',
             'https://github.com/google/transitfeed'])

    assert os.path.exists('transitfeed/feedvalidator.py')

    # NOTE(review): stderr is captured but never inspected (`err` is unused).
    p = subprocess.Popen(['python2.7', 'transitfeed/feedvalidator.py', '-n',
                         dummy_zipfeed],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    out, err = p.communicate()

    print("Google Transitfeed's output:\n{}".format(out.decode('utf8')))

    assert 'error' not in out.decode('utf8')
    # NOTE(review): this second assert is redundant -- if 'error' is absent
    # from the output, 'errors' cannot possibly be present.
    assert 'errors' not in out.decode('utf8')
Example 10
Project: fs_image   Author: facebookincubator   File: yum_dnf_from_snapshot.py    MIT License 5 votes vote down vote up
def _dummy_dev() -> str:
    'A whitelist of devices is safer than the entire host /dev'
    # This is a generator (it yields the path) -- presumably wrapped by
    # @contextmanager outside this view; the `-> str` annotation describes
    # the value bound by `with ... as`, not the raw return type. Confirm.
    dummy_dev = tempfile.mkdtemp()
    try:
        # Make the dir root-owned and world-readable, then create a single
        # 'null' entry (a plain file via `touch`, not a device node).
        subprocess.check_call(['sudo', 'chown', 'root:root', dummy_dev])
        subprocess.check_call(['sudo', 'chmod', '0755', dummy_dev])
        subprocess.check_call([
            'sudo', 'touch', os.path.join(dummy_dev, 'null'),
        ])
        yield dummy_dev
    finally:
        # We cannot use `TemporaryDirectory` for cleanup since the directory
        # and contents are owned by root.  Remove recursively since RPMs
        # like `filesystem` can touch this dummy directory.  We will discard
        # their writes, which do not, anyhow, belong in a container image.
        subprocess.run(['sudo', 'rm', '-r', dummy_dev])
Example 11
Project: fs_image   Author: facebookincubator   File: test_package_image.py    MIT License 5 votes vote down vote up
def test_package_image_as_btrfs_loopback(self):
        """Package a layer as a btrfs loopback image, mount it, and verify
        the subvolume's sendstream matches the original fixture.
        """
        with self._package_image(
            self._sibling_path('create_ops.layer'), 'btrfs',
        ) as out_path, \
                Unshare([Namespace.MOUNT, Namespace.PID]) as unshare, \
                tempfile.TemporaryDirectory() as mount_dir, \
                tempfile.NamedTemporaryFile() as temp_sendstream:
            # Future: use a LoopbackMount object here once that's checked in.
            subprocess.check_call(nsenter_as_root(
                unshare, 'mount', '-t', 'btrfs', '-o', 'loop,discard,nobarrier',
                out_path, mount_dir,
            ))
            try:
                # Future: Once I have FD, this should become:
                # Subvol(
                #     os.path.join(mount_dir.fd_path(), 'create_ops'),
                #     already_exists=True,
                # ).mark_readonly_and_write_sendstream_to_file(temp_sendstream)
                subprocess.check_call(nsenter_as_root(
                    unshare, 'btrfs', 'send', '-f', temp_sendstream.name,
                    os.path.join(mount_dir, 'create_ops'),
                ))
                self._assert_sendstream_files_equal(
                    self._sibling_path('create_ops-original.sendstream'),
                    temp_sendstream.name,
                )
            finally:
                # BUG FIX: the original merely *built* the umount command
                # (nsenter_as_root returns an argv list) and never executed
                # it, leaking the mount after every test run.
                subprocess.check_call(
                    nsenter_as_root(unshare, 'umount', mount_dir))
Example 12
Project: fs_image   Author: facebookincubator   File: test_tarball.py    MIT License 5 votes vote down vote up
def test_tarball(self):
        """Build a tarball (and a zstd-compressed copy) from a temp
        filesystem fixture, check TarballItem's `provides`/`requires`
        for both, then check that a bad content_hash is rejected.
        """
        with temp_filesystem() as fs_path, tempfile.TemporaryDirectory() as td:
            tar_path = os.path.join(td, 'test.tar')
            zst_path = os.path.join(td, 'test.tar.zst')

            # Strip the temp-dir prefix so archive members are relative.
            with tarfile.TarFile(tar_path, 'w') as tar_obj:
                tar_obj.add(fs_path, filter=_tarinfo_strip_dir_prefix(fs_path))
            subprocess.check_call(['zstd', tar_path, '-o', zst_path])

            for path in (tar_path, zst_path):
                self._check_item(
                    _tarball_item(path, 'y'),
                    temp_filesystem_provides('y'),
                    {require_directory('y')},
                )

            # Test a hash validation failure, follows the item above
            with self.assertRaisesRegex(AssertionError, 'failed hash vali'):
                image_source_item(
                    TarballItem, exit_stack=None, layer_opts=DUMMY_LAYER_OPTS,
                )(
                    from_target='t',
                    into_dir='y',
                    source={
                        'source': tar_path,
                        'content_hash': 'sha256:deadbeef',
                    },
                    force_root_ownership=False,
                )

    # NB: We don't need to test `build` because TarballItem has no logic
    # specific to generated vs pre-built tarballs.  It would really be
    # enough just to construct the item, but it was easy to test `provides`.
Example 13
Project: pyblish-win   Author: pyblish   File: test_subprocess.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_check_call_zero(self):
        # check_call() function with zero return code
        rc = subprocess.check_call([sys.executable, "-c",
                                    "import sys; sys.exit(0)"])
        self.assertEqual(rc, 0) 
Example 14
Project: pyblish-win   Author: pyblish   File: test_subprocess.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_check_call_nonzero(self):
        # check_call() function with non-zero return code
        with self.assertRaises(subprocess.CalledProcessError) as c:
            subprocess.check_call([sys.executable, "-c",
                                   "import sys; sys.exit(47)"])
        self.assertEqual(c.exception.returncode, 47) 
Example 15
Project: pyblish-win   Author: pyblish   File: test_subprocess.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_check_output_nonzero(self):
        # check_call() function with non-zero return code
        with self.assertRaises(subprocess.CalledProcessError) as c:
            subprocess.check_output(
                    [sys.executable, "-c", "import sys; sys.exit(5)"])
        self.assertEqual(c.exception.returncode, 5) 
Example 16
Project: pyblish-win   Author: pyblish   File: test_os.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_setreuid_neg1(self):
        # Needs to accept -1.  We run this in a subprocess to avoid
        # altering the test runner's process state (issue8045).
        subprocess.check_call([
                sys.executable, '-c',
                'import os,sys;os.setreuid(-1,-1);sys.exit(0)']) 
Example 17
Project: pyblish-win   Author: pyblish   File: test_os.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_setregid_neg1(self):
        # Needs to accept -1.  We run this in a subprocess to avoid
        # altering the test runner's process state (issue8045).
        subprocess.check_call([
                sys.executable, '-c',
                'import os,sys;os.setregid(-1,-1);sys.exit(0)']) 
Example 18
Project: arm_now   Author: nongiach   File: options.py    MIT License 5 votes vote down vote up
def sync_download(rootfs, src, dest):
    """Copy `src` out of the rootfs image to host path `dest`; if the
    guest's /sbin/save script produced a root.tar, unpack it on the host
    and remove it afterwards.
    """
    fs = Filesystem(rootfs)
    if not fs.implemented():
        return
    fs.get(src, dest)
    if os.path.exists("root.tar"):
        # Unpack into the current directory, then drop the archive.
        subprocess.check_call("tar xf root.tar".split(' '))
        os.unlink("root.tar")
    else:
        pgreen("Use the 'save' command before exiting the vm to retrieve all files on the host")
Example 19
Project: arm_now   Author: nongiach   File: filesystem.py    MIT License 5 votes vote down vote up
def put(self, src, dest, right=444):
        # Copy host file `src` into the ext2 image at `dest`, root-owned
        # (-G 0 -O 0) with permissions `right` (-P).
        copy_cmd = ["e2cp", "-G", "0", "-O", "0", "-P",
                    str(right), src, self.rootfs + ":" + dest]
        subprocess.check_call(copy_cmd)
Example 20
Project: arm_now   Author: nongiach   File: filesystem.py    MIT License 5 votes vote down vote up
def get(self, src, dest):
        # Extract `src` from the ext2 image into host path `dest`.
        image_spec = "{}:{}".format(self.rootfs, src)
        subprocess.check_call(["e2cp", image_spec, dest])
Example 21
Project: arm_now   Author: nongiach   File: filesystem.py    MIT License 5 votes vote down vote up
def rm(self, filename):
        """Delete `filename` inside the ext2 rootfs image via e2rm.

        A CalledProcessError (e.g. the file is already gone) is downgraded
        to a warning by the `exall` wrapper instead of propagating.
        """
        def e2rm_warning(_exception):
            # Callback invoked by `exall` when e2rm fails.
            porange("WARNING: e2rm file already suppressed")
        with exall(subprocess.check_call, subprocess.CalledProcessError, e2rm_warning):
            subprocess.check_call(["e2rm", self.rootfs + ":" + filename])
Example 22
Project: arm_now   Author: nongiach   File: filesystem.py    MIT License 5 votes vote down vote up
def sed(self, regex, path, right=444):
        """Run a sed expression over a file inside the rootfs.

        Example: fs.sed(r's/init.d\/S/init.d\/K/g', '/etc/init.d/rcK', right=755)

        SECURITY FIX: the original interpolated `regex` into a shell
        command line (shell=True), allowing command injection.  The sed
        expression is now passed as a single argv element with no shell,
        and stdout is captured directly into the new file.
        """
        with tempfile.TemporaryDirectory() as tempdir:
            print("Tempdir {}".format(tempdir))
            new = tempdir + "/new"
            old = tempdir + "/old"
            self.get(path, old)
            # Equivalent of `sed '{regex}' old > new`, without /bin/sh.
            with open(new, "w") as out:
                subprocess.check_call(["sed", regex, old], stdout=out)
            self.put(new, path, right=right)
Example 23
Project: arm_now   Author: nongiach   File: filesystem.py    MIT License 5 votes vote down vote up
def resize(self, size):
        # Resize the image, repair and grow the ext2 filesystem to fill
        # it, then list the file so the new size is visible.
        commands = (
            ["qemu-img", "resize", self.rootfs, size],
            ["e2fsck", "-fy", self.rootfs],
            ["resize2fs", self.rootfs],
            ["ls", "-lh", self.rootfs],
        )
        for command in commands:
            subprocess.check_call(command)
        pgreen("[+] Resized to {size}".format(size=size))
Example 24
Project: arm_now   Author: nongiach   File: filesystem.py    MIT License 5 votes vote down vote up
def correct(self):
        """Recreate the ext2 filesystem on the configured rootfs image.

        BUG FIX: the original called ``.format(size=size)`` on a string
        with no placeholder, where ``size`` is not defined in this scope --
        every call raised NameError before reaching porange().
        """
        porange("[+] Correcting ... (be patient)")
        subprocess.check_call("mke2fs -F -b 1024 -m 0 -g 272".split() + [Config.ROOTFS])
Example 25
Project: arm_now   Author: nongiach   File: filesystem.py    MIT License 5 votes vote down vote up
def check(self):
        """fsck the rootfs image, tolerating exit status 1 (errors fixed).

        e2fsck exits 1 when it corrected problems; that case only warns.
        """
        try:
            print(" Checking the filesystem ".center(80, "+"))
            subprocess.check_call(["e2fsck", "-vfy", self.rootfs])
        except subprocess.CalledProcessError as e:
            print(e)
            # BUG FIX: str.find() returns -1 (truthy!) when absent, so the
            # original condition was always true.  Use `in` instead.
            if "returned non-zero exit status 1." in str(e):
                porange("It's ok but next time poweroff")
Example 26
Project: arm_now   Author: nongiach   File: filesystem.py    MIT License 5 votes vote down vote up
def ls(self, path):
        # Pretty-print an `e2ls` listing of `path` inside the rootfs image,
        # preceded by a centered banner showing the exact command.
        ls_cmd = ["e2ls", self.rootfs + ":" + path]
        banner = " " + " ".join(ls_cmd) + " "
        print(banner.center(80, "~"))
        subprocess.check_call(ls_cmd)
Example 27
Project: arm_now   Author: nongiach   File: filesystem.py    MIT License 5 votes vote down vote up
def resize(self, size):
        # Resize the raw image with qemu-img, then list it so the new
        # size is visible in the output.
        for command in (["qemu-img", "resize", self.rootfs, size],
                        ["ls", "-lh", self.rootfs]):
            subprocess.check_call(command)
        pgreen("[+] Resized to {size}".format(size=size))
Example 28
Project: arm_now   Author: nongiach   File: filesystem.py    MIT License 5 votes vote down vote up
def resize(self, size):
        # Grow/shrink the image, then show the resulting file size.
        subprocess.check_call(["qemu-img", "resize", self.rootfs, size])
        listing_cmd = ["ls", "-lh", self.rootfs]
        subprocess.check_call(listing_cmd)
        pgreen("[+] Resized to {size}".format(size=size))
Example 29
Project: BASS   Author: Cisco-Talos   File: bindiff.py    GNU General Public License v2.0 5 votes vote down vote up
def bindiff_pickle_export(self, sample, is_64_bit = True, timeout = None):
        """
        Load a sample into IDA Pro, perform autoanalysis and export a pickle file. 
        :param sample: The sample's path
        :param is_64_bit: If the sample needs to be analyzed by the 64 bit version of IDA
        :param timeout: Timeout for the analysis in seconds
        :return: The file name of the exported pickle database. The file needs
        to be deleted by the caller. Returns None on error.
        """

        data_to_send = {
            "timeout": timeout,
            "is_64_bit": is_64_bit}
        # Round-robin over the configured analysis-service URLs.
        url = "%s/binexport_pickle" % next(self._urls)
        log.debug("curl -XPOST --data '%s' '%s'", json.dumps(data_to_send), url)
        # NOTE(review): the file handle opened here is never closed.
        response = requests.post(url, data = data_to_send, files = {os.path.basename(sample): open(sample, "rb")})
        if response.status_code == 200:
            handle_tar, path_tar = tempfile.mkstemp(suffix = ".tar.gz")
            with os.fdopen(handle_tar, "wb") as f:
                # NOTE(review): map() is lazy on Python 3 and would write
                # nothing -- this relies on Python 2 semantics (eager map).
                map(f.write, response.iter_content(1024))
            # Unpack the returned tarball and copy out the two artifacts.
            directory = tempfile.mkdtemp()
            subprocess.check_call(["tar", "xf", path_tar], cwd = directory)

            handle_bindiff, output_bindiff = tempfile.mkstemp(suffix = ".BinExport")
            with os.fdopen(handle_bindiff, "wb") as f:
                with open(os.path.join(directory, "output.BinExport"), "rb") as f2:
                    shutil.copyfileobj(f2, f)
            handle_pickle, output_pickle = tempfile.mkstemp(suffix = ".pickle")
            with os.fdopen(handle_pickle, "wb") as f:
                with open(os.path.join(directory, "output.pickle"), "rb") as f2:
                    shutil.copyfileobj(f2, f)
            # Clean up the intermediate tarball and scratch directory.
            os.unlink(path_tar)
            shutil.rmtree(directory)
            return output_bindiff, output_pickle
        else:
            log.error("Bindiff server responded with status code %d: %s", response.status_code, response.content)
            return None
Example 30
Project: BASS   Author: Cisco-Talos   File: ida_service.py    GNU General Public License v2.0 5 votes vote down vote up
def check_call(cmd, cwd = None, timeout = None, env = None):
    """Run `cmd`, optionally killing it after `timeout` seconds.

    :param cmd: argv list of the program to run
    :param cwd: working directory (defaults to the current one at call time)
    :param timeout: seconds before the child is killed, or None
    :param env: environment mapping (defaults to this process's environment)
    :raises OSError: carrying the child's return code on non-zero exit
        (a timeout kill surfaces as return code -9 / SIGKILL)
    """
    # FIX: the original defaults `cwd=os.getcwd()` / `env=os.environ` were
    # evaluated once at import time, so a later os.chdir() was silently
    # ignored.  Resolve them at call time instead.
    if cwd is None:
        cwd = os.getcwd()
    if env is None:
        env = os.environ
    proc = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr = subprocess.PIPE, env = env, cwd = cwd)
    timer = None
    if timeout:
        timer = Timer(timeout, proc.kill)
        timer.start()
    try:
        stdout, stderr = proc.communicate()
    finally:
        # FIX: cancel the pending kill so it cannot fire long after the
        # child has already exited.
        if timer is not None:
            timer.cancel()
    if proc.returncode != 0:
        raise OSError(proc.returncode, "Subprocess returned error code")
Example 31
Project: BASS   Author: Cisco-Talos   File: ida_service.py    GNU General Public License v2.0 5 votes vote down vote up
def bindiff_pickle_export():
    """
    Run the IDA Pro autoanalysis on the input file and export a BinExport database.
    :param input: The input file
    :return: Status code 200 and a JSON object containing the output database
        name in key 'output', or status code 422 on invalid parameters, 408 on
        timeout or 500 on other errors.
    """
    logger.info("bindiff_pickle_export called")

    directory = None
    try:
        directory = tempfile.mkdtemp()
        if len(request.files) != 1:
            return make_response(jsonify(error = "Missing file parameter"), 422)

        # NOTE(review): .items()[0] only works on Python 2 (Python 3
        # returns a non-indexable view); this service is Python-2 era code.
        filename, file_ = request.files.items()[0]
        input_ = os.path.join(directory, sanitize_filename(filename))
        file_.save(input_)

        output_binexport = os.path.join(directory, "output.BinExport")
        output_pickle = os.path.join(directory, "output.pickle")

        # NOTE(review): form values are strings, so a posted is_64_bit of
        # "False" is still truthy -- confirm callers' expectations.
        timeout = request.form.get('timeout', None)
        is_64_bit = request.form.get('is_64_bit', True)
        try:
            run_ida(input_, is_64_bit, timeout, os.path.join(PREFIX, "export_binexport_pickle.py"), "binexport_pickle", output_binexport, output_pickle)
            logger.info("Command completed successfully")
            # Bundle both artifacts into one tarball for the response.
            output_tar = os.path.join(directory, "output.tar.gz")
            subprocess.check_call(["tar", "czf", output_tar, os.path.relpath(output_binexport, directory), os.path.relpath(output_pickle, directory)], cwd = directory)
            return send_file(open(output_tar, "rb"), as_attachment = True, attachment_filename = "%s.tar.gz" % filename, mimetype = "application/gzip")
        except TimeoutError:
            return jsonify(error = "Program execution timed out"), 408
        except OSError as err:
            return jsonify(error = "Program execution failed with error %d" % err.errno), 500
    finally:
        # Always clean up the scratch directory, success or failure.
        if directory is not None:
            shutil.rmtree(directory)
Example 32
Project: BASS   Author: Cisco-Talos   File: ida_service.py    GNU General Public License v2.0 5 votes vote down vote up
def bindiff_compare():
    """Flask endpoint: diff two uploaded binaries ('primary'/'secondary')
    with BinDiff and return the resulting sqlite database.

    Responds 422 on missing uploads, 408 on timeout, 500 on other errors.
    """
    logger.info("bindiff_compare called")

    input_dir = tempfile.mkdtemp()
    output_dir = tempfile.mkdtemp()
    try:
        primary = os.path.join(input_dir, "primary")
        secondary = os.path.join(input_dir, "secondary")
        try:
            request.files["primary"].save(primary)
            request.files["secondary"].save(secondary)
        except KeyError:
            return make_response(jsonify(error="Missing parameter 'primary' or 'secondary'"), 422)

        timeout = request.form.get('timeout', None)

        cmd = (BINDIFF_DIFFER, "--primary", primary, "--secondary", secondary, "--output_dir", output_dir)
        logger.info("Executing %s", " ".join("'%s'" % x for x in cmd))
        # Project-local check_call wrapper (supports `timeout`), not
        # subprocess.check_call.
        check_call(cmd, cwd = output_dir, timeout = timeout)
        db_path = [os.path.join(output_dir, x) for x in os.listdir(output_dir)]
        if len(db_path) != 1:
            return make_response(jsonify(error = "BinDiff generated 0 or several output files"), 500)
        return send_file(open(db_path[0], "rb"), as_attachment = True, attachment_filename = "BinDiff.sqlite3", mimetype = "application/binary")
    except OSError as err:
        if err.errno == -9:
            # -9: the child was SIGKILLed by the timeout watchdog.
            return make_response(jsonify(error = "Program execution timed out"), 408)
        else:
            return make_response(jsonify(error = "Program execution failed with error %d" % err.errno), 500)
    finally:
        # Remove both scratch dirs regardless of outcome.
        shutil.rmtree(input_dir)
        shutil.rmtree(output_dir)
Example 33
Project: BASS   Author: Cisco-Talos   File: ida_service.py    GNU General Public License v2.0 5 votes vote down vote up
def run_ida(db, is_64_bit, timeout, script, *args):
    """Run an IDA Pro batch analysis of `db` with the given script,
    raising TimeoutError if the child is killed for exceeding `timeout`.

    :param db: path of the sample/database passed to IDA via -B
    :param is_64_bit: use the 64-bit IDA binary (suffix "64") when True
    :param timeout: seconds allowed for the analysis (None = unlimited)
    :param script: script path passed to IDA's -S switch
    :param args: extra arguments forwarded to the script (each quoted)
    """
    if os.path.exists(os.path.join(IDA_DIR, "idat")):
        # This is IDA >= 7.0
        IDA_EXECUTABLE = os.path.join(IDA_DIR, "idat")
    else:
        # Older releases ship the text-mode binary as "idal".
        IDA_EXECUTABLE = os.path.join(IDA_DIR, "idal")

    if is_64_bit:
        ida = "{}64".format(IDA_EXECUTABLE)
    else:
        ida = IDA_EXECUTABLE

    # -S runs the script after auto-analysis; -B enables batch mode.
    cmd = (ida, "-S{} {}".format(script, " ".join("\"{}\"".format(x) for x in args)), "-B", db)
    env = os.environ.copy()
    env["TVHEADLESS"] = "true"  # keep IDA's text UI fully headless
    env["IDALOG"] = os.path.join(LOGS_DIR, datetime.datetime.strftime(datetime.datetime.now(), "ida_%Y-%m-%d_%H-%M-%S.%f.log"))
    logger.info("Executing command %s, log output is in '%s'", " ".join("'%s'" % x for x in cmd), env["IDALOG"])
    try:
        # NOTE: this is the project's own check_call wrapper (supports
        # `timeout`), not subprocess.check_call.
        check_call(cmd, timeout = timeout, env = env)
    except OSError as err:
        if err.errno == -9:
            # -9: the child was SIGKILLed, i.e. the timeout watchdog fired.
            raise TimeoutError(err.errno, "Program execution timed out")
        else:
            raise err


# Run ida only with auto-analysis, no scripts 
Example 34
Project: BASS   Author: Cisco-Talos   File: ida_service.py    GNU General Public License v2.0 5 votes vote down vote up
def bindiff_compare():
    """Flask endpoint: diff two uploaded binaries ('primary'/'secondary')
    with BinDiff and return the resulting sqlite database.

    Responds 422 on missing uploads, 408 on timeout, 500 on other errors.
    """
    logger.info("bindiff_compare called")

    input_dir = tempfile.mkdtemp()
    output_dir = tempfile.mkdtemp()
    try:
        primary = os.path.join(input_dir, "primary")
        secondary = os.path.join(input_dir, "secondary")
        try:
            request.files["primary"].save(primary)
            request.files["secondary"].save(secondary)
        except KeyError:
            return make_response(jsonify(error="Missing parameter 'primary' or 'secondary'"), 422)

        timeout = request.form.get('timeout', None)

        cmd = (BINDIFF_DIFFER, "--primary", primary, "--secondary", secondary, "--output_dir", output_dir)
        logger.info("Executing %s", " ".join("'%s'" % x for x in cmd))
        # Project-local check_call wrapper (supports `timeout`), not
        # subprocess.check_call.
        check_call(cmd, cwd = output_dir, timeout = timeout)
        db_path = [os.path.join(output_dir, x) for x in os.listdir(output_dir)]
        if len(db_path) != 1:
            return make_response(jsonify(error = "BinDiff generated 0 or several output files"), 500)
        return send_file(open(db_path[0], "rb"), as_attachment = True, attachment_filename = "BinDiff.sqlite3", mimetype = "application/binary")
    except OSError as err:
        if err.errno == -9:
            # -9: the child was SIGKILLed by the timeout watchdog.
            return make_response(jsonify(error = "Program execution timed out"), 408)
        else:
            return make_response(jsonify(error = "Program execution failed with error %d" % err.errno), 500)
    finally:
        # Remove both scratch dirs regardless of outcome.
        shutil.rmtree(input_dir)
        shutil.rmtree(output_dir)
Example 35
Project: CFN-CR-PythonLambdaLayer   Author: kisst   File: lambda_function.py    GNU General Public License v3.0 5 votes vote down vote up
def install_with_pip(packages):
    """
    Install pip package into /tmp folder

    :param packages: iterable of pip requirement strings; each is
        installed/upgraded into PKG_DIR with pip's output captured in
        /tmp/pip-install.log
    """
    print(" -- Installing pip packages")
    # FIX: use `with` so the log file is flushed and closed even if a pip
    # invocation raises (the original leaked the handle).
    with open("/tmp/pip-install.log", "wb") as logfile:
        for package in packages:
            print(" ---- Installing {}".format(package))
            subprocess.check_call([
                sys.executable, '-m', 'pip', 'install',
                '--upgrade', '-t', PKG_DIR, package], stdout=logfile)
Example 36
Project: kvmd   Author: pikvm   File: __init__.py    GNU General Public License v3.0 5 votes vote down vote up
def _remount(path: str, rw: bool) -> None:
    """Flip the mount at `path` between read-write and read-only."""
    mode = "rw" if rw else "ro"
    _log(f"Remouning {path} to {mode.upper()}-mode ...")
    try:
        remount_cmd = [_MOUNT_PATH, "--options", f"remount,{mode}", path]
        subprocess.check_call(remount_cmd)
    except subprocess.CalledProcessError as err:
        # A failed remount is fatal for the service.
        raise SystemExit(f"Can't remount: {err}")
Example 37
Project: petuk.corp   Author: fnugrahendi   File: DatabaseCreator.py    GNU General Public License v2.0 5 votes vote down vote up
def Execute(self):
		"""Run the accumulated SQL dump (self.sqldump) against MySQL.

		Tries the generic MySQL connection first, then falls back to the
		project-specific ("Garvin") port; executes the dump and arms a
		4-second timer that fires self.Selesai ("done").  Python 2 code.
		"""
		#~ f = open("creator.md","w")
		#~ f.write(self.sqldump)
		#~ f.close()
		#~ f = open("creator.md","r")
		#~ subprocess.check_call(self.si_om.BasePath[:-1]+"\\mysql\\bin\\mysql.exe --port=44559 -u root test",stdin=f,shell=True)
		#~ f.close()
		#~ subprocess.check_call(self.si_om.BasePath+"mysql/bin/echo.exe < "+"creator.md")
		#~ self.si_om.DatabaseRunQuery(self.sqldump)
		#---- can't use runQuery --- commit has to wait a few seconds
		#---- new attempt: exclusive mysqldb
		try:
			self.db = MySQLdb.connect(self.si_om.dbHost,self.si_om.dbUser,self.si_om.dbPass,self.si_om.dbDatabase)
			print ("connected database to generic mysql port")
		except:
			try:
				# "gagal" = "failed" (Indonesian); runtime string kept as-is.
				print "gagal"
				self.db = MySQLdb.Connect(host=self.si_om.dbHost, port=self.si_om.dbPort, user=self.si_om.dbUser, passwd=self.si_om.dbPass, db=self.si_om.dbDatabase)
				print ("connected database to Garvin port")
			except:
				print "gagal"
				#~ exit (1)
		#-- already connected; create the cursor
		try:
			self.cursor = self.db.cursor()
		except:return
		self.cursor.execute(self.sqldump)
		self.creatortimer = QtCore.QTimer(self.si_om)
		self.creatortimer.timeout.connect(self.Selesai)
		self.creatortimer.start(4000)
Example 38
Project: twonicorn   Author: UnblockedByOps   File: deploy.py    Apache License 2.0 5 votes vote down vote up
def check_deployment_dirs(deployments):
    """Ensure every deployment path exists; create missing ones via sudo
    and chown them to the current user.  Returns False if any path had
    to be created, True if all were already present.
    """
    logging.info('Checking for existence of deployment dirs')
    retval = True

    for key in deployments.keys():
        deploy_path = deployments[key]['deploy_path']
        logging.debug('Checking for existence of path: %s' % deploy_path)

        if os.path.isdir(deploy_path):
            logging.debug('Path exists: %s' % deploy_path)
            continue

        logging.warn('Path does not exist: %s Creating...' % deploy_path)
        retval = False
        subprocess.check_call(
            ["/usr/bin/sudo", "/bin/mkdir", "-p", deploy_path])

        user = getpass.getuser()
        logging.warn('Changing permissions of %s to user: %s' % (deploy_path, user))
        subprocess.check_call(
            ["/usr/bin/sudo", "/bin/chown", "-R",
             '{0}:'.format(user), deploy_path])

    return retval
Example 39
Project: twonicorn   Author: UnblockedByOps   File: deploy.py    Apache License 2.0 5 votes vote down vote up
def dl_artifact_http(tmp_dir=None, download_url=None, revision=None):
    """Download an artifact over HTTP(S) into tmp_dir using curl.

    :param tmp_dir: Directory to download into (created if missing).
    :param download_url: Full URL of the artifact to fetch.
    :param revision: Revision identifier, used for logging only.

    Failures are logged and swallowed (best-effort), matching the original
    behavior. Relies on module-level ``verify_ssl`` and ``ca_bundle_file``.
    """
    logging.info('Downloading revision: %s artifact: %s'
                 % (revision,
                    download_url))
    # The artifact file name is the last path component of the URL.
    artifact = download_url.rsplit('/', 1)[1]

    if not os.path.exists(tmp_dir):
        logging.debug('Creating dir: %s' % tmp_dir)
        os.makedirs(tmp_dir)

    logging.debug('Downloading to dir: %s' % tmp_dir)
    dest = os.path.join(tmp_dir, artifact)
    try:
        if verify_ssl:
            subprocess.check_call(["curl",
                                   "-s",
                                   "--cacert",
                                   ca_bundle_file,
                                   "-o",
                                   dest,
                                   download_url])
        else:
            logging.warning('ssl cert check disabled for download URL: %s' % download_url)
            subprocess.check_call(["curl",
                                   "-s",
                                   "-k",
                                   "-o",
                                   dest,
                                   download_url])
    # Fixed Python-2-only `except Exception, e` syntax (SyntaxError on py3).
    except Exception as e:
        logging.error('Artifact download failed: %s' % e)
Example 40
Project: twonicorn   Author: UnblockedByOps   File: deploy.py    Apache License 2.0 5 votes vote down vote up
def install_py_package(pip, payload):
    """Install (or upgrade) a Python package using the given pip binary,
    then log the resulting set of installed packages via `pip freeze`."""
    logging.info('Installing package: %s' % payload)
    logging.info('Install command: %s install --pre -U %s' % (pip, payload))
    install_cmd = [pip, "install", "--pre", "-U", payload]
    subprocess.check_call(install_cmd)
    logging.info('The following packages are installed:')
    freeze_cmd = [pip, "freeze"]
    subprocess.check_call(freeze_cmd)
Example 41
Project: twonicorn   Author: UnblockedByOps   File: deploy.py    Apache License 2.0 5 votes vote down vote up
def create_py_virtualenv(deploy_path):
    """Create a new Python virtualenv rooted at deploy_path."""
    logging.info('Creating Virtualenv: %s' % deploy_path)
    venv_cmd = ['virtualenv', deploy_path]
    subprocess.check_call(venv_cmd)
Example 42
Project: twonicorn   Author: UnblockedByOps   File: deploy.py    Apache License 2.0 5 votes vote down vote up
def sync_artifact_jar(tmp_dir_id, deploy_path, artifact_file):
    """Mirror the unpacked artifact into deploy_path with rsync --delete
    so stale files in the destination are removed."""
    logging.debug('Rsyncing %s to %s'
                  % (tmp_dir_id,
                     deploy_path))
    rsync_cmd = ["rsync", "-ra", "--delete", tmp_dir_id + '/', deploy_path]
    subprocess.check_call(rsync_cmd)
Example 43
Project: twonicorn   Author: UnblockedByOps   File: deploy.py    Apache License 2.0 5 votes vote down vote up
def sync_artifact_conf(tmp_dir_id=None, deploy_path=None):
    """Copy configuration files from tmp_dir_id into deploy_path via rsync
    (no --delete: existing destination files are preserved)."""
    logging.debug('Rsyncing %s to %s' % (tmp_dir_id, deploy_path))
    rsync_cmd = ["rsync", "-ra", tmp_dir_id + '/', deploy_path]
    subprocess.check_call(rsync_cmd)
Example 44
Project: poupool   Author: lostcontrol   File: device.py    GNU General Public License v2.0 5 votes vote down vote up
def __init__(self, name, port):
        """Open the serial link to the device on *port* at 9600 baud.

        :param name: Device name, forwarded to the base class.
        :param port: Serial device path (e.g. /dev/ttyUSB0).
        """
        super().__init__(name)
        # Disable hangup-on-close to avoid having the Arduino resetting when closing the
        # connection. Useful for debugging and to avoid interrupting a move.
        # https://playground.arduino.cc/Main/DisablingAutoResetOnSerialConnection
        subprocess.check_call(["stty", "-F", port, "-hupcl"])
        self.__serial = serial.Serial(port, baudrate=9600, timeout=0.1)
        # Text-mode wrapper over the raw serial stream for line-oriented I/O.
        self.__sio = io.TextIOWrapper(io.BufferedRWPair(self.__serial, self.__serial))
Example 45
Project: gluster-integration   Author: Tendrl   File: setup.py    GNU Lesser General Public License v2.1 5 votes vote down vote up
def run(self):
        """Create an annotated git tag v<version> (optionally GPG-signed)
        and push it to the origin remote, echoing each command first."""
        tag_name = 'v%s' % version
        tag_cmd = ['git', 'tag', '-a', tag_name, '-m', 'version %s' % version]
        if self.sign:
            tag_cmd.append('-s')
        print(' '.join(tag_cmd))
        subprocess.check_call(tag_cmd)

        # Push Git tag to origin remote
        push_cmd = ['git', 'push', 'origin', tag_name]
        print(' '.join(push_cmd))
        subprocess.check_call(push_cmd)
Example 46
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: index.py    MIT License 5 votes vote down vote up
def __init__(self, url=None):
        """
        Initialise an instance.

        :param url: The URL of the index. If not specified, the URL for PyPI is
                    used.
        """
        self.url = url or DEFAULT_INDEX
        self.read_configuration()
        # Only bare http(s) URLs are accepted as a repository location.
        scheme, netloc, path, params, query, frag = urlparse(self.url)
        if params or query or frag or scheme not in ('http', 'https'):
            raise DistlibException('invalid repository: %s' % self.url)
        self.password_handler = None
        self.ssl_verifier = None
        self.gpg = None
        self.gpg_home = None
        # Probe for a usable GnuPG binary; remember the first one that runs.
        with open(os.devnull, 'w') as sink:
            # Use gpg by default rather than gpg2, as gpg2 insists on
            # prompting for passwords
            for s in ('gpg', 'gpg2'):
                try:
                    rc = subprocess.check_call([s, '--version'], stdout=sink,
                                               stderr=sink)
                    if rc == 0:
                        self.gpg = s
                        break
                # NOTE(review): a nonzero exit raises CalledProcessError, not
                # OSError, and would propagate out of __init__ — confirm that
                # '--version' can never fail once the binary is found.
                except OSError:
                    pass
Example 47
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: configuration.py    MIT License 5 votes vote down vote up
def open_in_editor(self, options, args):
        """Launch the user's editor on the configuration file being edited,
        raising PipError when no file or a failing editor is encountered."""
        editor = self._determine_editor(options)
        fname = self.configuration.get_file_to_edit()

        if fname is None:
            raise PipError("Could not determine appropriate file.")

        try:
            subprocess.check_call([editor, fname])
        except subprocess.CalledProcessError as e:
            message = "Editor Subprocess exited with exit code {}".format(
                e.returncode
            )
            raise PipError(message)
Example 48
Project: 2015-SpaceInvaders-Bot-Python   Author: EntelectChallenge   File: ez_setup.py    MIT License 5 votes vote down vote up
def _clean_check(cmd, target):
    """
    Run the command to download target.

    If the command fails, clean up before re-raising the error.
    """
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        if os.access(target, os.F_OK):
            os.unlink(target)
        raise 
Example 49
Project: 2015-SpaceInvaders-Bot-Python   Author: EntelectChallenge   File: ez_setup.py    MIT License 5 votes vote down vote up
def has_powershell():
    """Return True when PowerShell can be invoked (Windows only)."""
    if platform.system() != 'Windows':
        return False
    probe = ['powershell', '-Command', 'echo test']
    with open(os.path.devnull, 'wb') as sink:
        try:
            subprocess.check_call(probe, stdout=sink, stderr=sink)
            return True
        except Exception:
            return False
Example 50
Project: 2015-SpaceInvaders-Bot-Python   Author: EntelectChallenge   File: ez_setup.py    MIT License 5 votes vote down vote up
def has_curl():
    """Return True if the `curl` binary runs successfully, False otherwise."""
    with open(os.path.devnull, 'wb') as sink:
        try:
            subprocess.check_call(['curl', '--version'],
                                  stdout=sink, stderr=sink)
            return True
        except Exception:
            return False
Example 51
Project: 2015-SpaceInvaders-Bot-Python   Author: EntelectChallenge   File: ez_setup.py    MIT License 5 votes vote down vote up
def has_wget():
    """Return True if the `wget` binary runs successfully, False otherwise."""
    with open(os.path.devnull, 'wb') as sink:
        try:
            subprocess.check_call(['wget', '--version'],
                                  stdout=sink, stderr=sink)
            return True
        except Exception:
            return False
Example 52
Project: flasky   Author: RoseOu   File: index.py    MIT License 5 votes vote down vote up
def __init__(self, url=None):
        """
        Initialise an instance.

        :param url: The URL of the index. If not specified, the URL for PyPI is
                    used.
        """
        self.url = url or DEFAULT_INDEX
        self.read_configuration()
        # Only bare http(s) URLs are accepted as a repository location.
        scheme, netloc, path, params, query, frag = urlparse(self.url)
        if params or query or frag or scheme not in ('http', 'https'):
            raise DistlibException('invalid repository: %s' % self.url)
        self.password_handler = None
        self.ssl_verifier = None
        self.gpg = None
        self.gpg_home = None
        # Probe for GnuPG, preferring gpg2; keep the first binary that runs.
        with open(os.devnull, 'w') as sink:
            for s in ('gpg2', 'gpg'):
                try:
                    rc = subprocess.check_call([s, '--version'], stdout=sink,
                                               stderr=sink)
                    if rc == 0:
                        self.gpg = s
                        break
                # NOTE(review): only OSError (binary missing) is caught; a
                # nonzero exit raises CalledProcessError and would propagate.
                except OSError:
                    pass
Example 53
Project: flasky   Author: RoseOu   File: test_svn.py    MIT License 5 votes vote down vote up
def _do_svn_check():
    try:
        subprocess.check_call(["svn", "--version"],
                              shell=(sys.platform == 'win32'))
        return True
    except (OSError, subprocess.CalledProcessError):
        return False 
Example 54
Project: core   Author: lifemapper   File: makeflow_debugger.py    GNU General Public License v3.0 5 votes vote down vote up
def cleanupMakeflow(mfDag, workspace, cleanupVal, success, log):
    """
    @summary: Clean up a Makeflow
    @param mfDag: The Makeflow DAG file to clean up
    @param workspace: Workspace directory for the Makeflow run
    @param cleanupVal: Cleanup mode; -1, 1 or 2 force cleanup, 0 cleans only
                       on success
    @param success: Whether the Makeflow run succeeded
    @param log: Logger used to report cleanup failures
    """
    if cleanupVal in [-1, 1, 2] or (cleanupVal == 0 and success):
        cleanCmd = getMakeflowCleanCommand(mfDag, workspace)
        try:
            subprocess.check_call(cleanCmd, shell=True)
        # Fixed Python-2-only `except ..., e` syntax (SyntaxError on py3).
        except subprocess.CalledProcessError as e:
            log.debug('Could not clean up Makeflow:')
            log.debug(str(e))
Example 55
Project: PythonMicroservicesDevelopment_Code   Author: mtianyan   File: 07_bandit.py    Apache License 2.0 5 votes vote down vote up
def run_command(cmd):
    """Run *cmd* through the shell; return 0 on success, raise on failure."""
    return subprocess.check_call(cmd, shell=True)
Example 56
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: mxdoc.py    Apache License 2.0 5 votes vote down vote up
def _run_cmd(cmds):
    """Run commands, raise exception if failed"""
    if not isinstance(cmds, str):
        # NOTE(review): a list is joined with NO separator, so callers must
        # pass fragments that already carry their own delimiters — confirm
        # whether " ".join or "; ".join was intended here.
        cmds = "".join(cmds)
    print("Execute \"%s\"" % cmds)
    try:
        subprocess.check_call(cmds, shell=True)
    except subprocess.CalledProcessError as err:
        # Echo the failure before re-raising so the log shows what broke.
        print(err)
        raise err
Example 57
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: docker_cache.py    Apache License 2.0 5 votes vote down vote up
def _upload_image(registry, docker_tag, image_id) -> None:
    """
    Upload the passed image by id, tag it with docker tag and upload to S3 bucket
    :param registry: Docker registry name
    :param docker_tag: Docker tag
    :param image_id: Image id
    :return: None
    """
    _login_dockerhub()
    # The image already carries the right tag, so a plain push suffices.
    logging.info('Uploading %s (%s) to %s', docker_tag, image_id, registry)
    subprocess.check_call(['docker', 'push', docker_tag])
Example 58
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: docker_cache.py    Apache License 2.0 5 votes vote down vote up
def _login_dockerhub():
    """
    Login to the Docker Hub account
    :return: None
    """
    dockerhub_credentials = _get_dockerhub_credentials()
    # Security: pass the password on stdin rather than argv — command-line
    # arguments are visible to every user on the host via `ps`, so
    # `--password <pw>` would leak the credential.
    login_cmd = ['docker', 'login',
                 '--username', dockerhub_credentials['username'],
                 '--password-stdin']
    subprocess.run(login_cmd,
                   input=dockerhub_credentials['password'].encode('utf-8'),
                   check=True)
Example 59
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_docker_cache.py    Apache License 2.0 5 votes vote down vote up
def _start_local_docker_registry(cls):
        """Start a local registry:2 container for the tests, detached and
        published on DOCKER_REGISTRY_PORT."""
        # https://docs.docker.com/registry/deploying/#run-a-local-registrys
        port_mapping = '{}:{}'.format(DOCKER_REGISTRY_PORT, DOCKER_REGISTRY_PORT)
        start_cmd = [
            'docker', 'run', '-d', '-p', port_mapping,
            '--name', DOCKER_REGISTRY_NAME, 'registry:2',
        ]
        subprocess.check_call(start_cmd)
Example 60
Project: yarnspawner   Author: jupyterhub   File: conftest.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def kinit():
    """Obtain a Kerberos ticket for "testuser" from the test keytab when
    Kerberos support is enabled; otherwise do nothing."""
    if not HAS_KERBEROS:
        return
    subprocess.check_call(["kinit", "-kt", KEYTAB_PATH, "testuser"])
Example 61
Project: Pathfinder   Author: MatthewBCooke   File: Pathfinder.py    GNU General Public License v3.0 5 votes vote down vote up
def updatePathfinder(self):
        """Upgrade the jsl-pathfinder package in-place via pip, then exit.

        Runs pip with the interpreter that is executing us so the upgrade
        lands in the correct environment.
        """
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-U',
            'jsl-pathfinder'])
        # sys.exit instead of the `exit` builtin: `exit` is injected by the
        # site module and is not guaranteed to exist (e.g. frozen apps).
        sys.exit(0)
Example 62
Project: docker-taiga   Author: riotkit-org   File: slack.py    GNU General Public License v3.0 5 votes vote down vote up
def after_application_migration():
    """ Executes on each container start in the entrypoint, after application was migrated """
    migrate_cmd = 'python3 manage.py migrate taiga_contrib_slack'
    subprocess.check_call(migrate_cmd, shell=True)
Example 63
Project: docker-taiga   Author: riotkit-org   File: slack.py    GNU General Public License v3.0 5 votes vote down vote up
def frontend_setup():
    """ Installs frontend at build time """

    export_cmd = (
        'svn export "https://github.com/taigaio/taiga-contrib-slack/tags/'
        + VERSION + '/front/dist" "slack"'
    )
    subprocess.check_call(
        export_cmd,
        shell=True,
        stdout=sys.stdout,
        stderr=sys.stderr
    )
Example 64
Project: docker-taiga   Author: riotkit-org   File: slack.py    GNU General Public License v3.0 5 votes vote down vote up
def backend_setup():
    """ Installs backend at build time """

    install_cmd = 'pip3 install ' + PIP_PACKAGE + '==' + VERSION
    subprocess.check_call(
        install_cmd,
        shell=True,
        stdout=sys.stdout,
        stderr=sys.stderr
    )
Example 65
Project: Trusted-Platform-Module-nova   Author: BU-NU-CLOUD-SP16   File: schema_diff.py    Apache License 2.0 5 votes vote down vote up
def diff_files(filename1, filename2):
    """Show a unified diff of two files through a shell pipeline, colorized
    when colordiff is installed, and paged with `less -R`."""
    stages = ['diff -U 3 %(filename1)s %(filename2)s'
              % {'filename1': filename1, 'filename2': filename2}]

    # Use colordiff if available
    if subprocess.call(['which', 'colordiff']) == 0:
        stages.append('colordiff')

    stages.append('less -R')

    subprocess.check_call(' | '.join(stages), shell=True)


# Database 
Example 66
Project: Trusted-Platform-Module-nova   Author: BU-NU-CLOUD-SP16   File: schema_diff.py    Apache License 2.0 5 votes vote down vote up
def create(self, name):
        """Create a MySQL database *name* as root via mysqladmin."""
        create_cmd = ['mysqladmin', '-u', 'root', 'create', name]
        subprocess.check_call(create_cmd)
Example 67
Project: Trusted-Platform-Module-nova   Author: BU-NU-CLOUD-SP16   File: schema_diff.py    Apache License 2.0 5 votes vote down vote up
def drop(self, name):
        """Force-drop MySQL database *name* as root via mysqladmin."""
        drop_cmd = ['mysqladmin', '-f', '-u', 'root', 'drop', name]
        subprocess.check_call(drop_cmd)
Example 68
Project: Trusted-Platform-Module-nova   Author: BU-NU-CLOUD-SP16   File: schema_diff.py    Apache License 2.0 5 votes vote down vote up
def dump(self, name, dump_filename):
        """Dump MySQL database *name* into *dump_filename* via mysqldump."""
        dump_cmd = ('mysqldump -u root %(name)s > %(dump_filename)s'
                    % {'name': name, 'dump_filename': dump_filename})
        subprocess.check_call(dump_cmd, shell=True)
Example 69
Project: Trusted-Platform-Module-nova   Author: BU-NU-CLOUD-SP16   File: schema_diff.py    Apache License 2.0 5 votes vote down vote up
def create(self, name):
        """Create a PostgreSQL database *name* with createdb."""
        create_cmd = ['createdb', name]
        subprocess.check_call(create_cmd)
Example 70
Project: Trusted-Platform-Module-nova   Author: BU-NU-CLOUD-SP16   File: schema_diff.py    Apache License 2.0 5 votes vote down vote up
def dump(self, name, dump_filename):
        """Dump PostgreSQL database *name* to *dump_filename* via pg_dump."""
        shell_cmd = ('pg_dump %(name)s > %(dump_filename)s'
                     % {'name': name, 'dump_filename': dump_filename})
        subprocess.check_call(shell_cmd, shell=True)
Example 71
Project: Trusted-Platform-Module-nova   Author: BU-NU-CLOUD-SP16   File: schema_diff.py    Apache License 2.0 5 votes vote down vote up
def db2cmd(cls, cmd):
        """Wraps a command to be run under the DB2 instance user."""
        wrapped_cmd = 'su - $(db2ilist) -c "%s"' % cmd
        subprocess.check_call(wrapped_cmd, shell=True)
Example 72
Project: Trusted-Platform-Module-nova   Author: BU-NU-CLOUD-SP16   File: schema_diff.py    Apache License 2.0 5 votes vote down vote up
def dump(self, name, dump_filename):
        """Export DB2 DDL for *name* via db2look, then copy the dump back."""
        look_cmd = ('db2look -d %(name)s -e -o %(dump_filename)s'
                    % {'name': name, 'dump_filename': dump_filename})
        self.db2cmd(look_cmd)
        # The output file gets dumped to the db2 instance user's home directory
        # so we have to copy it back to our current working directory.
        subprocess.check_call('cp /home/$(db2ilist)/%s ./' % dump_filename,
                              shell=True)
Example 73
Project: fs_image   Author: facebookincubator   File: volume_for_repo.py    MIT License 4 votes vote down vote up
def get_volume_for_current_repo(min_free_bytes, artifacts_dir):
    '''
    Multiple repos need to be able to concurrently build images on the same
    host.  The cleanest way to achieve such isolation is to supply each repo
    with its own volume, which will store the repo's image build outputs.

    It is easiest to back this volume with a loop device. The appropriate
    size of the loop device depends on the expected size of the target being
    built.  We address this by ensuring that, prior to every build, the
    volume has at least a specified amount of space.  The default in
    `image_layer` is large enough for most builds, but really huge
    `image_layer` targets can further increase their requested
    `min_free_bytes`.

    Image-build tooling **must never** access paths in this volume without
    going through this function.  Otherwise, the volume will not get
    remounted correctly if the host containing the repo got rebooted.

    Returns the path to the per-repo volume directory, owned by the repo
    user.

    PRE-CONDITION: `artifacts_dir` exists and is writable by `root`.
    '''
    if not os.path.exists(artifacts_dir):  # pragma: no cover
        raise RuntimeError(f'{artifacts_dir} must exist')

    volume_dir = os.path.join(artifacts_dir, VOLUME_DIR)
    subprocess.check_call([
        # While Buck probably does not call this concurrently under normal
        # circumstances, the worst-case outcome is that we lose or corrupt
        # the whole buld cache, so add some locking to be on the safe side.
        'flock',
        os.path.join(artifacts_dir, '.lock.set_up_volume.sh.never.rm.or.mv'),
        'sudo',
        os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            'set_up_volume.sh',
        ),
        str(int(min_free_bytes)),  # Accepts floats & ints
        os.path.join(artifacts_dir, IMAGE_FILE),
        volume_dir,
    ])
    # We prefer to have the volume owned by the repo user, instead of root:
    #  - The trusted repo user has to be able to access the built
    #    subvolumes, but nobody else should be able to (they might contain
    #    setuid binaries & similar).  Thus, subvols ought to have wrapper
    #    directories owned by the user, with mode 0700.
    #  - This reduces the number of places we have to `sudo` to create
    #    directories inside the subvolume.
    subprocess.check_call([
        'sudo', 'chown', f'{os.getuid()}:{os.getgid()}', volume_dir,
    ])
    return volume_dir
Example 74
Project: fs_image   Author: facebookincubator   File: test_unshare.py    MIT License 4 votes vote down vote up
def test_mount_namespace(self):
        """Bind mounts made inside a mount namespace are invisible outside
        it, and the namespace stays usable after its keepalive dies."""
        try:
            sleep_pid = None
            with tempfile.TemporaryDirectory() as mnt_src, \
                    tempfile.TemporaryDirectory() as mnt_dest1, \
                    tempfile.TemporaryDirectory() as mnt_dest2:
                with open(os.path.join(mnt_src, 'cypa'), 'w') as outfile:
                    outfile.write('kvoh')

                def check_mnt_dest(mnt_dest: str):
                    # Verifies the bind mount is visible ONLY inside the NS.
                    cypa = os.path.join(mnt_dest, 'cypa')
                    # The outer NS cannot see the mount
                    self.assertFalse(os.path.exists(cypa))
                    # But we can read it from inside the namespace
                    self.assertEqual(b'kvoh', subprocess.check_output(
                        nsenter_as_user(unshare, 'cat', cypa),
                    ))

                with Unshare([Namespace.MOUNT]) as unshare:
                    # Without a PID namespace, this will outlive the
                    # __exit__ -- in fact, this process would leak but for
                    # our `finally`.
                    proc, sleep_pid = self._popen_sleep_forever(unshare)

                    subprocess.check_call(nsenter_as_root(
                        unshare, 'mount', mnt_src, mnt_dest1, '-o', 'bind',
                    ))
                    check_mnt_dest(mnt_dest1)

                    # Mount namespaces remain usable after the keepalive dies
                    self._kill_keepalive(unshare)

                    # We can make a second mount inside the namespace
                    subprocess.check_call(nsenter_as_root(
                        unshare, 'mount', mnt_src, mnt_dest2, '-o', 'bind',
                    ))
                    check_mnt_dest(mnt_dest2)
                    check_mnt_dest(mnt_dest1)  # The old mount is still good

                # Outside the context, nsenter cannot work. There's no way
                # to test the mounts are gone since we don't have any handle
                # by which to access them.  That's the point.
                with self.assertRaisesRegex(
                    RuntimeError, 'Must nsenter from inside an Unshare',
                ):
                    check_mnt_dest(mnt_dest1)

            time.sleep(2)  # Give some time for `sleep` to exit erroneously
            self.assertIs(None, proc.poll())  # Processes leak
        finally:
            # Ensure we don't leak the `sleep infinity` -- since it was
            # started via `sudo`, `subprocess` cannot kill it automatically.
            if sleep_pid:
                if proc.poll() is None:
                    os.kill(sleep_pid, signal.SIGTERM)
                proc.wait()
Example 75
Project: fs_image   Author: facebookincubator   File: test_package_image.py    MIT License 4 votes vote down vote up
def test_package_image_as_squashfs(self):
        """Package a layer as squashfs, copy it into a fresh subvolume via
        mount + rsync, and check that its sendstream matches the original
        (modulo squashfs turning cloned/zero extents into sparse holes)."""
        with self._package_image(
            self._sibling_path('create_ops.layer'), 'squashfs',
        ) as out_path, TempSubvolumes(sys.argv[0]) as temp_subvolumes, \
                tempfile.NamedTemporaryFile() as temp_sendstream:
            subvol = temp_subvolumes.create('subvol')
            with Unshare([Namespace.MOUNT, Namespace.PID]) as unshare, \
                    tempfile.TemporaryDirectory() as mount_dir:
                subprocess.check_call(nsenter_as_root(
                    unshare, 'mount', '-t', 'squashfs', '-o', 'loop',
                    out_path, mount_dir,
                ))
                # `unsquashfs` would have been cleaner than `mount` +
                # `rsync`, and faster too, but unfortunately it corrupts
                # device nodes as of v4.3.
                subprocess.check_call(nsenter_as_root(
                    unshare, 'rsync', '--archive', '--hard-links',
                    '--sparse', '--xattrs', mount_dir + '/', subvol.path(),
                ))
            with subvol.mark_readonly_and_write_sendstream_to_file(
                temp_sendstream
            ):
                pass
            original_render = _render_sendstream_path(
                self._sibling_path('create_ops-original.sendstream'),
            )
            # SquashFS does not preserve the original's cloned extents of
            # zeros, nor the zero-hole-zero pattern.  In all cases, it
            # (efficiently) transmutes the whole file into 1 sparse hole.
            self.assertEqual(original_render[1].pop('56KB_nuls'), [
                '(File d57344(create_ops@56KB_nuls_clone:0+49152@0/' +
                'create_ops@56KB_nuls_clone:49152+8192@49152))'
            ])
            original_render[1]['56KB_nuls'] = ['(File h57344)']
            self.assertEqual(original_render[1].pop('56KB_nuls_clone'), [
                '(File d57344(create_ops@56KB_nuls:0+49152@0/' +
                'create_ops@56KB_nuls:49152+8192@49152))'
            ])
            original_render[1]['56KB_nuls_clone'] = ['(File h57344)']
            self.assertEqual(original_render[1].pop('zeros_hole_zeros'), [
                '(File d16384h16384d16384)'
            ])
            original_render[1]['zeros_hole_zeros'] = ['(File h49152)']
            self.assertEqual(
                original_render, _render_sendstream_path(temp_sendstream.name),
            )
Example 76
Project: fs_image   Author: facebookincubator   File: subvolume_garbage_collector.py    MIT License 4 votes vote down vote up
def garbage_collect_subvolumes(refcounts_dir, subvolumes_dir):
    '''
    Delete subvolume wrapper directories whose refcount file has fewer than
    2 links, removing any btrfs subvolume inside via `sudo btrfs`.
    '''
    # IMPORTANT: We must list subvolumes BEFORE refcounts. The risk is that
    # this runs concurrently with another build, which will create a new
    # refcount & subvolume (in that order).  If we read refcounts first, we
    # might end up winning the race against the other build, and NOT reading
    # the new refcount.  If we then lose the second part of the race, we
    # would find the subvolume that the other process just created, and
    # delete it.
    subvol_wrappers = set(list_subvolume_wrappers(subvolumes_dir))
    subvol_wrapper_to_nlink = dict(list_refcounts(refcounts_dir))

    # Delete subvolumes (& their wrappers) with insufficient refcounts.
    for subvol_wrapper in subvol_wrappers:
        nlink = subvol_wrapper_to_nlink.get(subvol_wrapper, 0)
        if nlink >= 2:
            if nlink > 2:
                # Not sure how this might happen, but it seems non-fatal...
                log.error(f'{nlink} > 2 links to subvolume {subvol_wrapper}')
            continue
        refcount_path = os.path.join(refcounts_dir, f'{subvol_wrapper}.json')
        log.warning(
            f'Deleting {subvol_wrapper} since its refcount has {nlink} links'
        )
        # Start by unlinking the refcount to dramatically decrease the
        # chance of leaving an orphaned refcount file on disk.  The most
        # obvious way to get an orphaned refcount is for this program to
        # abort between the line that creates the refcount link, and the
        # next line that creates the subvolume wrapper.
        #
        # I do not see a great way to completely eliminate orphan refcount
        # files.  One could try to have a separate pass that flocks the
        # refcount file before removing it, and to also flock the refcount
        # file before creating the wrapper directory.  But, since file
        # creation & flock cannot be atomic, this leaves us open to a race
        # where a concurrent GC pass removes the refcount link immediately
        # after it gets created, so that part of the code would have to be
        # willing to repeat the race until it wins.  In all, that extra
        # complexity is far too ugly compared to the slim risk or leaving
        # some unused refcount files on disk.
        if nlink:
            os.unlink(refcount_path)
        wrapper_path = os.path.join(subvolumes_dir, subvol_wrapper)
        wrapper_content = os.listdir(wrapper_path)
        if len(wrapper_content) > 1:
            raise RuntimeError(f'{wrapper_path} must contain only the subvol')
        if len(wrapper_content) == 1:  # Empty wrappers are OK to GC, too.
            subprocess.check_call([
                'sudo', 'btrfs', 'subvolume', 'delete',
                os.path.join(
                    subvolumes_dir,
                    # Subvols are wrapped in a user-owned temporary directory,
                    # following the convention `{rule name}:{version}/{subvol}`.
                    subvol_wrapper,
                    wrapper_content[0],
                ),
            ])
        os.rmdir(wrapper_path)
Example 77
Project: BASS   Author: Cisco-Talos   File: ida_service.py    GNU General Public License v2.0 4 votes vote down vote up
def bindiff_pickle_export():
    """
    Run the IDA Pro autoanalysis on the input file and export a BinExport database.
    :param input: The input file
    :return: Status code 200 and a JSON object containing the output database
        name in key 'output', or status code 422 on invalid parameters, 408 on
        timeout or 500 on other errors.
    """
    logger.info("bindiff_pickle_export called")

    directory = None
    try:
        # All work happens in a throwaway directory, removed in `finally`.
        directory = tempfile.mkdtemp()
        if len(request.files) != 1:
            return make_response(jsonify(error = "Missing file parameter"), 422)

        # NOTE(review): `request.files.items()[0]` subscripts a dict view and
        # is Python-2-only; on py3 this would need list(...) — confirm the
        # target interpreter.
        filename, file_ = request.files.items()[0]
        input_ = os.path.join(directory, sanitize_filename(filename))
        file_.save(input_)

        output_binexport = os.path.join(directory, "output.BinExport")
        output_pickle = os.path.join(directory, "output.pickle")

        timeout = request.form.get('timeout', None)
        is_64_bit = request.form.get('is_64_bit', True)
        try:
            run_ida(input_, is_64_bit, timeout, os.path.join(PREFIX, "export_binexport_pickle.py"), "binexport_pickle", output_binexport, output_pickle)
            logger.info("Command completed successfully")
            # Bundle both outputs into one tarball for the response.
            output_tar = os.path.join(directory, "output.tar.gz")
            subprocess.check_call(["tar", "czf", output_tar, os.path.relpath(output_binexport, directory), os.path.relpath(output_pickle, directory)], cwd = directory)
            return send_file(open(output_tar, "rb"), as_attachment = True, attachment_filename = "%s.tar.gz" % filename, mimetype = "application/gzip")
        except TimeoutError:
            return jsonify(error = "Program execution timed out"), 408
        except OSError as err:
            return jsonify(error = "Program execution failed with error %d" % err.errno), 500
    finally:
        if directory is not None:
            shutil.rmtree(directory)
Example 78
Project: CFN-CR-PythonLambdaLayer   Author: kisst   File: lambda_function.py    GNU General Public License v3.0 4 votes vote down vote up
def publish_layer(name, region):
    """
    Publish the built zip as a Lambda layer
    :param name: Name of the Lambda layer to publish
    :param region: AWS region to publish the layer version into
    :return: The published layer version ARN
    """
    # Fixed a file-handle leak: the log file was opened and never closed.
    with open("/tmp/pip-install.log", "wb") as logfile:
        subprocess.check_call([
            sys.executable, '-m', 'pip', 'install',
            '--upgrade', '-t', '/tmp/upload', 'boto3'], stdout=logfile)

    # my pip location
    sys.path.insert(0, '/tmp/upload')
    import botocore
    # Reload so the freshly installed copy under /tmp/upload takes effect.
    importlib.reload(botocore)
    import boto3

    client = boto3.client('lambda', region_name=region)
    response = client.publish_layer_version(
        LayerName=name,
        Description='Build with CFN Custom Resource',
        Content={'ZipFile': file_get_content('/tmp/layer.zip')},
        CompatibleRuntimes=['python3.7'])
    return response['LayerVersionArn']
Example 79
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: build.py    Apache License 2.0 4 votes vote down vote up
def build_docker(platform: str, docker_binary: str, registry: str, num_retries: int, use_cache: bool) -> str:
    """
    Build a container for the given platform
    :param platform: Platform
    :param docker_binary: docker binary to use (docker/nvidia-docker)
    :param registry: Dockerhub registry name
    :param num_retries: Number of retries to build the docker image
    :param use_cache: will pass cache_from to docker to use the previously pulled tag
    :return: Id of the top level image
    """
    tag = get_docker_tag(platform=platform, registry=registry)
    logging.info("Building docker container tagged '%s' with %s", tag, docker_binary)

    # A user/group with the same ids as the invoking non-root user is created
    # inside the container so files written there keep the local user's
    # permissions; the ids below are consumed by the docker files
    # (see docker/install/ubuntu_adduser.sh).
    #
    # --cache-from makes docker reuse layers of the tag previously pulled from
    # the remote (see docker_cache.load_docker_cache).  It also prevents using
    # local layers for caching (https://github.com/moby/moby/issues/33002), so
    # to rely on local caching pass --no-dockerhub-cache to this script, which
    # omits the flag.  Note this scheme doesn't work with multi head docker
    # files.
    build_args = [
        "--build-arg", "USER_ID={}".format(os.getuid()),
        "--build-arg", "GROUP_ID={}".format(os.getgid()),
    ]
    if use_cache:
        build_args += ["--cache-from", tag]
    cmd = (
        [docker_binary, "build", "-f", get_dockerfile(platform)]
        + build_args
        + ["-t", tag, get_dockerfiles_path()]
    )

    @retry(subprocess.CalledProcessError, tries=num_retries)
    def _attempt_build():
        logging.info("Running command: '%s'", ' '.join(cmd))
        check_call(cmd)

    _attempt_build()

    # The tag is guaranteed to exist at this point (barring a race):
    # check_call would have raised otherwise.
    image_id = _get_local_image_id(docker_binary=docker_binary, docker_tag=tag)
    if not image_id:
        raise FileNotFoundError('Unable to find docker image id matching with {}'.format(tag))
    return image_id
Example 80
Project: DOTA_models   Author: ringringyi   File: build_pip_package.py    Apache License 2.0 4 votes vote down vote up
def main():
  """Package the Bazel runfiles for dragnn/syntaxnet into a pip wheel.

  Locates the build_pip_package.runfiles directory on sys.path, copies the
  dragnn and syntaxnet packages (and optionally tensorflow) into a temp
  staging dir, runs ``setup.py bdist_wheel`` there, and moves the resulting
  wheel into --output-dir.
  """
  cmd_args = argparse.ArgumentParser()
  cmd_args.add_argument("--include-tensorflow", action="store_true")
  cmd_args.add_argument("--output-dir", required=True)
  args = cmd_args.parse_args()
  # Validate early: the output dir must already exist and be absolute
  # (the wheel is moved there from a temp dir at the end).
  if not os.path.isdir(args.output_dir):
    raise EnvironmentError(
        "Output directory {} doesn't exist".format(args.output_dir))
  elif not args.output_dir.startswith("/"):
    raise EnvironmentError("Please pass an absolute path to --output-dir.")

  # NOTE(review): this temp dir is never removed — presumably acceptable for
  # a one-shot build script, but confirm.
  tmp_packaging = tempfile.mkdtemp()
  # Tuple-unpack asserts exactly one runfiles entry is on sys.path.
  runfiles, = (path for path in sys.path
               if path.endswith("build_pip_package.runfiles"))

  # Use the dragnn and tensorflow modules to resolve specific paths in the
  # runfiles directory. Current Bazel puts dragnn in a __main__ subdirectory,
  # for example.
  lib_path = os.path.abspath(dragnn.__file__)
  if runfiles not in lib_path:
    raise EnvironmentError("WARNING: Unexpected PYTHONPATH set by Bazel :(")
  # base_dir is two levels above dragnn's __init__, i.e. the dir holding both
  # the dragnn and syntaxnet package directories.
  base_dir = os.path.dirname(os.path.dirname(lib_path))
  tensorflow_dir = os.path.dirname(tensorflow.__file__)
  if runfiles not in tensorflow_dir:
    raise EnvironmentError("WARNING: Unexpected tf PYTHONPATH set by Bazel :(")

  # Copy the files.
  subprocess.check_call([
      "cp", "-r", os.path.join(base_dir, "dragnn"), os.path.join(
          base_dir, "syntaxnet"), tmp_packaging
  ])
  if args.include_tensorflow:
    subprocess.check_call(
        ["cp", "-r", tensorflow_dir, tmp_packaging])
  # The OSS setup script becomes the staging dir's setup.py for bdist_wheel.
  shutil.copy(
      os.path.join(base_dir, "dragnn/tools/oss_setup.py"),
      os.path.join(tmp_packaging, "setup.py"))
  subprocess.check_output(
      ["python", "setup.py", "bdist_wheel"], cwd=tmp_packaging)
  # Tuple-unpack asserts exactly one wheel was produced in dist/.
  wheel, = glob.glob("{}/*.whl".format(os.path.join(tmp_packaging, "dist")))

  shutil.move(wheel, args.output_dir)
  print(
      "Wrote {}".format(os.path.join(args.output_dir, os.path.basename(wheel))))