Python tests.__file__ Examples

The following are 8 code examples using the tests.__file__ attribute (note that __file__ is a module attribute, not a callable), collected from open-source projects. The source file, originating project, and license for each example are listed above it.
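Nearly every example below relies on the same trick: tests.__file__ holds the path of the tests package's __init__.py, so os.path.dirname(tests.__file__) resolves to the tests directory itself, regardless of the current working directory. A minimal sketch of the pattern (the data_path helper is hypothetical, not from any project below):

import os
import tests  # the project's top-level tests package

# __file__ on a package points at its __init__.py, so the dirname
# is the tests directory itself.
TESTS_DIR = os.path.dirname(os.path.abspath(tests.__file__))

def data_path(*parts):
    """Return an absolute path to a fixture under the tests directory."""
    return os.path.join(TESTS_DIR, *parts)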
Example #1
Source File: env.py    From zing with GNU General Public License v3.0
def setup_complex_po(self):
        # os and getclass (from translate.storage.factory) are module-level
        # imports in the original env.py.
        import tests
        from tests.factories import StoreDBFactory
        from pootle_translationproject.models import TranslationProject

        # Locate the PO fixture relative to the tests package.
        po_file = os.path.join(
            os.path.dirname(tests.__file__), "data", "po", "complex.po"
        )
        # getclass picks a storage class based on the file, and
        # instantiating it with the raw bytes parses the PO file.
        with open(po_file, "rb") as f:
            ttk = getclass(f)(f.read())

        tp = TranslationProject.objects.get(
            project__code="project0", language__code="language0"
        )

        store = StoreDBFactory(
            parent=tp.directory, translation_project=tp, name="complex.po"
        )
        store.update(ttk) 
Example #2
Source File: test_model_export_with_class_and_artifacts.py    From mlflow with Apache License 2.0
def test_add_to_model_adds_specified_kwargs_to_mlmodel_configuration():
    custom_kwargs = {
        "key1": "value1",
        "key2": 20,
        "key3": range(10),
    }
    model_config = Model()
    mlflow.pyfunc.add_to_model(model=model_config,
                               loader_module=os.path.basename(__file__)[:-3],
                               data="data",
                               code="code",
                               env=None,
                               **custom_kwargs)

    assert mlflow.pyfunc.FLAVOR_NAME in model_config.flavors
    # Every custom kwarg key should have been copied into the pyfunc flavor config.
    assert all(item in model_config.flavors[mlflow.pyfunc.FLAVOR_NAME] for item in custom_kwargs)
Example #3
Source File: __init__.py    From fence with Apache License 2.0
def read_file(filename):
    """Read the contents of a file in the tests directory."""
    root_dir = os.path.dirname(os.path.realpath(tests.__file__))
    with open(os.path.join(root_dir, filename), "r") as f:
        return f.read() 
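A hypothetical call site, assuming a JSON fixture such as data/sample.json exists under the tests directory (the path is illustrative, not a file from the fence project):

import json

sample = json.loads(read_file("data/sample.json"))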
Example #4
Source File: test_spark.py    From mlflow with Apache License 2.0
def test_spark_udf(spark, model_path):
    mlflow.pyfunc.save_model(
        path=model_path,
        loader_module=__name__,
        code_path=[os.path.dirname(tests.__file__)],
    )
    reloaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(model_path)

    pandas_df = pd.DataFrame(data=np.ones((10, 10)), columns=[str(i) for i in range(10)])
    spark_df = spark.createDataFrame(pandas_df)

    # Test all supported return types.
    # NB: np.int was removed in NumPy 1.24; np.int_ is the equivalent spelling
    # (the original source used np.int).
    type_map = {"float": (FloatType(), np.number),
                "int": (IntegerType(), np.int32),
                "double": (DoubleType(), np.number),
                "long": (LongType(), np.int_),
                "string": (StringType(), None)}

    for tname, tdef in type_map.items():
        spark_type, np_type = tdef
        prediction_df = reloaded_pyfunc_model.predict(pandas_df)
        for is_array in [True, False]:
            t = ArrayType(spark_type) if is_array else spark_type
            if tname == "string":
                expected = prediction_df.applymap(str)
            else:
                expected = prediction_df.select_dtypes(np_type)
                if tname == "float":
                    expected = expected.astype(np.float32)

            expected = [list(row[1]) if is_array else row[1][0] for row in expected.iterrows()]
            pyfunc_udf = spark_udf(spark, model_path, result_type=t)
            new_df = spark_df.withColumn("prediction", pyfunc_udf(*pandas_df.columns))
            actual = list(new_df.select("prediction").toPandas()['prediction'])
            assert expected == actual
            if not is_array:
                pyfunc_udf = spark_udf(spark, model_path, result_type=tname)
                new_df = spark_df.withColumn("prediction", pyfunc_udf(*pandas_df.columns))
                actual = list(new_df.select("prediction").toPandas()['prediction'])
                assert expected == actual 
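Both Spark tests pass loader_module=__name__, which relies on mlflow's loader-module contract: the named module must expose a _load_pyfunc(data_path) function that returns the model object. A minimal sketch of such a module, where the body of ConstantPyfuncWrapper is illustrative rather than the original implementation:

import pandas as pd

class ConstantPyfuncWrapper(object):
    """Toy pyfunc model that predicts a constant for every input row."""
    def predict(self, model_input):
        return pd.DataFrame({"prediction": [1.0] * len(model_input)})

def _load_pyfunc(data_path):
    # mlflow.pyfunc imports the loader_module and calls this hook to
    # reconstruct the model from the saved data path.
    return ConstantPyfuncWrapper()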
Example #5
Source File: test_spark.py    From mlflow with Apache License 2.0
def test_model_cache(spark, model_path):
    mlflow.pyfunc.save_model(
        path=model_path,
        loader_module=__name__,
        code_path=[os.path.dirname(tests.__file__)],
    )

    archive_path = SparkModelCache.add_local_model(spark, model_path)
    assert archive_path != model_path

    # Ensure we can use the model locally.
    local_model = SparkModelCache.get_or_load(archive_path)
    assert isinstance(local_model, PyFuncModel)
    assert isinstance(local_model._model_impl, ConstantPyfuncWrapper)

    # Define the model class name as a string so that each Spark executor can reference it
    # without attempting to resolve ConstantPyfuncWrapper, which is only available on the driver.
    constant_model_name = ConstantPyfuncWrapper.__name__

    # Request the model on all executors, and see how many times we got cache hits.
    def get_model(_):
        model = SparkModelCache.get_or_load(archive_path)
        assert isinstance(model, PyFuncModel)
        # NB: cannot use an isinstance check here, since the executors never
        # import the ConstantPyfuncWrapper class; compare class names instead.
        assert type(model._model_impl).__name__ == constant_model_name
        return SparkModelCache._cache_hits

    # This will run 30 distinct tasks, and we expect most to reuse an already-loaded model.
    # Note that we can't necessarily expect an even split, or even that there were only
    # exactly 2 python processes launched, due to Spark and its mysterious ways, but we do
    # expect significant reuse.
    results = spark.sparkContext.parallelize(range(0, 100), 30).map(get_model).collect()

    # TODO(tomas): Spark does not appear to reuse Python workers on Python 3,
    # so the cache-hit check below is only enforced on Python 2.
    assert sys.version_info[0] == 3 or max(results) > 10
    # Running again should load no new models.
    results2 = spark.sparkContext.parallelize(range(0, 100), 30).map(get_model).collect()
    assert sys.version_info[0] == 3 or min(results2) > 0
Example #6
Source File: test_model_export_with_class_and_artifacts.py    From mlflow with Apache License 2.0
def test_pyfunc_model_serving_without_conda_env_activation_succeeds_with_module_scoped_class(
        sklearn_knn_model, iris_data, tmpdir):
    sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model")
    mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path)

    def test_predict(sk_model, model_input):
        return sk_model.predict(model_input) * 2

    pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model")
    mlflow.pyfunc.save_model(path=pyfunc_model_path,
                             artifacts={
                                 "sk_model": sklearn_model_path
                             },
                             python_model=ModuleScopedSklearnModel(test_predict),
                             code_path=[os.path.dirname(tests.__file__)],
                             conda_env=_conda_env())
    loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(model_uri=pyfunc_model_path)

    sample_input = pd.DataFrame(iris_data[0])
    scoring_response = pyfunc_serve_and_score_model(
        model_uri=pyfunc_model_path,
        data=sample_input,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
        extra_args=["--no-conda"])
    assert scoring_response.status_code == 200
    np.testing.assert_array_equal(
        np.array(json.loads(scoring_response.text)),
        loaded_pyfunc_model.predict(sample_input)) 
Example #7
Source File: test_execute.py    From infrared with Apache License 2.0
@pytest.fixture  # this generator is consumed as a pytest fixture
def spec_fixture():
    """Generate a plugin spec for testing, using the tests/example plugin dir."""
    plugin_dir = path.join(path.abspath(path.dirname(tests.__file__)),
                           'example')
    test_plugin = plugins.InfraredPlugin(plugin_dir=plugin_dir)
    from infrared.api import InfraredPluginsSpec
    spec = InfraredPluginsSpec(test_plugin)
    yield spec
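A hypothetical consumer of this fixture, mirroring how Example #8 below registers and runs a spec (the test name is illustrative):

def test_example_plugin_runs(spec_fixture):
    spec_manager = api.SpecManager()
    spec_manager.register_spec(spec_fixture)
    # Running the registered 'example' plugin should exit cleanly.
    assert spec_manager.run_specs(args=['example']) == 0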
Example #8
Source File: test_execute.py    From infrared with Apache License 2.0
def test_execute_main_role_path(spec_fixture, workspace_manager_fixture, # noqa
                                test_workspace, input_value, input_roles):
    """Verify execution runs the main.yml playbook when roles_path is set.

    Workflow is the same as in the test_execute_main test, however, the plugin
    used here has config.roles_path set.

    Verifies that ANSIBLE_ROLES_PATH is set before plugin's main.yml execution
    and it's restored to the original value after the plugin execution is over.
    """
    input_string = ['example']

    # get the plugin with role_path defined
    role_path_plugin = 'example/plugins/plugin_with_role_path/infrared/plugin'
    plugin_dir = path.join(path.abspath(path.dirname(tests.__file__)),
                           role_path_plugin)
    test_plugin = plugins.InfraredPlugin(plugin_dir=plugin_dir)
    from infrared.api import InfraredPluginsSpec
    spec = InfraredPluginsSpec(test_plugin)

    spec_manager = api.SpecManager()
    spec_manager.register_spec(spec)

    inventory_dir = test_workspace.path
    output_file = "output.example"
    environ['ANSIBLE_ROLES_PATH'] = input_value
    assert not path.exists(path.join(inventory_dir, output_file))
    assert not path.exists(path.join(inventory_dir, "role_" + output_file))

    workspace_manager_fixture.activate(test_workspace.name)
    return_value = spec_manager.run_specs(args=input_string)

    expected_resp = 'ANSIBLE_ROLES_PATH=' + input_roles
    expected_resp += path.join(plugin_dir, test_plugin.roles_path + '../')

    assert return_value == 0
    assert environ.get('ANSIBLE_ROLES_PATH', '') == input_value
    assert path.exists(path.join(inventory_dir, output_file))
    # Read the playbook's output with a context manager so the file is closed.
    with open(path.join(inventory_dir, output_file), "r") as out_file:
        assert out_file.read() == expected_resp