diff --git a/.github/workflows/_build_and_publish_documentation.yml b/.github/workflows/_build_and_publish_documentation.yml index 2fed60c7..3f39aa7b 100644 --- a/.github/workflows/_build_and_publish_documentation.yml +++ b/.github/workflows/_build_and_publish_documentation.yml @@ -17,7 +17,7 @@ jobs: with: lfs: true - name: Install uv - uses: astral-sh/setup-uv@v2 + uses: astral-sh/setup-uv@v5 with: enable-cache: true cache-dependency-glob: "uv.lock" diff --git a/.github/workflows/_build_package.yml b/.github/workflows/_build_package.yml index 362d3aaa..5c21af41 100644 --- a/.github/workflows/_build_package.yml +++ b/.github/workflows/_build_package.yml @@ -11,7 +11,7 @@ jobs: with: lfs: true - name: Install uv - uses: astral-sh/setup-uv@v2 + uses: astral-sh/setup-uv@v5 with: enable-cache: true cache-dependency-glob: "uv.lock" diff --git a/.github/workflows/_code_quality.yml b/.github/workflows/_code_quality.yml index d3e65a71..3ef1e817 100644 --- a/.github/workflows/_code_quality.yml +++ b/.github/workflows/_code_quality.yml @@ -9,7 +9,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Install uv - uses: astral-sh/setup-uv@v2 + uses: astral-sh/setup-uv@v5 with: enable-cache: true cache-dependency-glob: "uv.lock" @@ -28,7 +28,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Install uv - uses: astral-sh/setup-uv@v2 + uses: astral-sh/setup-uv@v5 with: enable-cache: true cache-dependency-glob: "uv.lock" @@ -47,7 +47,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Install uv - uses: astral-sh/setup-uv@v2 + uses: astral-sh/setup-uv@v5 with: enable-cache: true cache-dependency-glob: "uv.lock" @@ -66,7 +66,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Install uv - uses: astral-sh/setup-uv@v2 + uses: astral-sh/setup-uv@v5 with: enable-cache: true cache-dependency-glob: "uv.lock" diff --git a/.github/workflows/_test.yml b/.github/workflows/_test.yml index 9b8ec653..5115ac23 100644 --- a/.github/workflows/_test.yml +++ b/.github/workflows/_test.yml @@ 
-18,9 +18,9 @@ jobs: steps: - uses: actions/checkout@v4 - name: Install uv - uses: astral-sh/setup-uv@v2 + uses: astral-sh/setup-uv@v5 with: - enable-cache: true + enable-cache: false cache-dependency-glob: "uv.lock" - name: Install Python ${{ matrix.python.version }} uses: actions/setup-python@v5 diff --git a/.github/workflows/_test_future.yml b/.github/workflows/_test_future.yml index 7e6609aa..6694ca94 100644 --- a/.github/workflows/_test_future.yml +++ b/.github/workflows/_test_future.yml @@ -19,9 +19,9 @@ jobs: steps: - uses: actions/checkout@v4 - name: Install uv - uses: astral-sh/setup-uv@v2 + uses: astral-sh/setup-uv@v5 with: - enable-cache: true + enable-cache: false cache-dependency-glob: "uv.lock" - name: Install Python ${{ matrix.python.version }} uses: actions/setup-python@v5 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0600daab..430e0d13 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,13 +9,12 @@ repos: - id: check-toml - id: check-merge-conflict - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.3 + rev: v0.9.2 hooks: - id: ruff-format - id: ruff - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.13.0 + rev: v1.14.1 hooks: - id: mypy - additional_dependencies: [numpy==1.26.4, torch==2.4.1] exclude: '(.venv|.*_cache)/.*' diff --git a/.vscode/launch.json b/.vscode/launch.json index 23907d42..0797fe8e 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -63,4 +63,4 @@ "justMyCode": true, }, ] -} +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 57142517..86e54207 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -47,6 +47,6 @@ ], "mypy-type-checker.importStrategy": "fromEnvironment", "mypy-type-checker.reportingScope": "workspace", - "mypy-type-checker.preferDaemon": false, + "mypy-type-checker.preferDaemon": true, "ruff.configurationPreference": "filesystemFirst", } \ No newline at end of file diff --git 
a/CHANGELOG.md b/CHANGELOG.md index 40222978..b94963a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,25 @@ The changelog format is based on [Keep a Changelog](https://keepachangelog.com/e ## [Unreleased] +### Added +* pyproject.toml : Added keywords + +### Changed +* Updated multiple files to latest changes in python_project_template + +### Solved +* Resolved issues raised by `ruff` 0.9.2 + +### Dependencies +* Updated to matplotlib>=3.10 (from matplotlib>=3.9) +* Updated to sphinx-autodoc-typehints>=3.0 (from sphinx-autodoc-typehints>=2.5) +* Updated to pyarrow>=19.0 (from pyarrow>=18.1) +* Updated to mypy>=1.14 (from mypy>=1.13) +* Updated to setup-uv@v5 (from setup-uv@v2) + + ## [0.1.1] - 2024-12-27 + ### Added: Major addition of new qoi method `MarginalCDFExtrapolation`. The following helpers are alse added. * `utils/numerical_precision`: quantify the precision possible with different datatypes diff --git a/CITATION.cff b/CITATION.cff index 2c66564a..731e6b2c 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -1,31 +1,35 @@ -cff-version: 1.2.0 title: Axtreme -message: Ax for Extremes +version: 0.1.1 +abstract: >- + A toolkit for estimating the long term behaviour + (extremes) of expensive, stochastic, black box functions. type: software authors: - - name: DNV - address: 'Veritasveien 1, 1363 Høvik' - city: Oslo + - name: DNV AS + address: 'Veritasveien 1' + post-code: '1363' + city: Høvik + country: 'NO' website: 'https://www.dnv.com/' - given-names: Sebastian family-names: Winter - email: sebastian.winter@dnv.com affiliation: DNV + email: sebastian.winter@dnv.com - given-names: Kristoffer family-names: Skare - email: kristoffer.skare@dnv.com affiliation: DNV + email: kristoffer.skare@dnv.com - given-names: Magnus family-names: Kristiansen - email: magnus.kristiansen@dnv.com affiliation: DNV -abstract: >- - A toolkit for estimating the long term behaviour - (extremes) of expensive, stochastic, black box functions.
+ email: magnus.kristiansen@dnv.com keywords: - Design of Experiments - Active Learning - Extreme Response license: MIT -url: "https://github.com/dnv-opensource/axtreme" -version: 0.1.1 +license-url: 'https://dnv-opensource.github.io/axtreme/LICENSE.html' +url: 'https://dnv-opensource.github.io/axtreme/README.html' +repository-code: 'https://github.com/dnv-opensource/axtreme' +message: 'Please cite this software using these metadata.' +cff-version: 1.2.0 diff --git a/LICENSE b/LICENSE index 90b0cc37..5fb9fa46 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2024 [DNV](https://www.dnv.com) [open source](https://github.com/dnv-opensource) +Copyright (c) 2025 [DNV](https://www.dnv.com) [open source](https://github.com/dnv-opensource) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index d09fe588..d7f829bc 100644 --- a/README.md +++ b/README.md @@ -65,7 +65,7 @@ uv sync > **Note**: Using `--no-dev` will omit installing development dependencies. > **Note**: `uv` will create a new virtual environment called `.venv` in the project root directory when running -> `uv sync` for the first time. Optionally, you can create your own using e.g. `uv venv`, before running +> `uv sync` the first time. Optionally, you can create your own virtual environment using e.g. `uv venv`, before running > `uv sync`. ### 5. (Optional) Install CUDA support diff --git a/docs/source/axtreme.rst b/docs/source/axtreme.rst index e63a16a1..b515588f 100644 --- a/docs/source/axtreme.rst +++ b/docs/source/axtreme.rst @@ -1,5 +1,5 @@ axtreme package -================== +=============== Subpackages ----------- diff --git a/docs/source/conf.py b/docs/source/conf.py index e6f5a3d0..3a5af27e 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -23,7 +23,7 @@ project = "axtreme" copyright = "2024, DNV AS. All rights reserved." 
-author = "Sebastian Winter, Kristoffer Skare" +author = "Sebastian Winter, Kristoffer Skare, Magnus Kristiansen" # The full version, including alpha/beta/rc tags release = "0.1.1" @@ -33,11 +33,10 @@ extensions = [ "myst_parser", - "sphinx.ext.autodoc", # upgrade to autodoc2if want to you myst markup in docstings + "sphinx.ext.autodoc", "sphinx.ext.napoleon", "sphinx_argparse_cli", "sphinx.ext.mathjax", - "matplotlib.sphinxext.plot_directive", "sphinx.ext.autosummary", "sphinx.ext.todo", "sphinxcontrib.mermaid", @@ -74,9 +73,6 @@ autodoc_default_options = { "member-order": "groupwise", "undoc-members": True, - # "special-members": True, - # TODO(sw 2024-12-5): using "inherited-members" might be a more elegant want to achieve the below. - # "exclude-members": "__weakref__, __init__, __annotations__, __abstractmethods__, __module__, __parameters__, __subclasshook__", "exclude-members": "__weakref__", } autodoc_preserve_defaults = True diff --git a/docs/source/index.rst b/docs/source/index.rst index 4f09d2f0..5635e777 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -23,7 +23,6 @@ axtreme Documentation LICENSE - Indices and tables ================== diff --git a/pyproject.toml b/pyproject.toml index 0061aa68..9d2720d3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,6 +10,9 @@ only-include = [ "tests", ".coveragerc", ".editorconfig", + ".pre-commit-config.yaml", + "manage.py", + "pyproject.toml", "pytest.ini", "ruff.toml", "uv.lock", @@ -37,6 +40,9 @@ maintainers = [ { name = "Jorge Luis Mendez", email = "jorge.luis.mendez@dnv.com" }, ] keywords = [ + "Design of Experiments", + "Active Learning", + "Extreme Response", ] classifiers = [ "Development Status :: 3 - Alpha", @@ -53,8 +59,8 @@ classifiers = [ "Topic :: Software Development :: Libraries :: Python Modules", ] dependencies = [ - "numpy>=1.26,<2.0", - "matplotlib>=3.9", + "numpy>=1.26, <2.0", + "matplotlib>=3.10", "statsmodels>=0.14.4", "filterpy>=1.4.5", "numba>=0.60.0", @@ -80,20 
+86,20 @@ Changelog = "https://github.com/dnv-opensource/axtreme/blob/main/CHANGELOG.md" dev = [ "pytest>=8.3", "pytest-cov>=6.0", - "ruff>=0.8.3", - "pyright>=1.1.390", - "mypy>=1.13", - "sourcery>=1.27", + "ruff>=0.9.2", + "pyright>=1.1.392", + "mypy>=1.14", + "sourcery>=1.31", "pre-commit>=4.0", "Sphinx>=8.1", "sphinx-argparse-cli>=1.19", - "sphinx-autodoc-typehints>=2.5", + "sphinx-autodoc-typehints>=3.0", "sphinxcontrib-mermaid>=1.0.0", "myst-parser>=4.0", "furo>=2024.8", "jupyter>=1.1", "pandas-stubs>=2.2", - "pyarrow>=18.1", + "pyarrow>=19.0", ] [tool.uv] diff --git a/pytest.ini b/pytest.ini index c82d0150..35ac75c0 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,7 +1,7 @@ [pytest] testpaths = tests -addopts = --strict-markers --verbose +addopts = --strict-markers --verbose --durations=10 xfail_strict = True filterwarnings = # Filter out parameters and sklearn deprecation warnings. diff --git a/ruff.toml b/ruff.toml index 65de6bb8..bb1e7257 100644 --- a/ruff.toml +++ b/ruff.toml @@ -33,6 +33,9 @@ ignore = [ "TRY003", # Avoid specifying long messages outside the exception class "PLR1711", # Useless `return` statement at end of function "G00", # Logging statement uses string formatting ('G00' covers all rules flagging string formatting in logging, e.g. G001, G002, etc.) 
+ "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes + "PLW0603", # Using the global statement to update {name} is discouraged + "PYI041", # Use `float` instead of `int | float` # Ruff lint rules recommended to keep enabled, # but which are typical candidates you might have a need to ignore, @@ -74,8 +77,6 @@ allowed-confusables = [ [lint.pep8-naming] ignore-names = [ "test_*", - "setUp", - "tearDown", ] [lint.pylint] @@ -94,7 +95,7 @@ raises-require-match-for = [ [lint.per-file-ignores] # `__init__.py` specific ignores "__init__.py" = [ - "D104", # Missing docstring in public package + "D104", # Missing docstring in public package <- @TODO: Reactivate and resolve @ ClaasRostock, 2025-01-20 "F401", # {name} imported but unused (NOTE: ignored as imports in `__init__.py` files are almost never used inside the module, but are intended for namespaces) "I001", # Import block is un-sorted or un-formatted "PLC0414", # Import alias does not rename original package @@ -108,9 +109,11 @@ raises-require-match-for = [ "PLR2004", # Magic value used in comparison "ANN201", # Missing return type annotation for public function "ANN202", # Missing return type annotation for private function + "ARG001", # Unused function argument: {name} + "ARG002", # Unused method argument: {name} "INP001", # File is part of an implicit namespace package. Add an `__init__.py`. (NOTE: tests are not intended to be a module, __init__.py hence not required.) 
"SLF001", # Private member accessed - "N802", # Function name should be lowercase + "N802", # Function name should be lowercase <- @TODO: Reactivate and resolve @ ClaasRostock, 2025-01-20 "TRY004", # Prefer `TypeError` exception for invalid type ] # `stubs` specific ignores @@ -139,6 +142,7 @@ raises-require-match-for = [ ] # `tutorials` specific ignores "tutorials/**/*" = [ + "D", # Missing docstrings "S101", # Use of assert detected "PLR2004", # Magic value used in comparison "INP001", # File is part of an implicit namespace package. Add an `__init__.py`. (NOTE: tutorials are not intended to be a module, __init__.py hence not required.) @@ -154,6 +158,11 @@ raises-require-match-for = [ "N803", # allow capital variables (e.g X). The botorch interface requireds this "N806", # allow capital variables (e.g X). The botorch interface requireds this ] +# utils/logging specific ignores +"**/utils/logging.py" = [ + "A005", # Module `logging` shadows a Python standard-library module +] + [lint.pydocstyle] convention = "google" diff --git a/src/axtreme/data/batch_invariant_sampler.py b/src/axtreme/data/batch_invariant_sampler.py index 586e250d..dce15328 100644 --- a/src/axtreme/data/batch_invariant_sampler.py +++ b/src/axtreme/data/batch_invariant_sampler.py @@ -111,7 +111,7 @@ def __init__(self, sampler: Sampler[int] | Iterable[int], batch_shape: torch.Siz if partial_batch_data_len % batch_shape[0] != 0: msg = ( f"Final batch will have {partial_batch_data_len} items," - f" which does not fit into batch shape {torch.Size([*batch_shape[:-1] , -1])}" + f" which does not fit into batch shape {torch.Size([*batch_shape[:-1], -1])}" ) raise ValueError(msg) diff --git a/src/axtreme/eval/qoi_helpers.py b/src/axtreme/eval/qoi_helpers.py index ef44d593..4a7bdbe6 100644 --- a/src/axtreme/eval/qoi_helpers.py +++ b/src/axtreme/eval/qoi_helpers.py @@ -66,7 +66,7 @@ def plot_col_histogram(df: pd.DataFrame, ax: Axes, col_name: str = "mean", brute f"mean of dist {values.mean():.3f}." 
f" std of dist {values.std():.3f}," # Protect against divide by error error - f"C.o.V {values.std()/ values.mean() if values.mean() > 1e-2 else np.nan:.3f}" # noqa: PLR2004 + f"C.o.V {values.std() / values.mean() if values.mean() > 1e-2 else np.nan:.3f}" # noqa: PLR2004 ) _ = ax.set_title(title_str) _ = ax.set_ylabel("density") diff --git a/src/axtreme/evaluation.py b/src/axtreme/evaluation.py index 1a162974..32a46177 100644 --- a/src/axtreme/evaluation.py +++ b/src/axtreme/evaluation.py @@ -144,9 +144,9 @@ def run_simulator( y = self.simulator(x=x, n_simulations_per_point=self.n_simulations_per_point) assert y.ndim == 3, f"simulation_result.ndim must be 3, got: {y.ndim}" # noqa: PLR2004 - assert ( - y.shape[1] == self.n_simulations_per_point - ), f"simulation_result.shape[1] must be {self.n_simulations_per_point}, got: {y.shape[1]}" + assert y.shape[1] == self.n_simulations_per_point, ( + f"simulation_result.shape[1] must be {self.n_simulations_per_point}, got: {y.shape[1]}" + ) assert y.shape[2] == 1, f"simulation_result.shape[2] currenlty only support single output, got: {y.shape[2]}" return y diff --git a/src/axtreme/experiment.py b/src/axtreme/experiment.py index 69434d57..67d7e74e 100644 --- a/src/axtreme/experiment.py +++ b/src/axtreme/experiment.py @@ -207,9 +207,9 @@ def add_simulation_data_to_experiment( """ _: Any runner = experiment.runner - assert isinstance( - runner, LocalMetadataRunner - ), f"Expected experiment.runner to be of type LocalMetadataRunner, got: {type(runner)}" + assert isinstance(runner, LocalMetadataRunner), ( + f"Expected experiment.runner to be of type LocalMetadataRunner, got: {type(runner)}" + ) # Change format of inputs into parameterization dicts parameterizations: list[TParameterization] = [] diff --git a/src/axtreme/sampling/independent_sampler.py b/src/axtreme/sampling/independent_sampler.py index 6f2db05f..0c4a29d4 100644 --- a/src/axtreme/sampling/independent_sampler.py +++ b/src/axtreme/sampling/independent_sampler.py @@ 
-170,8 +170,7 @@ def _explicit_posterior_shape(cls, posterior: GPyTorchPosterior) -> torch.Size: # We do not expect different distibutions, but the ax/botorch codebase is large, so making this explicity. msg = ( - f"Expected posterior.distribution of type MultivariateNormal or MultitaskMultivariateNormal." - f"Got {dist_type}" + f"Expected posterior.distribution of type MultivariateNormal or MultitaskMultivariateNormal.Got {dist_type}" ) raise ValueError(msg) diff --git a/src/axtreme/utils/modelbridge_utils.py b/src/axtreme/utils/modelbridge_utils.py index 43517e2d..e464db1b 100644 --- a/src/axtreme/utils/modelbridge_utils.py +++ b/src/axtreme/utils/modelbridge_utils.py @@ -33,12 +33,14 @@ def observations_to_arrays( >>> of = [ObservationFeatures(parameters={"x1": x[0], "x2": x[1]}) for x in X] >>> od = [ObservationData(metric_names=["y"], means=y, covariance=np.eye(1)) for y in Y] >>> observations = [Observation(features=f, data=d) for f, d in zip(of, od)] - >>> features, f, cov = observations_to_arrays(param_names=["x1", "x2"], outcomes=["y"], observations=observations) + >>> features, f, cov = observations_to_arrays( + ... param_names=["x1", "x2"], outcomes=["y"], observations=observations + ... ) >>> assert np.array_equal(features, X) >>> assert np.array_equal(f, Y) >>> # There is only one output per x point. This will have perfect covariance with itself. 
>>> assert np.array_equal(cov, np.ones([3, 1, 1])) - """ # noqa: E501 + """ features_array = modelbridge_utils.observation_features_to_array(param_names, [o.features for o in observations]) f, cov = modelbridge_utils.observation_data_to_array(outcomes, [o.data for o in observations]) return features_array, f, cov diff --git a/src/axtreme/utils/transforms.py b/src/axtreme/utils/transforms.py index be07de34..82a81b22 100644 --- a/src/axtreme/utils/transforms.py +++ b/src/axtreme/utils/transforms.py @@ -154,7 +154,7 @@ def check_transform_not_applied( msg = ( f"expected {transform.__class__}.{parameter_names_store} to be empty/falsey," - f" instead found {getattr(transform,parameter_names_store)} indicating this transform is being used" + f" instead found {getattr(transform, parameter_names_store)} indicating this transform is being used" ) raise AssertionError(msg) diff --git a/tests/acquisition/test_qoi_look_ahead.py b/tests/acquisition/test_qoi_look_ahead.py index b82b2f8c..09ec0bd9 100644 --- a/tests/acquisition/test_qoi_look_ahead.py +++ b/tests/acquisition/test_qoi_look_ahead.py @@ -102,7 +102,7 @@ def test_batch_lookahead_correct_reshape(b: None | int, n: int, d: int, m: int, acqf = QoILookAhead(model=None, qoi_estimator=None) # type: ignore[arg-type] # mock the lookahead function. 
Simply return the first dimension in y - def lookahead(x_point: torch.Tensor, y_point: torch.Tensor, yvar_point: torch.Tensor | None) -> torch.Tensor: # noqa: ARG001 + def lookahead(x_point: torch.Tensor, y_point: torch.Tensor, yvar_point: torch.Tensor | None) -> torch.Tensor: return y_point[0] acqf.lookahead = lookahead # type: ignore[method-assign] diff --git a/tests/distributions/test_icdf.py b/tests/distributions/test_icdf.py index b3a23ecb..8de4a95c 100644 --- a/tests/distributions/test_icdf.py +++ b/tests/distributions/test_icdf.py @@ -37,7 +37,7 @@ def test_expanded_inputs_produce_expected_results(self): def mock_cdf( dist: Distribution, quantile: float, - max_acceptable_error: float, # noqa: ARG001 + max_acceptable_error: float, bounds: tuple[float, float], ) -> torch.Tensor: """Encodes the inputs into a single tensor which can be tracked though the function. @@ -86,7 +86,7 @@ def test_q_shapes(self, quantile: torch.Tensor, expected_results: torch.Tensor): def mock_cdf( dist: Distribution, quantile: float, - max_acceptable_error: float, # noqa: ARG001 + max_acceptable_error: float, bounds: tuple[float, float], ) -> torch.Tensor: """Encodes the inputs into a single tensor which can be tracked though the function. @@ -135,7 +135,7 @@ def test_bounds_shapes(self, bounds: torch.Tensor, expected_results: torch.Tenso def mock_cdf( dist: Distribution, quantile: float, - max_acceptable_error: float, # noqa: ARG001 + max_acceptable_error: float, bounds: tuple[float, float], ) -> torch.Tensor: """Encodes the inputs into a single tensor which can be tracked though the function. 
diff --git a/tests/qoi/test_gp_brute_force_system.py b/tests/qoi/test_gp_brute_force_system.py index ce9e6704..4313b6a2 100644 --- a/tests/qoi/test_gp_brute_force_system.py +++ b/tests/qoi/test_gp_brute_force_system.py @@ -263,7 +263,7 @@ def test_qoi_brute_force_system_test( # noqa: C901, PLR0913, PLR0912, PLR0915 if run_tests: stats_ground_truth = statistics["ground_truth"] assert abs(stats_ground_truth["best_guess_z"]) < 4.3 * error_tol_scaling - print(f"Ground truth {(time.time()-start_time)//60:.0f}:{time.time()-start_time:.2f}") + print(f"Ground truth {(time.time() - start_time) // 60:.0f}:{time.time() - start_time:.2f}") ##### Qoi_no_gp """QoI_no_gp testing @@ -297,7 +297,7 @@ def test_qoi_brute_force_system_test( # noqa: C901, PLR0913, PLR0912, PLR0915 assert stats_no_gp["var_std"] == pytest.approx(stats_ground_truth["var_std"], rel=0.5 * error_tol_scaling) # fmt: on - print(f"QoI_no_gp {(time.time()-start_time)//60:.0f}:{time.time()-start_time:.2f}") + print(f"QoI_no_gp {(time.time() - start_time) // 60:.0f}:{time.time() - start_time:.2f}") ##### Highly trained GP """Deterministic GP Expections: @@ -326,7 +326,7 @@ def test_qoi_brute_force_system_test( # noqa: C901, PLR0913, PLR0912, PLR0915 stats_gp_deterministic = statistics["qoi_gp_deterministic"] assert abs(stats_gp_deterministic["best_guess_z"]) < 5 * error_tol_scaling - print(f"Deterministic GP {(time.time()-start_time)//60:.0f}:{time.time()-start_time:.2f}") + print(f"Deterministic GP {(time.time() - start_time) // 60:.0f}:{time.time() - start_time:.2f}") """Gp low variance @@ -362,7 +362,7 @@ def test_qoi_brute_force_system_test( # noqa: C901, PLR0913, PLR0912, PLR0915 assert stats_low_uncertainty["var_std"] == pytest.approx(stats_ground_truth["var_std"], rel=0.5 * error_tol_scaling) # noqa: E501 # fmt: on - print(f"Gp low variance {(time.time()-start_time)//60:.0f}:{time.time()-start_time:.2f}") + print(f"Gp low variance {(time.time() - start_time) // 60:.0f}:{time.time() - start_time:.2f}") """Gp 
high variance The GP has a larger amount of uncertianty. It is expected to produce moer uncertain results. @@ -385,7 +385,7 @@ def test_qoi_brute_force_system_test( # noqa: C901, PLR0913, PLR0912, PLR0915 ) fig_qoi_gp_high_uncertainty.savefig(str(output_dir / "qoi_gp_high_uncertainty.png")) if output_dir else None fig_qoi_gp_high_uncertainty.show() if show_plots else None - print(f"Gp high variance {(time.time()-start_time)//60:.0f}:{time.time()-start_time:.2f}") + print(f"Gp high variance {(time.time() - start_time) // 60:.0f}:{time.time() - start_time:.2f}") # TODO(sw 2024-12-9): This is a hacky fix so statistics are easily available when calibrating bounds (see bottom of # file), and they are not returned in general (when this is running through pytest). Statistic should probably be diff --git a/tests/qoi/test_marginal_cdf_extrapolation_system.py b/tests/qoi/test_marginal_cdf_extrapolation_system.py index 0e292339..29426807 100644 --- a/tests/qoi/test_marginal_cdf_extrapolation_system.py +++ b/tests/qoi/test_marginal_cdf_extrapolation_system.py @@ -286,7 +286,7 @@ def test_qoi_brute_force_system_test( # noqa: C901, PLR0912, PLR0913, PLR0915 if run_tests: stats_ground_truth = statistics["ground_truth"] assert abs(stats_ground_truth["best_guess_z"]) < 4.3 * error_tol_scaling - print(f"Ground truth {(time.time()-start_time)//60:.0f}:{time.time()-start_time:.2f}") + print(f"Ground truth {(time.time() - start_time) // 60:.0f}:{time.time() - start_time:.2f}") ##### Qoi_no_gp """QoI_no_gp testing @@ -329,7 +329,7 @@ def test_qoi_brute_force_system_test( # noqa: C901, PLR0912, PLR0913, PLR0915 assert stats_no_gp["var_std"] == pytest.approx(0, abs=1e-10 * error_tol_scaling) # fmt: on - print(f"QoI_no_gp {(time.time()-start_time)//60:.0f}:{time.time()-start_time:.2f}") + print(f"QoI_no_gp {(time.time() - start_time) // 60:.0f}:{time.time() - start_time:.2f}") ##### Highly trained GP """Deterministic GP Expections: @@ -363,7 +363,7 @@ def test_qoi_brute_force_system_test( 
# noqa: C901, PLR0912, PLR0913, PLR0915 stats_gp_deterministic = statistics["qoi_gp_deterministic"] assert abs(stats_gp_deterministic["best_guess_z"]) < 5 * error_tol_scaling - print(f"Deterministic GP {(time.time()-start_time)//60:.0f}:{time.time()-start_time:.2f}") + print(f"Deterministic GP {(time.time() - start_time) // 60:.0f}:{time.time() - start_time:.2f}") """Gp low variance @@ -405,7 +405,7 @@ def test_qoi_brute_force_system_test( # noqa: C901, PLR0912, PLR0913, PLR0915 assert stats_low_uncertainty["var_mean"] == pytest.approx(.012, rel=0.1 * error_tol_scaling) # fmt: on - print(f"Gp low variance {(time.time()-start_time)//60:.0f}:{time.time()-start_time:.2f}") + print(f"Gp low variance {(time.time() - start_time) // 60:.0f}:{time.time() - start_time:.2f}") """Gp high variance The GP has a larger amount of uncertianty. It is expected to produce moer uncertain results. @@ -432,7 +432,7 @@ def test_qoi_brute_force_system_test( # noqa: C901, PLR0912, PLR0913, PLR0915 ) fig_qoi_gp_high_uncertainty.savefig(str(output_dir / "qoi_gp_high_uncertainty.png")) if output_dir else None fig_qoi_gp_high_uncertainty.show() if show_plots else None - print(f"Gp high variance {(time.time()-start_time)//60:.0f}:{time.time()-start_time:.2f}") + print(f"Gp high variance {(time.time() - start_time) // 60:.0f}:{time.time() - start_time:.2f}") # TODO(sw 2024-12-9): This is a hacky fix so statistics are easily available when calibrating bounds (see bottom of # file), and they are not returned in general (when this is running through pytest). Statistic should probably be # saved with the plots as well. 
diff --git a/tests/sampling/test_independent_sampler.py b/tests/sampling/test_independent_sampler.py index 73bad670..b37ffedf 100644 --- a/tests/sampling/test_independent_sampler.py +++ b/tests/sampling/test_independent_sampler.py @@ -116,7 +116,7 @@ def test_forward_same_samples_used_at_different_n_points_MultivariateNormal(): manual_base_shape = torch.tensor([[1]]) class MockIndependentMCSampler(IndependentMCSampler): - def _construct_base_samples(self, posterior: GPyTorchPosterior) -> None: # noqa: ARG002 + def _construct_base_samples(self, posterior: GPyTorchPosterior) -> None: self.register_buffer("base_samples", manual_base_shape) sampler = MockIndependentMCSampler(sample_shape=torch.Size([1])) diff --git a/tutorials/ax_botorch/botrch_minimal_example_custom_acq.ipynb b/tutorials/ax_botorch/botrch_minimal_example_custom_acq.ipynb index 29de5c25..8408a90f 100644 --- a/tutorials/ax_botorch/botrch_minimal_example_custom_acq.ipynb +++ b/tutorials/ax_botorch/botrch_minimal_example_custom_acq.ipynb @@ -854,8 +854,8 @@ "outputs": [], "source": [ "# For our purposes, the metric is a wrapper that structures the function output.\n", - "class DummyMetric(Metric): # noqa: D101\n", - " def fetch_trial_data(self, trial): # type: ignore # noqa: ANN001, D102\n", + "class DummyMetric(Metric):\n", + " def fetch_trial_data(self, trial): # type: ignore # noqa: ANN001\n", " records = []\n", " for arm_name, arm in trial.arms_by_name.items():\n", " params = arm.parameters\n", @@ -891,8 +891,8 @@ ")\n", "\n", "\n", - "class MyRunner(Runner): # noqa: D101\n", - " def run(self, trial): # type: ignore # noqa: ANN001, D102\n", + "class MyRunner(Runner):\n", + " def run(self, trial): # type: ignore # noqa: ANN001\n", " trial_metadata = {\"name\": str(trial.index)}\n", " return trial_metadata\n", "\n", @@ -1145,7 +1145,7 @@ "outputs": [], "source": [ "for i in range(5):\n", - " print(f\"Running trial {i+1}/30...\")\n", + " print(f\"Running trial {i + 1}/30...\")\n", " parameters, 
trial_index = ax_client.get_next_trial()\n", " # Local evaluation here can be replaced with deployment to external system.\n", " ax_client.complete_trial(trial_index=trial_index, raw_data=evaluate(parameters))" diff --git a/tutorials/data_guide.py b/tutorials/data_guide.py index dd9e39bc..7b87ad51 100644 --- a/tutorials/data_guide.py +++ b/tutorials/data_guide.py @@ -31,13 +31,13 @@ class MinimalDataset(Dataset[torch.Tensor]): """See axtreme.data for more detailed examples.""" - def __init__(self, data: NDArray[np.float64 | np.int32]) -> None: # noqa: D107 + def __init__(self, data: NDArray[np.float64 | np.int32]) -> None: self.data = data - def __len__(self) -> int: # noqa: D105 + def __len__(self) -> int: return len(self.data) - def __getitem__(self, idx: int) -> torch.Tensor: # noqa: D105 + def __getitem__(self, idx: int) -> torch.Tensor: return torch.from_numpy(self.data[idx, :])