diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4cfb001b..4179ce3d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ ### Issues -Use [GitHub Issues](https://github.com/photosynthesis-team/photosynthesis.metrics/issues) for bug reports and feature requests. +Use [GitHub Issues](https://github.com/photosynthesis-team/piq/issues) for bug reports and feature requests. ### Step-by-step guide diff --git a/README.md b/README.md index d105711e..54df6a50 100644 --- a/README.md +++ b/README.md @@ -1,28 +1,29 @@ -# PhotoSynthesis.Metrics -![CI flake-8 style check][ci-flake-8-style-check-shield] -![CI testing][ci-testing] -[![MIT License][license-shield]][license-url] -[![LinkedIn][linkedin-shield]][linkedin-url] +
+ +# PyTorch Image Quality +[![License][license-shield]][license-url] [![PyPI version][pypi-version-shield]][pypi-version-url] +![CI flake-8 style check][ci-flake-8-style-check-shield] +![CI testing][ci-testing] [![Quality Gate Status][quality-gate-status-shield]][quality-gate-status-url] [![Maintainability Rating][maintainability-raiting-shield]][maintainability-raiting-url] [![Reliability Rating][reliability-rating-badge]][reliability-rating-url] - +
-PyTorch library with measures and metrics for various image-to-image tasks like denoising, super-resolution, -image generation etc. This easy to use yet flexible and extensive library is developed with focus on reliability -and reproducibility of results. Use your favourite measures as losses for training neural networks with ready-to-use -PyTorch modules. - +Collection of measures and metrics for automatic image quality assessment in various image-to-image tasks such as +denoising, super-resolution, image generation, etc. +This easy-to-use yet flexible and extensive library is developed with a focus on reliability and +reproducibility of results. +Use your favourite measures as losses for training neural networks with ready-to-use PyTorch modules. ### Getting started ```python import torch -from photosynthesis_metrics import ssim +from piq import ssim prediction = torch.rand(3, 3, 256, 256) target = torch.rand(3, 3, 256, 256) @@ -35,7 +36,7 @@ ssim_index = ssim(prediction, target, data_range=1.)
-Peak Signal-to-Noise Ration (PSNR) +Peak Signal-to-Noise Ratio (PSNR)

To compute PSNR as a measure, use lower case function from the library. @@ -44,7 +45,7 @@ You can specify other reduction methods by `reduction` flag. ```python import torch -from photosynthesis_metrics import psnr +from piq import psnr from typing import Union, Tuple prediction = torch.rand(3, 3, 256, 256) @@ -65,7 +66,7 @@ Note: Colour images are first converted to YCbCr format and only luminance compo To compute SSIM index as a measure, use lower case function from the library: ```python import torch -from photosynthesis_metrics import ssim +from piq import ssim from typing import Union, Tuple prediction = torch.rand(3, 3, 256, 256) @@ -76,7 +77,7 @@ ssim_index: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]] = ssim(predic In order to use SSIM as a loss function, use corresponding PyTorch module: ```python import torch -from photosynthesis_metrics import SSIMLoss +from piq import SSIMLoss loss = SSIMLoss(data_range=1.) prediction = torch.rand(3, 3, 256, 256, requires_grad=True) @@ -95,7 +96,7 @@ output.backward() To compute MS-SSIM index as a measure, use lower case function from the library: ```python import torch -from photosynthesis_metrics import multi_scale_ssim +from piq import multi_scale_ssim prediction = torch.rand(3, 3, 256, 256) target = torch.rand(3, 3, 256, 256) @@ -105,7 +106,7 @@ ms_ssim_index: torch.Tensor = multi_scale_ssim(prediction, target, data_range=1. In order to use MS-SSIM as a loss function, use corresponding PyTorch module: ```python import torch -from photosynthesis_metrics import MultiScaleSSIMLoss +from piq import MultiScaleSSIMLoss loss = MultiScaleSSIMLoss(data_range=1.) prediction = torch.rand(3, 3, 256, 256, requires_grad=True) @@ -124,7 +125,7 @@ output.backward() To compute TV as a measure, use lower case function from the library: ```python import torch -from photosynthesis_metrics import total_variation +from piq import total_variation data = torch.rand(3, 3, 256, 256) tv: torch.Tensor = total_variation(data) @@ -133,7 +134,7 @@ tv: torch.Tensor = total_variation(data) In order to use TV as a loss function, use corresponding PyTorch module: ```python import torch -from photosynthesis_metrics import TVLoss +from piq import TVLoss loss = TVLoss() prediction = torch.rand(3, 3, 256, 256, requires_grad=True) @@ -151,7 +152,7 @@ output.backward() To compute VIF as a measure, use lower case function from the library: ```python import torch -from photosynthesis_metrics import vif_p +from piq import vif_p predicted = torch.rand(3, 3, 256, 256) target = torch.rand(3, 3, 256, 256) @@ -161,12 +162,12 @@ vif: torch.Tensor = vif_p(predicted, target, data_range=1.) In order to use VIF as a loss function, use corresponding PyTorch class: ```python import torch -from photosynthesis_metrics import VIFLoss +from piq import VIFLoss loss = VIFLoss(sigma_n_sq=2.0, data_range=1.) prediction = torch.rand(3, 3, 256, 256, requires_grad=True) target = torch.rand(3, 3, 256, 256) -ouput: torch.Tensor = loss(prediction, target) +output: torch.Tensor = loss(prediction, target) output.backward() ``` @@ -184,12 +185,12 @@ It can be used both as a measure and as a loss function. In any case it should m Usually values of GMSD lie in [0, 0.35] interval. ```python import torch -from photosynthesis_metrics import GMSDLoss +from piq import GMSDLoss loss = GMSDLoss(data_range=1.) 
prediction = torch.rand(3, 3, 256, 256, requires_grad=True) target = torch.rand(3, 3, 256, 256) -ouput: torch.Tensor = loss(prediction, target) +output: torch.Tensor = loss(prediction, target) output.backward() ```
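The section above notes that GMSD can be used as a plain measure as well as a loss. A minimal sketch of the measure-only usage, relying solely on the `GMSDLoss` module already shown and disabling gradient tracking since no backward pass is needed:

```python
import torch
from piq import GMSDLoss

# Use GMSD purely as a measure: no gradients required.
metric = GMSDLoss(data_range=1.)
prediction = torch.rand(3, 3, 256, 256)
target = torch.rand(3, 3, 256, 256)
with torch.no_grad():
    gmsd_value: torch.Tensor = metric(prediction, target)
print(float(gmsd_value))  # usually falls within the [0, 0.35] interval mentioned above
```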

@@ -204,12 +205,12 @@ It can be used both as a measure and as a loss function. In any case it should m By default, scale weights are initialized with values from the paper. You can change them by passing a list of 4 variables to the `scale_weights` argument during initialization. Both GMSD and MS-GMSD are computed for greyscale images, but to take contrast changes into account the authors proposed to also add a chromatic component. Use the `chromatic` flag to use the MS-GMSDc version of the loss. ```python import torch -from photosynthesis_metrics import MultiScaleGMSDLoss +from piq import MultiScaleGMSDLoss loss = MultiScaleGMSDLoss(chromatic=True, data_range=1.) prediction = torch.rand(3, 3, 256, 256, requires_grad=True) target = torch.rand(3, 3, 256, 256) -ouput: torch.Tensor = loss(prediction, target) +output: torch.Tensor = loss(prediction, target) output.backward() ```
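The paragraph above also mentions the `scale_weights` argument. A minimal sketch of overriding it is given below; the four weight values are illustrative placeholders, not the defaults from the paper:

```python
import torch
from piq import MultiScaleGMSDLoss

# Placeholder weights for the four scales, shown only to illustrate the argument.
loss = MultiScaleGMSDLoss(scale_weights=[0.25, 0.25, 0.25, 0.25], data_range=1.)
prediction = torch.rand(3, 3, 256, 256, requires_grad=True)
target = torch.rand(3, 3, 256, 256)
output: torch.Tensor = loss(prediction, target)
output.backward()
```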

@@ -223,7 +224,7 @@ output.backward() To compute [BRISQUE score](https://live.ece.utexas.edu/publications/2012/TIP%20BRISQUE.pdf) as a measure, use lower case function from the library: ```python import torch -from photosynthesis_metrics import brisque +from piq import brisque from typing import Union, Tuple prediction = torch.rand(3, 3, 256, 256) @@ -233,7 +234,7 @@ brisque_index: torch.Tensor = brisque(prediction, data_range=1.) In order to use BRISQUE as a loss function, use corresponding PyTorch module: ```python import torch -from photosynthesis_metrics import BRISQUELoss +from piq import BRISQUELoss loss = BRISQUELoss(data_range=1.) prediction = torch.rand(3, 3, 256, 256, requires_grad=True) @@ -252,7 +253,7 @@ Use `MSID` class to compute [MSID score](https://arxiv.org/abs/1905.11141) from pre-extracted from some feature extractor network: ```python import torch -from photosynthesis_metrics import MSID +from piq import MSID msid_metric = MSID() prediction_feats = torch.rand(10000, 1024) @@ -265,7 +266,7 @@ Please note that `_compute_feats` consumes a data loader of predefined format. ```python import torch from torch.utils.data import DataLoader -from photosynthesis_metrics import MSID +from piq import MSID first_dl, second_dl = DataLoader(), DataLoader() msid_metric = MSID() @@ -285,7 +286,7 @@ Use `FID` class to compute [FID score](https://arxiv.org/abs/1706.08500) from im pre-extracted from some feature extractor network: ```python import torch -from photosynthesis_metrics import FID +from piq import FID fid_metric = FID() prediction_feats = torch.rand(10000, 1024) @@ -298,7 +299,7 @@ Please note that `_compute_feats` consumes a data loader of predefined format. ```python import torch from torch.utils.data import DataLoader -from photosynthesis_metrics import FID +from piq import FID first_dl, second_dl = DataLoader(), DataLoader() fid_metric = FID() @@ -318,7 +319,7 @@ Use `KID` class to compute [KID score](https://arxiv.org/abs/1801.01401) from im pre-extracted from some feature extractor network: ```python import torch -from photosynthesis_metrics import KID +from piq import KID kid_metric = KID() prediction_feats = torch.rand(10000, 1024) @@ -331,7 +332,7 @@ Please note that `_compute_feats` consumes a data loader of predefined format. ```python import torch from torch.utils.data import DataLoader -from photosynthesis_metrics import KID +from piq import KID first_dl, second_dl = DataLoader(), DataLoader() kid_metric = KID() @@ -351,7 +352,7 @@ Use `GS` class to compute [Geometry Score](https://arxiv.org/abs/1802.02664) fro pre-extracted from some feature extractor network. Computation is heavily CPU dependent, adjust `num_workers` parameter according to your system configuration: ```python import torch -from photosynthesis_metrics import GS +from piq import GS gs_metric = GS(sample_size=64, num_iters=100, i_max=100, num_workers=4) prediction_feats = torch.rand(10000, 1024) @@ -373,16 +374,16 @@ Use `inception_score` function to compute [IS](https://arxiv.org/abs/1606.03498) pre-extracted from some feature extractor network. 
Note that we follow the recommendations from the paper [A Note on the Inception Score](https://arxiv.org/pdf/1801.01973.pdf), which proposed a small modification to the original algorithm: ```python import torch -from photosynthesis_metrics import inception_score +from piq import inception_score prediction_feats = torch.rand(10000, 1024) -mean: torch.Tensor, variance: torch.Tensor = inception_score(prediction_feats, num_splits=10) +mean, variance = inception_score(prediction_feats, num_splits=10) ``` To compute the difference between IS for 2 sets of image features, use the `IS` class. ```python import torch -from photosynthesis_metrics import IS +from piq import IS is_metric = IS(distance='l1') @@ -407,7 +408,7 @@ distance: torch.Tensor = is_metric(prediction_feats, target_feats) ### Overview -*PhotoSynthesis.Metrics* helps you to concentrate on your experiments without the boilerplate code. +*PyTorch Image Quality* (formerly [PhotoSynthesis.Metrics](https://pypi.org/project/photosynthesis-metrics/0.4.0/)) helps you to concentrate on your experiments without the boilerplate code. The library contains a set of measures and metrics that is constantly getting extended. For measures/metrics that can be used as loss functions, corresponding PyTorch modules are implemented. @@ -415,17 +416,17 @@ For measures/metrics that can be used as loss functions, corresponding PyTorch m #### Installation -`$ pip install photosynthesis-metrics` +`$ pip install piq` If you want to use the latest features straight from the master, clone the repo: ```sh -$ git clone https://github.com/photosynthesis-team/photosynthesis.metrics.git +$ git clone https://github.com/photosynthesis-team/piq.git ``` #### Roadmap -See the [open issues](https://github.com/photosynthesis-team/photosynthesis.metrics/issues) for a list of proposed +See the [open issues](https://github.com/photosynthesis-team/piq/issues) for a list of proposed features and known issues. @@ -438,7 +439,7 @@ features and known issues. We appreciate all contributions. If you plan to: - contribute back bug-fixes, please do so without any further discussion -- close one of [open issues](https://github.com/photosynthesis-team/photosynthesis.metrics/issues), please do so if no one has been assigned to it +- close one of [open issues](https://github.com/photosynthesis-team/piq/issues), please do so if no one has been assigned to it - contribute new features, utility functions or extensions, please first open an issue and discuss the feature with us Please see the [contribution guide](CONTRIBUTING.md) for more information. @@ -449,7 +450,7 @@ Please see the [contribution guide](CONTRIBUTING.md) for more information.
**Sergey Kastryulin** - [@snk4tr](https://github.com/snk4tr) - `snk4tr@gmail.com` -Project Link: [https://github.com/photosynthesis-team/photosynthesis.metrics](https://github.com/photosynthesis-team/photosynthesis.metrics) +Project Link: [https://github.com/photosynthesis-team/piq](https://github.com/photosynthesis-team/piq) PhotoSynthesis Team: [https://github.com/photosynthesis-team](https://github.com/photosynthesis-team) Other projects by PhotoSynthesis Team: @@ -467,13 +468,11 @@ Other projects by PhotoSynthesis Team: [license-shield]: https://img.shields.io/badge/License-Apache%202.0-blue.svg -[license-url]: https://github.com/photosynthesis-team/photosynthesis.metrics/blob/master/LICENSE -[linkedin-shield]: https://img.shields.io/badge/-LinkedIn-black.svg?style=flat-square&logo=linkedin&colorB=555 -[linkedin-url]: https://www.linkedin.com/in/sergey-kastryulin/ -[ci-flake-8-style-check-shield]: https://github.com/photosynthesis-team/photosynthesis.metrics/workflows/flake-8%20style%20check/badge.svg -[ci-testing]: https://github.com/photosynthesis-team/photosynthesis.metrics/workflows/testing/badge.svg -[pypi-version-shield]: https://badge.fury.io/py/photosynthesis-metrics.svg -[pypi-version-url]: https://badge.fury.io/py/photosynthesis-metrics +[license-url]: https://github.com/photosynthesis-team/piq/blob/master/LICENSE +[ci-flake-8-style-check-shield]: https://github.com/photosynthesis-team/piq/workflows/flake-8%20style%20check/badge.svg +[ci-testing]: https://github.com/photosynthesis-team/piq/workflows/testing/badge.svg +[pypi-version-shield]: https://badge.fury.io/py/piq.svg +[pypi-version-url]: https://badge.fury.io/py/piq [quality-gate-status-shield]: https://sonarcloud.io/api/project_badges/measure?project=photosynthesis-team_photosynthesis.metrics&metric=alert_status [quality-gate-status-url]: https://sonarcloud.io/dashboard?id=photosynthesis-team_photosynthesis.metrics [maintainability-raiting-shield]: https://sonarcloud.io/api/project_badges/measure?project=photosynthesis-team_photosynthesis.metrics&metric=sqale_rating diff --git a/photosynthesis_metrics/__init__.py b/piq/__init__.py similarity index 94% rename from photosynthesis_metrics/__init__.py rename to piq/__init__.py index 17eb7e94..8717e855 100644 --- a/photosynthesis_metrics/__init__.py +++ b/piq/__init__.py @@ -1,4 +1,4 @@ -__version__ = "0.4.0" +__version__ = "0.4.1" from .ssim import ssim, multi_scale_ssim, SSIMLoss, MultiScaleSSIMLoss from .msid import MSID diff --git a/photosynthesis_metrics/base.py b/piq/base.py similarity index 94% rename from photosynthesis_metrics/base.py rename to piq/base.py index b75f49f7..d39d9b98 100644 --- a/photosynthesis_metrics/base.py +++ b/piq/base.py @@ -1,7 +1,7 @@ import torch -from photosynthesis_metrics.feature_extractors.fid_inception import InceptionV3 -from photosynthesis_metrics.utils import _validate_features +from piq.feature_extractors.fid_inception import InceptionV3 +from piq.utils import _validate_features class BaseFeatureMetric(torch.nn.Module): diff --git a/photosynthesis_metrics/brisque.py b/piq/brisque.py similarity index 98% rename from photosynthesis_metrics/brisque.py rename to piq/brisque.py index 03db15ca..29819b12 100644 --- a/photosynthesis_metrics/brisque.py +++ b/piq/brisque.py @@ -12,7 +12,7 @@ from torch.nn.modules.loss import _Loss from torch.utils.model_zoo import load_url import torch.nn.functional as F -from photosynthesis_metrics.utils import _adjust_dimensions, _validate_input +from piq.utils import _adjust_dimensions, _validate_input def 
_ggd_parameters(x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: @@ -138,7 +138,7 @@ def _RBF_kernel(features: torch.Tensor, sv: torch.Tensor, gamma: float = 0.05) - def _score_svr(features: torch.Tensor) -> torch.Tensor: - url = 'https://github.com/photosynthesis-team/photosynthesis.metrics/' \ + url = 'https://github.com/photosynthesis-team/piq/' \ 'releases/download/v0.4.0/brisque_svm_weights.pt' sv_coef, sv = load_url(url, map_location=features.device) diff --git a/photosynthesis_metrics/feature_extractors/__init__.py b/piq/feature_extractors/__init__.py similarity index 100% rename from photosynthesis_metrics/feature_extractors/__init__.py rename to piq/feature_extractors/__init__.py diff --git a/photosynthesis_metrics/feature_extractors/fid_inception.py b/piq/feature_extractors/fid_inception.py similarity index 100% rename from photosynthesis_metrics/feature_extractors/fid_inception.py rename to piq/feature_extractors/fid_inception.py diff --git a/photosynthesis_metrics/fid.py b/piq/fid.py similarity index 99% rename from photosynthesis_metrics/fid.py rename to piq/fid.py index 40ea7abb..b6bceb4d 100644 --- a/photosynthesis_metrics/fid.py +++ b/piq/fid.py @@ -11,7 +11,7 @@ from typing import Tuple import torch -from photosynthesis_metrics.base import BaseFeatureMetric +from piq.base import BaseFeatureMetric def _approximation_error(A: torch.Tensor, sA: torch.Tensor) -> torch.Tensor: diff --git a/photosynthesis_metrics/gmsd.py b/piq/gmsd.py similarity index 99% rename from photosynthesis_metrics/gmsd.py rename to piq/gmsd.py index e9fc4b08..f7aa8caf 100644 --- a/photosynthesis_metrics/gmsd.py +++ b/piq/gmsd.py @@ -14,7 +14,7 @@ import torch.nn.functional as F from torch.nn.modules.loss import _Loss -from photosynthesis_metrics.utils import _adjust_dimensions, _validate_input +from piq.utils import _adjust_dimensions, _validate_input def _prewitt_filter() -> torch.Tensor: diff --git a/photosynthesis_metrics/gs.py b/piq/gs.py similarity index 99% rename from photosynthesis_metrics/gs.py rename to piq/gs.py index 48d27ba2..43af27d3 100644 --- a/photosynthesis_metrics/gs.py +++ b/piq/gs.py @@ -12,7 +12,7 @@ import torch import numpy as np -from photosynthesis_metrics.base import BaseFeatureMetric +from piq.base import BaseFeatureMetric def relative(intervals: np.ndarray, alpha_max: float, i_max: int = 100) -> np.ndarray: diff --git a/photosynthesis_metrics/isc.py b/piq/isc.py similarity index 98% rename from photosynthesis_metrics/isc.py rename to piq/isc.py index 5d380f6c..a55d63ab 100644 --- a/photosynthesis_metrics/isc.py +++ b/piq/isc.py @@ -14,7 +14,7 @@ import torch import torch.nn.functional as F -from photosynthesis_metrics.base import BaseFeatureMetric +from piq.base import BaseFeatureMetric def inception_score(features: torch.Tensor, num_splits: int = 10): diff --git a/photosynthesis_metrics/kid.py b/piq/kid.py similarity index 99% rename from photosynthesis_metrics/kid.py rename to piq/kid.py index eb3d5500..1da15e9c 100644 --- a/photosynthesis_metrics/kid.py +++ b/piq/kid.py @@ -2,7 +2,7 @@ import torch -from photosynthesis_metrics.base import BaseFeatureMetric +from piq.base import BaseFeatureMetric def _polynomial_kernel( diff --git a/photosynthesis_metrics/msid.py b/piq/msid.py similarity index 99% rename from photosynthesis_metrics/msid.py rename to piq/msid.py index bfd94b11..66f47f25 100644 --- a/photosynthesis_metrics/msid.py +++ b/piq/msid.py @@ -8,7 +8,7 @@ from scipy.sparse import lil_matrix, diags, eye -from photosynthesis_metrics.base import BaseFeatureMetric 
+from piq.base import BaseFeatureMetric EPSILON = 1e-6 NORMALIZATION = 1e6 diff --git a/photosynthesis_metrics/psnr.py b/piq/psnr.py similarity index 88% rename from photosynthesis_metrics/psnr.py rename to piq/psnr.py index 9beebb04..b1fe671b 100644 --- a/photosynthesis_metrics/psnr.py +++ b/piq/psnr.py @@ -1,14 +1,14 @@ -r""" This module implements Peak Signal-to-Noise Ration (PSNR) in PyTorch. +r""" This module implements Peak Signal-to-Noise Ratio (PSNR) in PyTorch. """ import torch from typing import Optional, Union -from photosynthesis_metrics.utils import _validate_input, _adjust_dimensions +from piq.utils import _validate_input, _adjust_dimensions def psnr(x: torch.Tensor, y: torch.Tensor, data_range: Union[int, float] = 1.0, reduction: Optional[str] = 'mean', convert_to_greyscale: bool = False): - r"""Compute Peak Signal-to-Noise Ration for a batch of images. + r"""Compute Peak Signal-to-Noise Ratio for a batch of images. Supports both greyscale and color images with RGB channel order. Args: diff --git a/photosynthesis_metrics/ssim.py b/piq/ssim.py similarity index 99% rename from photosynthesis_metrics/ssim.py rename to piq/ssim.py index 1f130bab..c07372e1 100644 --- a/photosynthesis_metrics/ssim.py +++ b/piq/ssim.py @@ -12,7 +12,7 @@ import torch.nn.functional as f from torch.nn.modules.loss import _Loss -from photosynthesis_metrics.utils import _adjust_dimensions, _validate_input +from piq.utils import _adjust_dimensions, _validate_input def ssim(x: torch.Tensor, y: torch.Tensor, kernel_size: int = 11, kernel_sigma: float = 1.5, diff --git a/photosynthesis_metrics/tv.py b/piq/tv.py similarity index 98% rename from photosynthesis_metrics/tv.py rename to piq/tv.py index 095d7485..1797b14d 100644 --- a/photosynthesis_metrics/tv.py +++ b/piq/tv.py @@ -4,7 +4,7 @@ import torch from torch.nn.modules.loss import _Loss -from photosynthesis_metrics.utils import _validate_input, _adjust_dimensions +from piq.utils import _validate_input, _adjust_dimensions def total_variation(x: torch.Tensor, size_average: bool = True, reduction_type: str = 'l2') -> torch.Tensor: diff --git a/photosynthesis_metrics/utils.py b/piq/utils.py similarity index 100% rename from photosynthesis_metrics/utils.py rename to piq/utils.py diff --git a/photosynthesis_metrics/vif.py b/piq/vif.py similarity index 98% rename from photosynthesis_metrics/vif.py rename to piq/vif.py index 49f675d0..751d650d 100644 --- a/photosynthesis_metrics/vif.py +++ b/piq/vif.py @@ -10,7 +10,7 @@ import torch.nn.functional as F from typing import Union -from photosynthesis_metrics.utils import _adjust_dimensions, _validate_input +from piq.utils import _adjust_dimensions, _validate_input def _gaussian_kernel2d(kernel_size: int = 5, sigma: float = 2.0) -> torch.Tensor: diff --git a/setup.py b/setup.py index db9af170..8558895f 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ def get_version(rel_path: str) -> str: long_description = f.read() required = g.read().splitlines() -package_name = 'photosynthesis_metrics' +package_name = 'piq' setuptools.setup( name=package_name, version=get_version(os.path.join(package_name, '__init__.py')), @@ -33,7 +33,7 @@ def get_version(rel_path: str) -> str: description="Measures and metrics for image2image tasks. 
PyTorch.", long_description=long_description, long_description_content_type="text/markdown", - url="https://github.com/photosynthesis-team/photosynthesis.metrics", + url="https://github.com/photosynthesis-team/piq", install_requires=required, packages=setuptools.find_packages(), classifiers=[ diff --git a/tests/test_brisque.py b/tests/test_brisque.py index 45f82207..7b50e39b 100644 --- a/tests/test_brisque.py +++ b/tests/test_brisque.py @@ -2,7 +2,7 @@ import pytest from libsvm import svmutil # noqa: F401 from brisque import BRISQUE -from photosynthesis_metrics import brisque, BRISQUELoss +from piq import brisque, BRISQUELoss @pytest.fixture(scope='module') diff --git a/tests/test_fid.py b/tests/test_fid.py index 81109c25..51192f76 100644 --- a/tests/test_fid.py +++ b/tests/test_fid.py @@ -1,8 +1,8 @@ import pytest import torch -from photosynthesis_metrics import FID -from photosynthesis_metrics.feature_extractors.fid_inception import InceptionV3 +from piq import FID +from piq.feature_extractors.fid_inception import InceptionV3 class TestDataset(torch.utils.data.Dataset): diff --git a/tests/test_gmsd.py b/tests/test_gmsd.py index dbd83539..fc7998ea 100644 --- a/tests/test_gmsd.py +++ b/tests/test_gmsd.py @@ -1,7 +1,7 @@ import torch import pytest -from photosynthesis_metrics import GMSDLoss, MultiScaleGMSDLoss +from piq import GMSDLoss, MultiScaleGMSDLoss @pytest.fixture(scope='module') diff --git a/tests/test_gs.py b/tests/test_gs.py index 5515db92..635255d0 100644 --- a/tests/test_gs.py +++ b/tests/test_gs.py @@ -3,7 +3,7 @@ import pytest import torch -from photosynthesis_metrics import GS +from piq import GS try: import gudhi # noqa except ImportError: diff --git a/tests/test_is.py b/tests/test_is.py index 03301fdf..90045418 100644 --- a/tests/test_is.py +++ b/tests/test_is.py @@ -4,7 +4,7 @@ import numpy as np from scipy.stats import entropy -from photosynthesis_metrics import IS, inception_score +from piq import IS, inception_score # Same as in https://github.com/sbarratt/inception-score-pytorch diff --git a/tests/test_kid.py b/tests/test_kid.py index 0b0805f3..a89fc7e5 100644 --- a/tests/test_kid.py +++ b/tests/test_kid.py @@ -1,7 +1,7 @@ import pytest import torch -from photosynthesis_metrics import KID +from piq import KID @pytest.fixture(scope='module') diff --git a/tests/test_msid.py b/tests/test_msid.py index e53b08e3..d109b71c 100644 --- a/tests/test_msid.py +++ b/tests/test_msid.py @@ -1,8 +1,8 @@ import torch import pytest -from photosynthesis_metrics import MSID -from photosynthesis_metrics.feature_extractors.fid_inception import InceptionV3 +from piq import MSID +from piq.feature_extractors.fid_inception import InceptionV3 class TestDataset(torch.utils.data.Dataset): diff --git a/tests/test_psnr.py b/tests/test_psnr.py index da3e625b..69f47f8b 100644 --- a/tests/test_psnr.py +++ b/tests/test_psnr.py @@ -2,7 +2,7 @@ import pytest from skimage.metrics import peak_signal_noise_ratio -from photosynthesis_metrics import psnr +from piq import psnr @pytest.fixture(scope='module') diff --git a/tests/test_ssim.py b/tests/test_ssim.py index 508d2b1c..24bf5cf2 100644 --- a/tests/test_ssim.py +++ b/tests/test_ssim.py @@ -3,7 +3,7 @@ import pytest import tensorflow as tf -from photosynthesis_metrics import SSIMLoss, MultiScaleSSIMLoss, ssim, multi_scale_ssim +from piq import SSIMLoss, MultiScaleSSIMLoss, ssim, multi_scale_ssim @pytest.fixture(scope='module') @@ -155,14 +155,14 @@ def test_ssim_raises_if_kernel_size_greater_than_image() -> None: def 
test_ssim_raise_if_wrong_value_is_estimated(prediction: torch.Tensor, target: torch.Tensor) -> None: - photosynthesis_ssim = ssim(prediction, target, kernel_size=11, kernel_sigma=1.5, data_range=1., size_average=False) + piq_ssim = ssim(prediction, target, kernel_size=11, kernel_sigma=1.5, data_range=1., size_average=False) tf_prediction = tf.convert_to_tensor(prediction.permute(0, 2, 3, 1).numpy()) tf_target = tf.convert_to_tensor(target.permute(0, 2, 3, 1).numpy()) tf_ssim = torch.tensor(tf.image.ssim(tf_prediction, tf_target, max_val=1.).numpy()) - assert torch.isclose(photosynthesis_ssim, tf_ssim, atol=1e-6).all(), \ + assert torch.isclose(piq_ssim, tf_ssim, atol=1e-6).all(), \ f'The estimated value must be equal to tensorflow provided one' \ f'(considering floating point operation error up to 1 * 10^-6), ' \ - f'got difference {(photosynthesis_ssim - tf_ssim).abs()}' + f'got difference {(piq_ssim - tf_ssim).abs()}' # ================== Test class: `SSIMLoss` ================== @@ -449,30 +449,30 @@ def test_multi_scale_ssim_raises_if_kernel_size_greater_than_image() -> None: def test_multi_scale_ssim_raise_if_wrong_value_is_estimated(prediction: torch.Tensor, target: torch.Tensor) -> None: - photosynthesis_ms_ssim = multi_scale_ssim(prediction, target, kernel_size=11, kernel_sigma=1.5, - data_range=1., size_average=False) + piq_ms_ssim = multi_scale_ssim(prediction, target, kernel_size=11, kernel_sigma=1.5, + data_range=1., size_average=False) tf_prediction = tf.convert_to_tensor(prediction.permute(0, 2, 3, 1).numpy()) tf_target = tf.convert_to_tensor(target.permute(0, 2, 3, 1).numpy()) tf_ms_ssim = torch.tensor(tf.image.ssim_multiscale(tf_prediction, tf_target, max_val=1.).numpy()) - assert torch.isclose(photosynthesis_ms_ssim, tf_ms_ssim, atol=1e-4).all(), \ + assert torch.isclose(piq_ms_ssim, tf_ms_ssim, atol=1e-4).all(), \ f'The estimated value must be equal to tensorflow provided one' \ f'(considering floating point operation error up to 1 * 10^-4), ' \ - f'got difference {(photosynthesis_ms_ssim - tf_ms_ssim).abs()}' + f'got difference {(piq_ms_ssim - tf_ms_ssim).abs()}' def test_multi_scale_ssim_raise_if_wrong_value_is_estimated_custom_weights(prediction: torch.Tensor, target: torch.Tensor) -> None: scale_weights = [0.0448, 0.2856, 0.3001] - photosynthesis_ms_ssim = multi_scale_ssim(prediction, target, kernel_size=11, kernel_sigma=1.5, - data_range=1., size_average=False, scale_weights=scale_weights) + piq_ms_ssim = multi_scale_ssim(prediction, target, kernel_size=11, kernel_sigma=1.5, + data_range=1., size_average=False, scale_weights=scale_weights) tf_prediction = tf.convert_to_tensor(prediction.permute(0, 2, 3, 1).numpy()) tf_target = tf.convert_to_tensor(target.permute(0, 2, 3, 1).numpy()) tf_ms_ssim = torch.tensor(tf.image.ssim_multiscale(tf_prediction, tf_target, max_val=1., power_factors=scale_weights).numpy()) - assert torch.isclose(photosynthesis_ms_ssim, tf_ms_ssim, atol=1e-4).all(), \ + assert torch.isclose(piq_ms_ssim, tf_ms_ssim, atol=1e-4).all(), \ f'The estimated value must be equal to tensorflow provided one' \ f'(considering floating point operation error up to 1 * 10^-4), ' \ - f'got difference {(photosynthesis_ms_ssim - tf_ms_ssim).abs()}' + f'got difference {(piq_ms_ssim - tf_ms_ssim).abs()}' # ================== Test class: `MultiScaleSSIMLoss` ================== @@ -623,27 +623,27 @@ def test_multi_scale_ssim_loss_raises_if_kernel_size_greater_than_image() -> Non def test_multi_scale_ssim_loss_raise_if_wrong_value_is_estimated(prediction: torch.Tensor, 
target: torch.Tensor) -> None: - photosynthesis_ms_ssim_loss = MultiScaleSSIMLoss(kernel_size=11, kernel_sigma=1.5, - data_range=1.)(prediction, target) + piq_ms_ssim_loss = MultiScaleSSIMLoss(kernel_size=11, kernel_sigma=1.5, + data_range=1.)(prediction, target) tf_prediction = tf.convert_to_tensor(prediction.permute(0, 2, 3, 1).numpy()) tf_target = tf.convert_to_tensor(target.permute(0, 2, 3, 1).numpy()) tf_ms_ssim = torch.tensor(tf.image.ssim_multiscale(tf_prediction, tf_target, max_val=1.).numpy()).mean() - assert torch.isclose(photosynthesis_ms_ssim_loss, 1 - tf_ms_ssim, atol=1e-4).all(), \ + assert torch.isclose(piq_ms_ssim_loss, 1 - tf_ms_ssim, atol=1e-4).all(), \ f'The estimated value must be equal to tensorflow provided one' \ f'(considering floating point operation error up to 1 * 10^-4), ' \ - f'got difference {(photosynthesis_ms_ssim_loss - 1 + tf_ms_ssim).abs()}' + f'got difference {(piq_ms_ssim_loss - 1 + tf_ms_ssim).abs()}' def test_multi_scale_ssim_loss_raise_if_wrong_value_is_estimated_custom_weights(prediction: torch.Tensor, target: torch.Tensor) -> None: scale_weights = [0.0448, 0.2856, 0.3001] - photosynthesis_ms_ssim_loss = MultiScaleSSIMLoss(kernel_size=11, kernel_sigma=1.5, - data_range=1., scale_weights=scale_weights)(prediction, target) + piq_ms_ssim_loss = MultiScaleSSIMLoss(kernel_size=11, kernel_sigma=1.5, + data_range=1., scale_weights=scale_weights)(prediction, target) tf_prediction = tf.convert_to_tensor(prediction.permute(0, 2, 3, 1).numpy()) tf_target = tf.convert_to_tensor(target.permute(0, 2, 3, 1).numpy()) tf_ms_ssim = torch.tensor(tf.image.ssim_multiscale(tf_prediction, tf_target, max_val=1., power_factors=scale_weights).numpy()).mean() - assert torch.isclose(photosynthesis_ms_ssim_loss, 1 - tf_ms_ssim, atol=1e-4).all(), \ + assert torch.isclose(piq_ms_ssim_loss, 1 - tf_ms_ssim, atol=1e-4).all(), \ f'The estimated value must be equal to tensorflow provided one' \ f'(considering floating point operation error up to 1 * 10^-4), ' \ - f'got difference {(photosynthesis_ms_ssim_loss - 1 + tf_ms_ssim).abs()}' + f'got difference {(piq_ms_ssim_loss - 1 + tf_ms_ssim).abs()}' diff --git a/tests/test_tv.py b/tests/test_tv.py index 5689e8eb..f4588e04 100644 --- a/tests/test_tv.py +++ b/tests/test_tv.py @@ -1,7 +1,7 @@ import torch import pytest -from photosynthesis_metrics import TVLoss, total_variation +from piq import TVLoss, total_variation @pytest.fixture(scope='module') diff --git a/tests/test_utils.py b/tests/test_utils.py index 6edd1ee7..6b915a23 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -3,7 +3,7 @@ import numpy as np -from photosynthesis_metrics.utils import _validate_input +from piq.utils import _validate_input @pytest.fixture(scope='module') diff --git a/tests/test_vif.py b/tests/test_vif.py index c6159db4..89861ec9 100644 --- a/tests/test_vif.py +++ b/tests/test_vif.py @@ -1,7 +1,7 @@ import torch import pytest -from photosynthesis_metrics import VIFLoss, vif_p +from piq import VIFLoss, vif_p @pytest.fixture(scope='module') diff --git a/tox.ini b/tox.ini index 5e1d19dd..b7b0bb43 100644 --- a/tox.ini +++ b/tox.ini @@ -8,7 +8,7 @@ ignore = W293, exclude = .git, __pycache__, workflows, - ./photosynthesis_metrics/__init__.py, + ./piq/__init__.py, max-complexity = 10 max-line-length = 120 \ No newline at end of file
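Taken together, the diff is a package rename with a version bump from 0.4.0 to 0.4.1; the public functions and classes keep their names. A minimal before/after sketch of the import migration for downstream code, reusing the `ssim` example from the README:

```python
import torch

# Old import path (photosynthesis-metrics, up to 0.4.0):
# from photosynthesis_metrics import ssim

# New import path (piq, from 0.4.1 on):
from piq import ssim

prediction = torch.rand(3, 3, 256, 256)
target = torch.rand(3, 3, 256, 256)
ssim_index = ssim(prediction, target, data_range=1.)
```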