Commit d784cb0

added some infos in index.html

federico1-creator committed Jul 5, 2024
1 parent 130791a commit d784cb0
Showing 2 changed files with 320 additions and 0 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -0,0 +1,3 @@
/static/images/*
settings.json
index_fr.html
317 changes: 317 additions & 0 deletions index.html
@@ -0,0 +1,317 @@
<!DOCTYPE html>
<html>

<head>
<meta charset="utf-8">
<meta name="description"
content="Contrasting Deepfakes Diffusion via Contrastive Learning and Global-Local Similarities">
<meta name="keywords" content="CoDE, DeepFake Detection, Contrastive Learning">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Contrasting Deepfakes Diffusion via Contrastive Learning and Global-Local Similarities
</title>

<link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro" rel="stylesheet">

<link rel="stylesheet" href="./static/css/bulma.min.css">
<link rel="stylesheet" href="./static/css/bulma-carousel.min.css">
<link rel="stylesheet" href="./static/css/bulma-slider.min.css">
<link rel="stylesheet" href="./static/css/fontawesome.all.min.css">
<link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css">
<link rel="stylesheet" href="./static/css/index.css">
<link rel="icon" href="static\images\unimore_logo.png">

<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
<script defer src="./static/js/fontawesome.all.min.js"></script>
<script src="./static/js/bulma-carousel.min.js"></script>
<script src="./static/js/bulma-slider.min.js"></script>
<script src="./static/js/index.js"></script>
</head>

<body>
<section class="hero">
<div class="hero-body">
<div class="container is-max-desktop">
<div class="columns is-centered">
<div class="column has-text-centered">
<h1 class="title is-1 publication-title">Contrasting Deepfakes Diffusion via Contrastive Learning and Global-Local Similarities</h1>
<div class="is-size-5 publication-authors">
<h1 class="title is-4" style="color: #5c5c5c;">ECCV 2024</h1>

<span class="author-block">
<a href="https://...">Lorenzo Baraldi</a>*<sup>1,2</sup>,</span>
<span class="author-block">
<a href="https://...">Federico Cocchi</a>*<sup>1,2</sup>,</span>
<span class="author-block">
<a href="https://...">Marcella Cornia</a><sup>1</sup>,</span>
</span>
<span class="author-block">
<a href="https://...">Lorenzo Baraldi</a><sup>1</sup>,</span>
</span>
<br>
<span class="author-block">
<a href="https://...">Alessandro Nicolosi</a><sup>3</sup>,</span>
</span>
<span class="author-block">
<a href="https://...">Rita Cucchiara</a><sup>1</sup>,</span>
</span>
</div>

<div class="is-size-5 publication-authors">
<span class="author-block"><sup>1</sup>University of Modena and Reggio Emilia,</span>
<span class="author-block"><sup>2</sup>University of Pisa,</span>
<span class="author-block"><sup>3</sup>Leonardo S.p.A.</span>
</div>
<div class="is-size-5 publication-authors">
<span class="author-block">* Equal contribution</span>
</div>

<div class="column has-text-centered">
<span class="link-block">
<a href="https://arxiv.org/abs/2404.06542" class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="ai ai-arxiv"></i>
</span>
<span>arXiv</span>
</a>
</span>

<span class="link-block">
<a href="https://github.com/aimagelab/CoDE"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fab fa-github"></i>
</span>
<span>Code</span>
</a>
</span>

<span class="link-block">
<a href="https://huggingface.co/datasets/elsaEU/ELSA_D3"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<svg class="svg-inline--fa fa-face-smiling-hands" aria-hidden="true" focusable="false" data-prefix="fas" data-icon="face-smiling-hands" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 640 512" data-fa-i2svg=""><path fill="currentColor" d="M411.1 495.3C382.8 506.1 352.1 512 319.1 512C287.9 512 257.2 506.1 228.9 495.3C245.9 473.7 255.1 446.4 255.1 416.8V386C274.1 394.4 295.4 400 319.1 400C344.6 400 365.9 394.4 384 386V416.8C384 446.4 394.1 473.7 411.1 495.3V495.3zM575.7 242.6C558.8 236.8 539.5 240.6 526.1 254.1L478.1 301.1C469.6 287.4 453.9 278.4 436 278.4C407.3 278.4 384 301.7 384 330.4V349.6C367.2 360.3 345.9 368 319.1 368C294.1 368 272.8 360.3 255.1 349.6V330.4C255.1 301.7 232.7 278.4 203.1 278.4C186.1 278.4 170.4 287.4 161 301.1L113.9 254.1C100.5 240.6 81.15 236.8 64.34 242.6C71.31 107.5 183.1 0 319.1 0C456.9 0 568.7 107.5 575.7 242.6V242.6zM281.6 228.8C283.7 231.6 287.3 232.7 290.5 231.6C293.8 230.5 295.1 227.4 295.1 224C295.1 206.1 289.3 188.4 279.4 175.2C269.6 162.2 255.5 152 239.1 152C224.5 152 210.4 162.2 200.6 175.2C190.7 188.4 183.1 206.1 183.1 224C183.1 227.4 186.2 230.5 189.5 231.6C192.7 232.7 196.3 231.6 198.4 228.8L198.4 228.8L198.6 228.5C198.8 228.3 198.1 228 199.3 227.6C199.1 226.8 200.9 225.7 202.1 224.3C204.6 221.4 208.1 217.7 212.3 213.1C221.1 206.2 231.2 200 239.1 200C248.8 200 258.9 206.2 267.7 213.1C271.9 217.7 275.4 221.4 277.9 224.3C279.1 225.7 280 226.8 280.7 227.6C281 228 281.2 228.3 281.4 228.5L281.6 228.8L281.6 228.8zM450.5 231.6C453.8 230.5 456 227.4 456 224C456 206.1 449.3 188.4 439.4 175.2C429.6 162.2 415.5 152 400 152C384.5 152 370.4 162.2 360.6 175.2C350.7 188.4 344 206.1 344 224C344 227.4 346.2 230.5 349.5 231.6C352.7 232.7 356.3 231.6 358.4 228.8L358.4 228.8L358.6 228.5C358.8 228.3 358.1 228 359.3 227.6C359.1 226.8 360.9 225.7 362.1 224.3C364.6 221.4 368.1 217.7 372.3 213.1C381.1 206.2 391.2 200 400 200C408.8 200 418.9 206.2 427.7 213.1C431.9 217.7 435.4 221.4 437.9 224.3C439.1 225.7 440 226.8 440.7 227.6C441 228 441.2 228.3 441.4 228.5L441.6 228.8L441.6 228.8C443.7 231.6 447.3 232.7 450.5 231.6V231.6zM68.69 299.3C62.44 293.1 62.44 282.9 68.69 276.7C74.93 270.4 85.06 270.4 91.31 276.7L170.3 355.7C175.4 360.8 184 357.2 184 350.1V330.4C184 319.4 192.1 310.4 204 310.4C215 310.4 224 319.4 224 330.4V416.8C224 469.4 181.4 512 128.8 512C103.6 512 79.34 501.1 61.49 484.1L4.686 427.3C-1.562 421.1-1.562 410.9 4.686 404.7C10.93 398.4 21.07 398.4 27.31 404.7L46.63 424C49.22 426.6 53.41 426.6 55.1 424C58.59 421.4 58.59 417.2 55.1 414.6L4.686 363.3C-1.562 357.1-1.562 346.9 4.686 340.7C10.93 334.4 21.07 334.4 27.31 340.7L78.63 392C81.22 394.6 85.41 394.6 87.1 392C90.59 389.4 90.59 385.2 87.1 382.6L20.69 315.3C14.44 309.1 14.44 298.9 20.69 292.7C26.93 286.4 37.06 286.4 43.31 292.7L110.6 360C113.2 362.6 117.4 362.6 119.1 360C122.6 357.4 122.6 353.2 119.1 350.6L68.69 299.3zM520 350.6C517.4 353.2 517.4 357.4 520 360C522.6 362.6 526.8 362.6 529.4 360L596.7 292.7C602.9 286.4 613.1 286.4 619.3 292.7C625.6 298.9 625.6 309.1 619.3 315.3L552 382.6C549.4 385.2 549.4 389.4 552 392C554.6 394.6 558.8 394.6 561.4 392L612.7 340.7C618.9 334.4 629.1 334.4 635.3 340.7C641.6 346.9 641.6 357.1 635.3 363.3L584 414.6C581.4 417.2 581.4 421.4 584 424C586.6 426.6 590.8 426.6 593.4 424L612.7 404.7C618.9 398.4 629.1 398.4 635.3 404.7C641.6 410.9 641.6 421.1 635.3 427.3L578.5 484.1C560.7 501.1 536.4 512 511.2 512C458.6 512 416 469.4 416 416.8V330.4C416 319.4 424.1 310.4 436 310.4C447 310.4 456 319.4 456 330.4V350.1C456 357.2 464.6 360.8 469.7 355.7L548.7 276.7C554.9 270.4 565.1 270.4 
571.3 276.7C577.6 282.9 577.6 293.1 571.3 299.3L520 350.6z"></path></svg>
</span>
<span>Dataset</span>
</a>
</span>
</div>

</div>
</div>
</div>
</div>
</div>
</section>
<section class="hero teaser">
<div class="container is-max-desktop">
<div class="hero-body">
<!--<img src="./static/images/huggingface_d3_gif.gif">-->
<h2 class="subtitle has-text-centered">
<span class="dnerf">D<sup>3</sup></span> is a multimodal dataset that contains 9.2M generated images, generated with four SoTA diffusion model generators.
Each image is generated starting from a <a href="https://laion.ai/blog/laion-400-open-dataset/"> LAION-400M </a> caption, thus referring to a realistic textual description.
</h2>
</div>
</div>
</section>


<section class="hero is-light is-small">
<div class="hero-body">
<div class="container">
<div id="results-carousel" class="carousel results-carousel">
<div class="item item-steve">
<img poster="" src="https://aimagelab.ing.unimore.it/imagelab/uploadedImages/001088.png" id="steve" autoplay controls muted loop playsinline height="100%">
</div>
<div class="item item-chair-tp">
<img poster="" src="https://aimagelab.ing.unimore.it/imagelab/uploadedImages/001090.png" id="steve" autoplay controls muted loop playsinline height="100%">
</div>
<div class="item item-shiba">
<img poster="" src="https://aimagelab.ing.unimore.it/imagelab/uploadedImages/001091.png" id="steve" autoplay controls muted loop playsinline height="100%">
</div>
<div class="item item-fullbody">
<img poster="" src="https://aimagelab.ing.unimore.it/imagelab/uploadedImages/001092.png" id="steve" autoplay controls muted loop playsinline height="100%">
</div>
<div class="item item-blueshirt">
<img poster="" src="https://aimagelab.ing.unimore.it/imagelab/uploadedImages/001093.png" id="steve" autoplay controls muted loop playsinline height="100%">
</div>
<div class="item item-mask">
<img poster="" src="https://aimagelab.ing.unimore.it/imagelab/uploadedImages/001094.png" id="steve" autoplay controls muted loop playsinline height="100%">
</div>
<div class="item item-coffee">
<img poster="" src="https://aimagelab.ing.unimore.it/imagelab/uploadedImages/001095.png" id="steve" autoplay controls muted loop playsinline height="100%">
</div>
</div>
</div>
</div>
</section>


<section class="section">
<div class="container is-max-desktop">
<!-- Abstract. -->
<div class="columns is-centered has-text-centered">
<div class="column is-four-fifths">
<h2 class="title is-3">A dataset for Large-scale Deepfake Detection</h2>
<div class="content has-text-justified">
<p>
Existing deepfake detection datasets are limited in their diversity of generators and quantity of images. Therefore, we create and release a new dataset that can support learning deepfake detection methods from scratch.
Our <b>D</b>iffusion-generated <b>D</b>eepfake <b>D</b>etection dataset (D<sup>3</sup>) contains nearly 2.3M records and 11.5M images.
Each record in the dataset consists of a prompt, a real image, and four generated images, one from each generator.
Prompts and the corresponding real images are taken from <a href="https://laion.ai/blog/laion-400-open-dataset/"> LAION-400M </a>, while the fake images are generated from the same prompt using four different text-to-image generators.

</p>
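<p>
As a minimal loading sketch (not the official loader), records can be streamed from the Hugging Face Hub with the <code>datasets</code> library. The split name and the field names in the commented lines below are assumptions for illustration only; the actual schema should be checked on the <a href="https://huggingface.co/datasets/elsaEU/ELSA_D3">dataset card</a>.
</p>
<pre><code># Minimal sketch: stream D3 records instead of downloading all ~11.5M images.
# Split and field names are illustrative assumptions; inspect record.keys()
# and the ELSA_D3 dataset card for the actual schema.
from datasets import load_dataset

ds = load_dataset("elsaEU/ELSA_D3", split="train", streaming=True)

for record in ds:
    print(sorted(record.keys()))  # check the real schema first
    # Hypothetical access pattern, one record = prompt + real image + 4 fakes:
    # prompt     = record["original_prompt"]
    # real_image = record["image"]
    # fakes      = [record[f"image_gen{i}"] for i in range(4)]
    break
</code></pre>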
<p>
We employ four state-of-the-art open-source diffusion models, namely Stable Diffusion 1.4 (SD-1.4), Stable Diffusion 2.1 (SD-2.1), Stable Diffusion XL (SD-XL), and DeepFloyd IF (DF-IF).
While the first three generators are variants of the Stable Diffusion approach, DeepFloyd IF is strongly inspired by Imagen and thus represents a different generation technique.
</p>
<p>
To increase the variance of the dataset, images have been generated at different resolutions and aspect ratios: 256<sup>2</sup>, 512<sup>2</sup>, 640×480, and 640×360.
Moreover, to mimic the distribution of real images, we also employ a variety of encoding and compression methods (BMP, GIF, JPEG, TIFF, PNG).
In particular, we closely follow the distribution of encoding methods of LAION itself, therefore favoring the presence of JPEG-encoded images.
</p>
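<p>
The sketch below illustrates this setup; it assumes the public <code>diffusers</code> API and uses a placeholder prompt, and it is not the exact pipeline used to build D<sup>3</sup>. An image is sampled at one of the listed resolutions and then re-encoded with a randomly chosen format, with JPEG over-weighted to mimic the LAION distribution.
</p>
<pre><code># Illustrative sketch only (assumed diffusers API, placeholder prompt);
# not the exact generation pipeline used for D3.
import random
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

resolutions = [(256, 256), (512, 512), (640, 480), (640, 360)]
# JPEG is over-weighted to roughly follow the encoding distribution of LAION.
formats = ["JPEG"] * 6 + ["PNG", "BMP", "GIF", "TIFF"]

prompt = "a red vintage car parked near the beach"  # placeholder LAION-style caption
width, height = random.choice(resolutions)
image = pipe(prompt, width=width, height=height).images[0]

fmt = random.choice(formats)
image.convert("RGB").save(f"fake_sample.{fmt.lower()}", format=fmt)
</code></pre>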
</div>
</div>
</div>
<!--/ Abstract. -->
</div>
</section>



<!--/ Animation. -->
<!--<div class="columns is-centered">
<div class="column is-full-width">
<h2 class="title is-3">Animation</h2>
<div class="slider-container-lollo">
<div class="slider-lollo">
Images will be added dynamically using JavaScript
</div>
</div>
</div>
</div>
-->


<!-- Concurrent Work. -->
<!--<div class="columns is-centered">
<div class="column is-full-width">
<h2 class="title is-3">Related Links</h2>
<div class="content has-text-justified">
<p>
There's a lot of excellent work that was introduced around the same time as ours.
</p>
<p>
<a href="https://arxiv.org/abs/2104.09125">Progressive Encoding for Neural Optimization</a> introduces an idea similar to our windowed position encoding for coarse-to-fine optimization.
</p>
<p>
<a href="https://www.albertpumarola.com/research/D-NeRF/index.html">D-NeRF</a> and <a href="https://gvv.mpi-inf.mpg.de/projects/nonrigid_nerf/">NR-NeRF</a>
both use deformation fields to model non-rigid scenes.
</p>
<p>
Some works model videos with a NeRF by directly modulating the density, such as <a href="https://video-nerf.github.io/">Video-NeRF</a>, <a href="https://www.cs.cornell.edu/~zl548/NSFF/">NSFF</a>, and <a href="https://neural-3d-video.github.io/">DyNeRF</a>
</p>
<p>
There are probably many more by the time you are reading this. Check out <a href="https://dellaert.github.io/NeRF/">Frank Dellart's survey on recent NeRF papers</a>, and <a href="https://github.com/yenchenlin/awesome-NeRF">Yen-Chen Lin's curated list of NeRF papers</a>.
</p>
</div>
</div>
</div>
-->
<!--/ Concurrent Work. -->
<section class="hero is-light is-small">
<div class="hero-body">
<div class="container">
<div class="columns is-centered has-text-centered">
<img src="https://aimagelab.ing.unimore.it/imagelab/uploadedImages/001101.gif" class="dimension-fixed" height="800px">
</div>
</div>
</div>
</section>

<section class="section">
<div class="container is-max-desktop">
<!-- Abstract. -->
<div class="columns is-centered has-text-centered">
<div class="column is-four-fifths">
<h2 class="title is-3">Secure and Safe AI</h2>
<div class="content has-text-justified">
<p>
This work has been done under the Multimedia use case of the European network <a href="https://www.elsa-ai.eu/">ELSA - European Lighthouse on Secure and Safe AI</a>.
The objective of the Multimedia use case is to develop effective solutions for detecting and mitigating the spread of deep fake images in multimedia content.
</p>
<p>
Machine-generated images are becoming increasingly widespread in the digital world, thanks to the spread of deep learning models that can generate visual data, such as Generative Adversarial Networks and Diffusion Models. While image generation tools can be employed for lawful goals (e.g., to assist content creators, generate simulated datasets, or enable multi-modal interactive applications), there is a growing concern that they might also be used for illegal and malicious purposes, such as the forgery of natural images or the generation of images in support of fake news, misogyny, or revenge porn.
</p>
<p>
While images generated in the past few years contained artefacts that made them easily recognizable, today's results are far harder to identify from a purely perceptual point of view. In this context, assessing the authenticity of fake images becomes a fundamental goal for security and for guaranteeing a degree of trustworthiness of AI algorithms. There is therefore a growing need for automated methods that can assess the authenticity of images (and, more generally, multimodal content) and keep pace with generative models as they become ever more realistic.
</p>
</div>
</div>
</div>
<!--/ Abstract. -->
</div>
</section>

<section class="section">
<div class="container is-max-desktop">
<!-- Abstract. -->
<div class="columns is-centered has-text-centered">
<div class="column is-four-fifths">
<h2 class="title is-3">The Challenge on Deepfake Detection</h2>
<div class="content has-text-justified">
<p>
Join our thrilling <a href="https://benchmarks.elsa-ai.eu/?ch=3&com=introduction"
class="external-link">competition</a> on deepfake detection and put your skills to the test. As the rise of deepfake technology poses unprecedented challenges, we invite individuals and teams from all backgrounds to showcase their expertise in identifying and debunking manipulated media.
</p>
</div>
</div>
</div>
<!--/ Abstract. -->
</div>
</section>

<!--<section class="section" id="BibTeX">
<div class="container is-max-desktop content">
<h2 class="title">Acknowledgment</h2>
<pre><code>@article{park2021nerfies,
author = {Park, Keunhong and Sinha, Utkarsh and Barron, Jonathan T. and Bouaziz, Sofien and Goldman, Dan B and Seitz, Steven M. and Martin-Brualla, Ricardo},
title = {Nerfies: Deformable Neural Radiance Fields},
journal = {ICCV},
year = {2021},
}</code></pre>
</div>
</section>
-->

<footer class="footer">
<div class="container">
<!-- <div class="content has-text-centered footer-icons">
<a class=""
href="https://international.unimore.it/">
<img src="https://aimagelab.ing.unimore.it/imagelab/uploadedImages/001097.png">
</a>
<a class="" href="https://www.leonardo.com/it/innovation-technology/leonardo-labs" class="external-link" disabled>
<img src="https://aimagelab.ing.unimore.it/imagelab/uploadedImages/001098.png">
</a>
<a class="" href="https://www.elsa-ai.eu/" class="external-link" disabled>
<img src="https://aimagelab.ing.unimore.it/imagelab/uploadedImages/001099.png">
</a>
</div> -->


<div class="columns is-centered">
<div class="column is-8">
<div class="content">
<p>
This website is licensed under a <a rel="license"
href="http://creativecommons.org/licenses/by-sa/4.0/">Creative
Commons Attribution-ShareAlike 4.0 International License</a>.
</p>
<p>
Website powered by AImageLab | HTML template from <a href="https://github.com/nerfies/nerfies.github.io">Nerfies</a>
</p>
</div>
</div>
</div>
</div>
</footer>

</body>
</html>
