From d9577e704c3d4577189f2c91e6ba306e64100fae Mon Sep 17 00:00:00 2001 From: Subash33 Date: Thu, 24 Oct 2024 03:12:37 +0000 Subject: [PATCH] fixed some bugs --- PSM/index.html | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/PSM/index.html b/PSM/index.html index c3e894f..c133088 100644 --- a/PSM/index.html +++ b/PSM/index.html @@ -87,7 +87,7 @@

PSM: Learning Probabilistic Embeddings
- Washington University
ACM Multimedia, 2024
+ Washington University in St. Louis
ACM Multimedia, 2024
@@ -145,7 +145,7 @@

PSM: Learning Probabilistic Embeddings

Abstract

- A soundscape is defined by the acoustic environment a person perceives at a location. In this work, we propose a framework for mapping soundscapes across the Earth. Since soundscapes involve sound distributions that span varying spatial scales, we represent locations with multi-scale satellite imagery and learn a joint rep- resentation among this imagery, audio, and text. To capture the inherent uncertainty in the soundscape of a location, we design the representation space to be probabilistic. We also fuse ubiqui- tous metadata (including geolocation, time, and data source) to enable learning of spatially and temporally dynamic representa- tions of soundscapes. We demonstrate the utility of our framework by creating large-scale soundscape maps integrating both audio and text with temporal control. To facilitate future research on this task, we also introduce a large-scale dataset, GeoSound, contain- ing over 300𝑘 geotagged audio samples paired with both low- and high-resolution satellite imagery. We demonstrate that our method outperforms the existing state-of-the-art on both GeoSound and the existing SoundingEarth dataset. + A soundscape is defined by the acoustic environment a person perceives at a location. In this work, we propose a framework for mapping soundscapes across the Earth. Since soundscapes involve sound distributions that span varying spatial scales, we represent locations with multi-scale satellite imagery and learn a joint representation among this imagery, audio, and text. To capture the inherent uncertainty in the soundscape of a location, we design the representation space to be probabilistic. We also fuse ubiquitous metadata (including geolocation, time, and data source) to enable learning of spatially and temporally dynamic representations of soundscapes. We demonstrate the utility of our framework by creating large-scale soundscape maps integrating both audio and text with temporal control. 
To facilitate future research on this task, we also introduce a large-scale dataset, GeoSound, containing over 300k geotagged audio samples paired with both low- and high-resolution satellite imagery. We demonstrate that our method outperforms the existing state-of-the-art on both GeoSound and the existing SoundingEarth dataset.

@@ -224,19 +224,12 @@

Satellite Image to Sound Retrieval

BibTeX

@inproceedings{khanal2024psm,
-        annotation = {remote_sensing,spotlight},
+        title = {{PSM}: Learning Probabilistic Embeddings for Multi-scale Zero-Shot Soundscape Mapping},
         author = {Khanal, Subash and Xing, Eric and Sastry, Srikumar and Dhakal, Aayush and Xiong, Zhexiao and Ahmad, Adeel and Jacobs, Nathan},
-        thumbnail = {/thumbnails/psm.jpg},
-        booktitle = {ACM Multimedia},
-        title = {{PSM}: Learning Probabilistic Embeddings for Multi-scale Zero-shot Soundscape Mapping},
-        author+an = {7=highlight},
-        pdf = {https://arxiv.org/pdf/2408.07050},
-        eprint = {2408.07050},
-        archiveprefix = {arXiv},
-        primaryclass = {cs.CV},
-        month = oct,
-        day = {28},
-        year = {2024}}
+        year = {2024},
+        month = nov,
+        booktitle = {Association for Computing Machinery Multimedia (ACM Multimedia)},
+ }