% Chapter by Font, Roma & Serra in the Virtanen/Plumbley/Ellis edited volume
% "Computational Analysis of Sound Scenes and Events" (Springer, Cham, 2017).
% @incollection (not @inbook): the chapter authors differ from the book editors.
% Citation key kept from the original export so existing \cite commands still resolve.
@incollection{52db295a9f76476b9661d6e26be43d16,
  title     = {Sound Sharing and Retrieval},
  abstract  = {Multimedia sharing has experienced an enormous growth in recent years, and sound sharing has not been an exception. Nowadays one can find online sound sharing sites in which users can search, browse, and contribute large amounts of audio content such as sound effects, field and urban recordings, music tracks, and music samples. This poses many challenges to enable search, discovery, and ultimately reuse of this content. In this chapter we give an overview of different ways to approach such challenges. We describe how to build an audio database by outlining different aspects to be taken into account. We discuss metadata-based descriptions of audio content and different searching and browsing techniques that can be used to navigate the database. In addition to metadata, we show sound retrieval techniques based on the extraction of audio features from (possibly) unannotated audio. We end the chapter by discussing advanced approaches to sound retrieval and by drawing some conclusions about present and future of sound sharing and retrieval. In addition to our explanations, we provide code examples that illustrate some of the concepts discussed.},
  keywords  = {Audio database, Audio features, Audio indexing, Audio metadata, Machine learning, Multimedia, Query by example, Similarity search, Sound description, Sound exploration, Sound retrieval, Sound search, Sound sharing, Sound taxonomy},
  author    = {Font, Frederic and Roma, Gerard and Serra, Xavier},
  year      = {2017},
  month     = sep,
  day       = {22},
  doi       = {10.1007/978-3-319-63450-0_10},
  language  = {English},
  isbn      = {9783319634494},
  pages     = {279--301},
  editor    = {Virtanen, Tuomas and Plumbley, Mark and Ellis, Dan},
  booktitle = {Computational Analysis of Sound Scenes and Events},
  publisher = {Springer},
  address   = {Cham},
  edition   = {First},
}