@string{JASA = {{\bf J}.\ {\bf A}cous.\ {\bf S}oc.\ {\bf A}mer.}}
@comment{JASA abbreviates: Journal of the Acoustical Society of America}

@string{JASJ     = {{\bf J}.\ {\bf A}cous.\ {\bf S}oc.\ {\bf J}ap.}}
@string{JAESJ    = {{\bf J}.\ {\bf A}cous.\ {\bf S}oc.\ {\bf J}ap. (English)}}
@string{ASJ      = {ASJ: Proc.\ {\bf A}coustical {\bf S}ociety of {\bf J}apan}}
@string{ACUSTICA = {ACUSTICA united with {acta acustica}: Journal of the European Acoustics Association ({\sc eeig})}}
@mastersthesis{bis-1991,
   author    = {Jens Herder},
   title     = {{Konzeption, Implementierung und Integration einer Komponente zur inkrementellen Bezeichner- und Operatoranalyse innerhalb des PSGs}},
   school    = {Technische Hochschule Darmstadt},
   type      = {Diplomarbeit (Master's thesis, in German)},
   month     = feb,
   year      = {1991},
   abstract  = {Der Programmier System Generator - PSG - des
          Fachgebiets Praktische Informatik in Darmstadt
          erzeugt aus einer Sprachdefinition eine
          sprachspezifische Programmierumgebung. Diese besteht
          u. a. aus einem Editor, welcher syntaktische und
          semantische Fehler von Programmfragmenten, die nicht
          vollst{\"a}ndig sein m{\"u}ssen, erkennen kann. Dem
          Benutzer werden per Men{\"u} Fehlerkorrekturen
          angeboten. Neben der freien Texteingabe besteht die
          M{\"o}glichkeit, den Text nur mit Hilfe von Men{\"u}s zu
          verfeinern. Teil dieses Editors ist die
          Bezeichneranalyse. Sie dient als Hilfsmittel f{\"u}r
          den Benutzer, indem f{\"u}r jede Stelle eines
          Programmfragmentes die g{\"u}ltigen Bezeichner
          ausgegeben werden k{\"o}nnen. Die Kontextanalyse setzt
          die Berechnung auf den von der Bezeichneranalyse
          erzeugten Daten auf, um semantische Fehler zu
          erkennen. Die bis zu dieser Arbeit verwendete
          Bezeichneranalyse im PSG unterst{\"u}tzt nur einfache
          Sprachkonzepte (z. B. Fortran und Pascal). Die
          G{\"u}ltigkeitskonzepte der Bezeichner von
          weiterentwickelten Sprachen (z. B. Modula-2, CHILL,
          Ada oder Pascal-XT) sind nicht vollst{\"a}ndig
          modellierbar. Wir stellen ein neues Konzept zur
          Definition und Berechnung der Bezeichneranalyse vor,
          das alle uns bekannten Sprachen mit statischer
          Typbindung unterst{\"u}tzt. Hierf{\"u}r haben wir die
          Sprache BIS - Bezeichneridentifikationssprache -
          definiert. Die Methode ist verwandt mit dem
          Zwischencode f{\"u}r geordnete Attributierte
          Grammatiken. F{\"u}r jeden Knoten des Abstrakten
          Syntaxbaumes wird mit Hilfe von BIS ein Code f{\"u}r
          eine abstrakte Maschine, welche die
          Bezeichneranalyse durchf{\"u}hrt, geschrieben. Im
          Gegensatz zu herk{\"o}mlichen Methoden (verkettete
          Symboltabellen) wird f{\"u}r jeden Punkt innerhalb
          eines Programmes vor der Anfrage durch den Benutzer
          oder der Kontextanalyse die Menge der g{\"u}ltigen
          Bezeichner berechnet. Die Kosten f{\"u}r eine Anfrage
          sind dadurch minimal. Diese abstrakte Maschine teilt
          sich in zwei unabh{\"a}ngige Maschinen auf, zum einen
          in die S-Maschine, die die speziellen Operationen
          der Bezeichneranalyse durchf{\"u}hrt, und zum anderen
          in die G-Maschine, die den Datenfluss und die
          Auswertung steuert. Diese Aufteilung erm{\"o}glicht den
          Austausch der S-Maschine durch eine andere, welche
          neue Anwendungsgebiete erschliesst, z. B. die eines
          Praeprozessors. Die G-Maschine arbeitet
          inkrementell; es werden nur die Codeschablonen neu
          ausgewertet, deren geerbten Attribute sich ge{\"a}ndert
          haben. Dazu m{\"u}ssen die Daten, die in einer
          Codeschablone hinein- und hinausfliessen, abgelegt
          werden. Dies ergibt bei grossen Programmfragmenten
          eine immense Rechenzeiteinsparung auf Kosten des
          Speicherplatzes. Die Funktionsweise wird an einer
          kleinen Beispielsprache demonstriert, die zu Pascal
          {\"a}hnlich ist. Diese besitzt Konstrukte zum Import
          und Export von Daten und Datentypen zwischen
          Programmfragmenten. Im Prototyp kann die
          inkrementelle Arbeitsweise abgeschaltet werden und
          erm{\"o}glicht einen guten Vergleich der Verfahren. }
}


@techreport{clib-1992,
   author    = {Jens Herder and Jan Hesse and Rainer K{\"o}nig and Filippo Logi},
   title     = {{A Prototype of an Interface Builder for the Common Lisp Interface Manager - CLIB}},
   institution = {Forschungszentrum Informatik (FZI), Technical Expert Systems and Robotics},
   year      = {1992}
}


@article{clib-sig-1993,
   author    = {Jan Hesse and Rainer K{\"o}nig and Filippo Logi and Jens Herder},
   title     = {{A Prototype of an Interface Builder for the Common Lisp Interface Manager --- CLIB}},
   journal   = {{ACM} {SIGPLAN} Notices},
   year      = {1993},
   month     = aug,
   volume    = {28},
   number    = {8},
   pages     = {19--28},
   abstract  = {The Common Lisp Interface Manager (CLIM) is used to
          develop graphical user interfaces for Lisp-based
          applications. With the prototype of the CLIM
          interface Builder (CLIB) the programmer can generate
          code for CLIM interactively. The developing process
          will be fast and less prone to errors. With this new
          tool, the interactive rapid prototyping reduces
          costs of a specification phase. Here we present the
          concept and first results of the prototype of CLIB.}
}


@misc{hildte-1990,
   author    = {Jens Herder},
   title     = {{HiLDTe - Konzepte und Implementierung einer Textur-Synthese Sprache}},
   month     = apr,
   year      = {1990},
   howpublished = {Studienarbeit, Technische Hochschule Darmstadt, abstract in Computer Graphik Topics 3/90},
   school    = {Technische Hochschule Darmstadt},
   abstract  = {Texturen k{\"o}nnen als Oberfl{\"a}chenstrukturen realer
          Objekte aufgefasst werden und sind Variationen in
          Farbe, Geometrie, Transparenz, usw.. Im Gegensatz zu
          Algorithmen f{\"u}r die k{\"u}nstliche Generierung von
          Texturen gibt es nur wenige Ans{\"a}tze zu
          Textur-Synthese-Sprachen oder zu Hilfsmittel f{\"u}r
          die Textur-Beschreibung. Die bekannten Werkzeuge
          decken zudem jeweils nur Teilgebiete der Generierung
          ab. Mit HiLDTe (Hierachical Language for the
          Description of Textures) ist nun eine Sprache
          entwickelt worden, mit der m{\"o}glichst alle bekannten
          Texturtypen beschrieben werden k{\"o}nnen. HiLDTe
          basiert auf einem am Fachgebiet GRIS entwickelten
          Texturmodell, in dem Texturen generische, eventuell
          komplex zusammengesetzte Objekte
          repr{\"a}sentieren. Aufgabe dieser Arbeit war es nun,
          Konzepte f{\"u}r die Sprache HiLDTe zu entwickeln, eine
          entsprechende Grammatik aufzubauen und mit Hilfe der
          UNIX-Werkzeuge LEX und YACC einen Compiler zu
          implementieren, mit dem ein ausf{\"u}hrbarer
          Zwischen-Code f{\"u}r die in HiLDTe spezifizierten
          Texturen erstellt wird.}
}

@article{articulation-system:1994,
   author    = {Tosiyasu L. Kunii and Jens Herder and Karol Myszkowski
          and Oleg Okunev and Galina G. Okuneva and Masumi Ibusuki},
   title     = {Articulation Simulation for an {Intelligent} {Dental} {Care} {System}},
   journal   = {Displays},
   year      = {1994},
   volume    = {15},
   number    = {3},
   pages     = {181--188},
   abstract  = {{\sc CAD/CAM} techniques are used increasingly in dentistry for
       design and fabrication of teeth restorations. An important issue is
       preserving occlusal contacts of teeth after restoration.
       Traditional techniques based on the use of casts with mechanical
       articulators require manual adjustment of occlusal surface, which
       becomes impractical when hard restoration materials like porcelain
       are used; they are also time and labor consuming.  Most existing
       computer systems ignore completely such an articulation check, or
       perform the check at the level of a tooth and its immediate
       neighbors.

       We present a new mathematical model and a related user interface for
       global articulation simulation, developed for the Intelligent Dental
       Care System project. The aim of the simulation is elimination of the
       use of mechanical articulators and manual adjustment in the process
       of designing dental restorations and articulation diagnostic. The
       mathematical model is based upon differential topological modeling
       of the jaws considered as a mechanical system.  The user interface
       exploits metaphors that are familiar to dentists from everyday
       practice. A new input device designed specifically for use with
       articulation simulation is proposed.}
}

@inproceedings{visualizat:1996,
  author       = {Karol Myszkowski and Jens Herder and Tosiyasu L. Kunii and Masumi Ibusuki},
  title        = {Visualization and Analysis of Occlusion for Human Jaws Using a Functionally Generated Path},
  booktitle    = {IS\&T/SPIE Symp. on Electronic Imaging, Visual Data Exploration and Analysis III},
  organization = {The International Society for Optical Engineering},
  month        = jan,
  year         = {1996},
  abstract     = {Dynamic characteristics of occlusion during lower jaw
          motion are useful in the diagnosis of jaw
          articulation problems and in computer-aided
          design/manufacture of teeth restorations. The
          Functionally Generated Path (FGP), produced as a
          surface which envelops the actual occlusal surface
          of the moving opponent jaw, can be used for compact
          representation of dynamic occlusal relations. In
          traditional dentistry FGP is recorded as a bite
          impression in a patient's mouth. We propose an
          efficient computerized technique for FGP
          reconstruction and validate it through
          implementation and testing. The distance maps
          between occlusal surfaces of jaws, calculated for
          multiple projection directions and accumulated for
          mandibular motion, provide information for FGP
          computation. Rasterizing graphics hardware is used
          for fast calculation of the distance
          maps. Real-world data are used: the scanned shape of
          teeth and the measured motion of the lower jaw. We
          show applications of FGP to analysis of the
          occlusion relations and occlusal surface design for
          restorations.}
}


@inproceedings{herder-asva:1997,
  author       = {Jens Herder and Michael Cohen},
  title        = {Sound {Spatialization} {Resource} {Management} in {Virtual} {Reality} {Environments}},
  booktitle    = {ASVA'97 --- Int. Symp. on Simulation, Visualization and Auralization for Acoustic Research and Education},
  year         = {1997},
  month        = apr,
  pages        = {407--414},
  organization = {The Acoustical Society of Japan (ASJ)},
  address      = {Tokyo, Japan},
  abstract     = {In a virtual reality environment users are immersed
          in a scene with objects which might produce
          sound. The responsibility of a VR environment is to
          present these objects, but a system has only limited
          resources, including spatialization channels
          (mixels), MIDI/audio channels, and processing
          power. The sound spatialization resource manager
          controls sound resources and optimizes fidelity
          (presence) under given conditions. For that a
          priority scheme based on human psychophysical
          hearing is needed. Parameters for spatialization
          priorities include intensity calculated from volume
          and distance, orientation in the case of non-uniform
          radiation patterns, occluding objects, frequency
          spectrum (low frequencies are harder to localize),
          expected activity, and others. Objects which are
          spatially close together (depending on distance and
          direction) can be mixed. Sources that can not be
          spatialized can be treated as a single ambient sound
          source. Important for resource management is the
          resource assignment, i.e., minimizing swap
          operations, which makes it desirable to look-ahead
          and predict upcoming events in a scene. Prediction
          is achieved by monitoring objects' speed and past
          evaluation values. Fidelity is contrasted for
          different kind of resource restrictions and optimal
          resource assignment based upon unlimited dynamic
          scene look-ahead. To give standard and comparable
          results, the VRML 2.0 specification is used as an
          application programmer interface. Applicability is
          demonstrated with a helical keyboard, a polyphonic
          MIDI stream driven animation including user
          interaction (user moves around, playing together
          with programmed notes). The developed sound
          spatialization resource manager gives improved
          spatialization fidelity under runtime
          constraints. Application programmers and virtual
          reality scene designers are freed from the burden of
          assigning and predicting the sound sources.}
}

@inproceedings{psfc-roman:1996,
  author       = {Katsumi Amano and Fumio Matsushita and Hirofumi Yanagawa and Michael Cohen and Jens Herder and Yoshiharu Koba and Mikio Tohyama},
  title        = {{\sc psfc}: the {Pioneer} {Sound} {Field} {Control} {System} at the {University} of {Aizu} {Multimedia} {Center}},
  booktitle    = {{\sc ro-man}'96 - 5th {\sc ieee} International Workshop on Robot and Human Communication},
  year         = {1996},
  month        = nov,
  organization = {{\sc ieee}},
  abstract     = {The PSFC, or Pioneer Sound Field Control System, is
          a DSP-driven hemispherical 14-loudspeaker array,
          installed at the University of Aizu Multimedia
          Center. Collocated with a large screen
          rear-projection stereographic display, the PSFC
          features realtime control of virtual room
          characteristics and direction of two separate sound
          sources, smoothly steering them around a
          configurable soundscape. The PSFC controls an entire
          sound field, including sound direction, virtual
          distance, and simulated environment (reverb level,
          room size and liveness) for each source. It can also
          configure a dry (DSP-less) switching matrix for
          direct directionalization. The PSFC speaker dome is
          about 14m in diameter, allowing about 20 users at
          once to comfortably stand or sit near its sweet spot.}
}

@inproceedings{herder-icad:1996,
  author    = {Jens Herder and Michael Cohen},
  title     = {Design of a {Helical} {Keyboard}},
  editor    = {Steven P. Frysinger and Gregory Kramer},
  booktitle = {{\sc icad}'96 --- Int. Conf. on Auditory Display},
  year      = {1996},
  month     = nov,
  address   = {Palo Alto, CA; USA},
  abstract  = {Inspired by the cyclical nature of octaves and
          helical structure of a scale (Shepard, '82 and '83),
          we prepared a model of a piano-style keyboard
          (prototyped in Mathematica), which was then
          geometrically warped into a left-handed helical
          configuration, one octave/revolution, pitch mapped
          to height. The natural orientation of upper
          frequency keys higher on the helix suggests a
          parsimonious left-handed chirality, so that
          ascending notes cross in front of a typical listener
          left to right. Our model is being imported (via the
          dxf file format) into (Open Inventor/)VRML, where it
          can be driven by MIDI events, realtime or sequenced,
          which stream is both synthesized (by a Roland Sound
          Module), and spatialized by a heterogeneous spatial
          sound backend (including the Crystal River
          Engineering Acoustetron II and the Pioneer Sound
          Field Control speaker-array System), so that the
          sound of the respective notes is directionalized
          with respect to sinks, avatars of the human user, by
          default in the tube of the helix. This is a
          work-in-progress which we hope to be fully
          functional within the next few months.}
}

@InProceedings{herder-mmvr4:1996,
  author =   "Jens Herder and Karol Myszkowski and Tosiyasu
          L. Kunii and Masumi Ibusuki",
  title =    "A {Virtual} {Reality} {Interface} to an {Intelligent} {Dental}
          {Care} {System}",
  editor =   "Suzanne J. Weghorst and Hans B. Sieburg and Karen S. Morgan",
  volume =   29,
  series =   "Studies in Health Technology and Informatics",
  booktitle =    "Medicine Meets Virtual Reality 4",
  year =     1996,
  publisher =    "IOS Press",
  address =  "Van Diemenstraat 94, 1013 CN Amsterdam, Netherlands",
  month =    jan,
  abstract =     "The design and fabrication of teeth restorations in
          dentistry rely increasingly on CAD/CAM
          techniques. We present an approach for interactive
          design of the occlusal surface of teeth based on
          simulation of jaw articulation and computer-aided
          diagnosis of occlusal disorders. To bridge the
          cognitive gap between the dentist and the computer
          system, we propose a virtual reality user interface,
          which applies the metaphors of tools and techniques
          known in dentistry. This makes the restoration
          design more intuitive for dentists. The system uses
          Virtual Reality Modeling Language (VRML) and HTML
          standards to generate a treatment report and
          exchange data in an electronic form. The simulation
          of jaw articulation requires fast calculation of
          multi-point contacts and detection of collisions
          between surfaces of teeth and restorations. We have
          developed a distance maps technique which exhibits
          realtime performance for objects with complex
          geometry and is suitable for other virtual reality
          systems dealing with complex contacts. The
          characteristics of contacts between teeth acquired
          during lower jaw motion are compactly represented as
          accumulated distance maps. These maps are then used
          for automatic removal of interferences between the
          restorations and the opponent teeth, and provide the
          dentist with information for further manual
          adjustments of the occlusal surfaces."
}

@inproceedings{idcs-visual-model:1995,
  author    = {Karol Myszkowski and Galina Okuneva and Jens Herder and Tosiyasu L. Kunii and Masumi Ibusuki},
  title     = {Visual Simulation of the Chewing Process for Dentistry},
  booktitle = {Visualization \& Modelling},
  year      = {1995},
  month     = dec,
  address   = {Leeds},
  abstract  = {CAD/CAM techniques are increasingly used in
          dentistry for the design and fabrication of teeth
          restorations. Important concerns are the correction
          of articulation problems that existed before
          treatment and the prevention of treatment-generated
          problems. These require interactive evaluation of
          the occlusal surfaces of teeth during
          mastication. Traditional techniques based on the use
          of casts with mechanical articulators require manual
          adjustment of occlusal surfaces, which becomes
          impractical when hard restoration materials like
          porcelain are used; they are also time and labor
          consuming and provide little visual information. We
          present new visual tools and a related user
          interface for global articulation simulation,
          developed for the Intelligent Dental Care System
          project. The aim of the simulation is visual
          representation of characteristics relevant to the
          chewing process. The simulation is based on the
          construction of distance maps, which are visual
          representations of the distributions of the
          distances of points in a tooth to the opposite
          jaw. We use rasterizing graphics hardware for fast
          calculation of the distance maps. Distance maps are
          used for collision detection and for the derivation
          of various characteristics showing the distribution
          of load on the teeth and the chewing capability of
          the teeth. Such characteristics can be calculated
          for particular positions of the jaws; cumulative
          characteristics are used to describe the properties
          of jaw movement. This information may be used for
          interactive design of the occlusal surfaces of
          restorations and for jaw articulation diagnosis. We
          also demonstrate elements of a user interface that
          exploit metaphors familiar to dentists from everyday practice.}
}


@InCollection{idcs-visual-model:1997,
  author =   "Karol Myszkowski and Galina Okuneva and Jens Herder
          and Tosiyasu L. Kunii and Masumi Ibusuki",
  booktitle =    "Visualization \& Modeling",
  title =    "Visual Simulation of the Chewing Process for
          Dentistry",
  chapter =      24,
  publisher =    "Academic Press",
  editor =   "Rae Earnshaw and John Vince and Huw Jones",
  pages =    "419--438",
  address =  "24-28 Oval Road, London NW1 7DX, UK",
  note =     "{\sc isbn} 0-12-227738-4",
  year =     1997,
  month =    dec,
  abstract =     "CAD/CAM techniques are increasingly used in
          dentistry for the design and fabrication of teeth
          restorations. Important concerns are the correction
          of articulation problems that existed before
          treatment and the prevention of treatment-generated
          problems. These require interactive evaluation of
          the occlusal surfaces of teeth during
          mastication. Traditional techniques based on the use
          of casts with mechanical articulators require manual
          adjustment of occlusal surfaces, which becomes
          impractical when hard restoration materials like
          porcelain are used; they are also time and labor
          consuming and provide little visual information. We
          present new visual tools and a related user
          interface for global articulation simulation,
          developed for the Intelligent Dental Care System
          project. The aim of the simulation is visual
          representation of characteristics relevant to the
          chewing process. The simulation is based on the
          construction of distance maps, which are visual
          representations of the distributions of the
          distances of points in a tooth to the opposite
          jaw. We use rasterizing graphics hardware for fast
          calculation of the distance maps. Distance maps are
          used for collision detection and for the derivation
          of various characteristics showing the distribution
          of load on the teeth and the chewing capability of
          the teeth. Such characteristics can be calculated
          for particular positions of the jaws; cumulative
          characteristics are used to describe the properties
          of jaw movement. This information may be used for
          interactive design of the occlusal surfaces of
          restorations and for jaw articulation diagnosis. We
          also demonstrate elements of a user interface that
          exploit metaphors familiar to dentists from everyday practice."
}

@misc{english-www:1995,
  author       = {Kiel Christianson and Jens Herder},
  title        = {Mini-lectures in {Computer} {Science} on the {\sc www}},
  howpublished = {University of Aizu, Center for Language Research 1995 Annual Review},
  year         = {1995},
  month        = dec,
  note         = {{\tt http://vsvr.medien.hs-duesseldorf.de/publications/clr-\linebreak[0]{}report95csm/clrreport95csm.html}}
}

@inproceedings{herder-perspicuity:1997,
  author       = {Jens Herder and Michael Cohen},
  title        = {Enhancing Perspicuity of Objects in Virtual Reality Environments},
  booktitle    = {CT'97 --- Second Int. Cognitive Technology Conf.},
  year         = {1997},
  month        = aug,
  organization = {{\sc ieee}},
  publisher    = {{\sc ieee} Press},
  pages        = {228--237},
  note         = {{\sc isbn} 0-8186-8084-9},
  abstract     = {In an information-rich Virtual Reality (VR)
          environment, the user is immersed in a world
          containing many objects providing that
          information. Given the finite computational
          resources of any computer system, optimization is
          required to ensure that the most important
          information is presented to the user as clearly as
          possible and in a timely fashion. In particular,
          what is desired are means whereby the perspicuity of
          an object may be enhanced when appropriate. An
          object becomes more perspicuous when the information
          it provides to the user becomes more readily
          apparent. Additionally, if a particular object
          provides high-priority information, it would be
          advantageous to make that object obtrusive as well
          as highly perspicuous. An object becomes more
          obtrusive if it draws attention to itself (or
          equivalently, if it is hard to ignore). This paper
          describes a technique whereby objects may
          dynamically adapt their representation in a user's
          environment according to a dynamic priority
          evaluation of the information each object
          provides. The three components of our approach are:

                  - an information manager that evaluates object
            information priority,
                  - an enhancement manager that tabulates rendering
            features associated with increasing object
            perspicuity and obtrusion as a function of priority,
            and
                  - a resource manager that assigns available object
            rendering resources according to features
            indicated by the enhancement manager for the
            priority set for each object by the information
                    manager.

                  We consider resources like visual space (pixels),
          sound spatialization channels (mixels), MIDI/audio
          channels, and processing power, and discuss our
          approach applied to different applications. Assigned
          object rendering features are implemented locally at
          the object level (e.g., object facing the user using
          the billboard node in VRML 2.0) or globally, using
          helper applications (e.g., active spotlights,
          semi-automatic cameras).}
}

@inproceedings{herder-dictionary:1997,
  author       = {Lothar M. Schmitt and Jens Herder and Subhash Bhalla},
  title        = {Information Retrieval and Database Architecture for Conventional {Japanese} Character Dictionaries},
  booktitle    = {CT'97 --- Second Int. Cognitive Technology Conf.},
  year         = {1997},
  month        = aug,
  organization = {{\sc ieee}},
  publisher    = {{\sc ieee} Press},
  pages        = {200--217},
  note         = {{\sc isbn} 0-8186-8084-9},
  abstract     = {The cycle of abstraction-reconstruction which occurs
          as a fundamental principle in the development of
          culture and in cognitive processes is described and
          analyzed. This approach leads to recognition of
          boundary conditions for and directions of probable
          development of cognitive tools. It is shown how the
          transition from a conventional Japanese-English
          character dictionary to a multi-dimensional language
          database is an instance of such an
          abstraction-reconstruction cycle. The different
          phases of the design of a multi-dimensional language
          database based upon different computer software
          technologies are properly placed in this cycle. The
          methods used include the use of UNIX software tools,
          classical database methods as-well-as the use of
          search engines based upon full text search in this
          process. Several directions of application and
          extension for a multi-dimensional language database
          are discussed from the general point of view of an
          abstraction-reconstruction cycle.}
}
@misc{herder-cscw:1997,
  author       = {Jens Herder},
  title        = {Cooperative Tools for Teaching: an Impact of a Network Environment},
  howpublished = {Annual Report 1996 of the Information Systems and Technology Center, University of Aizu},
  pages        = {3--8},
  year         = {1997},
  abstract     = {Education at the University of Aizu is focussed upon
          computer science. Besides being the subject matter
          of many courses, however, the computer also plays a
          vital role in the educational process itself, both
          in the distribution of instructional media, and in
          providing students with valuable practical
          experience. All students have unlimited access
          (24-hours-a-day) to individual networked
          workstations, most of which are multimedia-capable
          (even video capture is possible in two exercise
          rooms). Without software and content tailored for
          computer-aided instruction, the hardware becomes an
          expensive decoration. In any case, there is a need
          to better educate the instructors and students in
          the use of the equipment. In the interest of
          facilitating effective, collaborative use of
          network-based computers in teaching, this article
          explores the impact that a network environment can
          have on such activities. First, as a general
          overview, and to examine the motivation for the use
          of a network environment in teaching, this article
          reviews a range of different styles of
          collaboration. Then the article shows what kind of
          tools are available for use, within the context of
          what has come to be called Computer-Supported
          Cooperative Work (CSCW).}
}

@article{herder-hk:2002,
  author    = {Jens Herder and Michael Cohen},
  title     = {The {Helical} {Keyboard}: {Perspectives} for {Spatial} {Auditory} {Displays} and {Visual} {Music}},
  journal   = {Journal of New Music Research},
  publisher = {Swets \& Zeitlinger},
  volume    = {31},
  number    = {3},
  pages     = {269--281},
  year      = {2002},
  abstract  = {Auditory displays with the ability to dynamically spatialize virtual sound sources under real-time conditions enable advanced applications for art and music. A listener can be deeply immersed while interacting and participating in the experience. We review some of those applications while focusing on the Helical Keyboard project and discussing the required technology. Inspired by the cyclical nature of octaves and helical structure of a scale, a model of a piano-style keyboard was prepared, which was then geometrically warped into a helicoidal configuration, one octave/revolution, pitch mapped to height and chroma. It can be driven by MIDI events, real-time or sequenced, which stream is both synthesized and spatialized by a spatial sound display. The sound of the respective notes is spatialized with respect to sinks, avatars of the human user, by default in the tube of the helix. Alternative coloring schemes can be applied, including a color map compatible with chromastereoptic eyewear. The graphical display animates polygons, interpolating between the notes of a chord across the tube of the helix. Recognition of simple chords allows directionalization of all the notes of a major triad from the position of its musical root. The system is designed to allow, for instance, separate audition of harmony and melody, commonly played by the left and right hands, respectively, on a normal keyboard. Perhaps the most exotic feature of the interface is the ability to fork one's presence, replicating subject instead of object by installing multiple sinks at arbitrary places around a virtual scene so that, for example, harmony and melody can be separately spatialized, using two heads to normalize the octave; such a technique effectively doubles the helix from the perspective of a single listener.
Rather than a symmetric arrangement of the individual helices, they are perceptually superimposed in-phase, co-extensively, so that corresponding notes in different registers are at the same azimuth.}
}

@inproceedings{herder-sa:1997,
  author    = {Jens Herder},
  title     = {Tools and {Widgets} for {Spatial} {Sound} {Authoring}},
  editor    = {Harold P. Santo},
  pages     = {87--95},
  address   = {Vilamoura, Portugal},
  booktitle = {{Compugraphics} '97, Sixth Int. Conf. on
          Computational Graphics and Visualization Techniques:
          Graphics in the Internet Age},
  year      = 1997,
  publisher = {{\sc grasp}},
  month     = dec,
  note      = {{\sc isbn} 972-8342-02-0},
  abstract  = {Broader use of virtual reality environments and sophisticated
    animations spawn a need for spatial sound. Until now, spatial sound
    design has been based very much on experience and trial and error. Most
    effects are hand-crafted, because good design tools for spatial sound
    do not exist. This paper discusses spatial sound authoring and its
    applications, including shared virtual reality environments based on
    VRML. New utilities introduced by this research are an
    inspector for sound sources, an interactive resource manager, and a
    visual soundscape manipulator. The tools are part of a sound
    spatialization framework and allow a designer/author of multimedia
    content to monitor and debug sound events. Resource constraints
    like limited sound spatialization channels can also be simulated.}
}

@article{herder-psfc:1998,
    author = "Katsumi Amano and Fumio Matsushita and Hirofumi Yanagawa
            and Michael Cohen and Jens Herder and William Martens
            and Yoshiharu Koba and Mikio Tohyama",
    title = "A Virtual Reality Sound System Using Room-Related Transfer Functions Delivered Through a Multispeaker Array:
        the {PSFC} at the {University of Aizu} Multimedia Center",
    journal = "TVRSJ: {\bf Trans}.\ of the {\bf V}irtual {\bf R}eality {\bf S}ociety of {\bf J}apan",
    year = 1998,
    volume = 3,
    number = 1,
    pages = "1--12",
    month = mar,
    note = "{\sc issn}~1342-4386",
    abstract = "The {\sc psfc}, or {\bf P}ioneer {\bf S}ound {\bf
          F}ield {\bf C}ontroller, is a {\sc dsp}-driven
          hemispherical loudspeaker array, installed at the
          University of Aizu Multimedia Center. The {\sc psfc}
          features realtime manipulation of the primary
          components of sound spatialization for each of two
          audio sources located in a virtual environment,
          including the content (apparent direction and
          distance) and context (room characteristics:
          reverberation level, room size and liveness). In an
          alternate mode, it can also direct the destination
          of the two separate input signals across 14
          loudspeakers, manipulating the direction of the
          virtual sound sources with no control over apparent
          distance other than that afforded by source loudness
          (including no simulated environmental reflections or
          reverberation). The {\sc psfc} speaker dome is about
          10~m in diameter, accommodating about fifty
          simultaneous users, including about twenty users
          comfortably standing or sitting near its ``sweet
          spot,'' the area in which the illusions of sound
          spatialization are most vivid. Collocated with a
          large screen rear-projection stereographic display,
          the {\sc psfc} is intended for advanced multimedia
          and {\bf v}irtual {\bf r}eality applications."
}

@inproceedings{herder-ex-include:1998,
  author    = {Michael Cohen and Jens Herder},
  title     = {Symbolic representations of exclude and include for
          audio sources and sinks: Figurative suggestions of
          mute/solo \& cue and deafen/confide \& harken},
  booktitle = {Virtual Environments 98},
  address   = {Stuttgart},
  year      = 1998,
  pages     = {95/1--4},
  month     = jun,
  abstract  = {Shared virtual environments require generalized
          control of user-dependent media streams. Traditional
          audio mixing idioms for enabling and disabling
          various sources employ {\tt mute} and {\tt solo}
          functions, which, along with {\tt cue},
          selectively disable or focus on respective channels.
          Exocentric interfaces which explicitly model not
          only spatial audio sources, but also location,
          orientation, directivity, and multiplicity of sinks,
          motivate the generalization of {\tt mute}/{\tt solo} \& {\tt
          cue} to exclude and include, manifested for sinks as
          {\tt deafen}/{\tt confide} \& {\tt harken}, a
          narrowing of stimuli by explicitly blocking out
          and/or concentrating on selected entities. This
          paper introduces figurative representations of these
          functions, virtual hands to be clasped over avatars'
          ears and mouths, with orientation suggesting the
          nature of the blocking. Applications include
          groupware for collaboration and teaching,
          teleconferencing and chat spaces, and authoring and
          manipulation of distributed virtual environments.}
}

@inproceedings{herder-ex-include-springer:1998,
  author    = {Michael Cohen and Jens Herder},
  title     = {Symbolic representations of exclude and include for
          audio sources and sinks: Figurative suggestions of
          mute/solo \& cue and deafen/confide \& harken},
  editor    = {M. G{\"o}bel and J. Landauer and U. Lang and M. Wapler},
  booktitle = {Virtual Environments '98, Proceedings of the
          Eurographics Workshop},
  publisher = {Springer-Verlag/Wien},
  address   = {Stuttgart, Germany},
  year      = 1998,
  pages     = {235--242},
  month     = jun,
  note      = {{\sc isbn} 3-211-83233-5},
  abstract  = {Shared virtual environments require generalized
          control of user-dependent media streams. Traditional
          audio mixing idioms for enabling and disabling
          various sources employ {\tt mute} and {\tt solo}
          functions, which, along with {\tt cue},
          selectively disable or focus on respective channels.
          Exocentric interfaces which explicitly model not
          only spatial audio sources, but also location,
          orientation, directivity, and multiplicity of sinks,
          motivate the generalization of {\tt mute}/{\tt solo} \& {\tt
          cue} to exclude and include, manifested for sinks as
          {\tt deafen}/{\tt confide} \& {\tt harken}, a
          narrowing of stimuli by explicitly blocking out
          and/or concentrating on selected entities. This
          paper introduces figurative representations of these
          functions, virtual hands to be clasped over avatars'
          ears and mouths, with orientation suggesting the
          nature of the blocking. Applications include
          groupware for collaboration and teaching,
          teleconferencing and chat spaces, and authoring and
          manipulation of distributed virtual environments.}
}

@InProceedings{herder-spat-server:1998,
  author =   "Kimitaka Ishikawa and Minefumi Hirose and Jens Herder",
  title =    "A Sound Spatialization Server for a Speaker Array as an Integrated Part of a Virtual Environment",
  booktitle =    "{\sc ieee} YUFORIC Germany 1998",
  address =  "Stuttgart",
  year =         1998,
  month =    jun,
  note =         "{\tt http://vsvr.medien.hs-duesseldorf.de/\~{}herder/\linebreak[0]{}publications/\linebreak[0]{}ve98-spatial-server/}",
  abstract =     "Spatial sound plays an important role in virtual reality environments, allowing orientation in space, giving a feeling of space, focusing the
user on events in the scene, and substituting missing feedback cues (e.g., force feedback). The sound spatialization framework of the
University of Aizu, which supports a number of spatialization backends, has been extended to include a sound spatialization server for a
multichannel loudspeaker array (Pioneer Sound Field Control System). Our goal is that the spatialization server allows easy integration into
virtual environments. Modeling of distance cues, which are essential for full immersion, is discussed. Furthermore, the integration of this
prototype into different applications allowed us to reveal the advantages and problems of spatial sound for virtual reality environments."
}

@inproceedings{herder-ssf-hc:1998,
  author       = {Jens Herder},
  title        = {Sound Spatialization Framework: An Audio Toolkit for Virtual Environments},
  booktitle    = {First Int. Conf. on Human and Computer},
  year         = 1998,
  organization = {University of Aizu},
  address      = {Aizu-Wakamatsu, Japan},
  month        = sep,
  abstract     = {The Sound Spatialization Framework is a C++ toolkit
          and development environment for providing advanced
          sound spatialization for virtual reality and
          multimedia applications. The Sound Spatialization
          Framework provides many powerful display and
          user-interface features not found in other sound
          spatialization software packages. It provides
          facilities that go beyond simple sound source
          spatialization: visualization and editing of the
          soundscape, multiple sinks, clustering of sound
          sources, monitoring and controlling resource
          management, support for various spatialization
          backends, and classes for {\sc midi} animation and
          handling.}
}

@InProceedings{herder-perc-obstr:1999,
  author =   "William L. Martens and Jens Herder",
  title =    "Perceptual criteria for eliminating reflectors and
          occluders from the rendering of environmental sound",
  booktitle =    "Proc.\ Joint Meeting of the 137th Regular
          Meeting of the Acoustical Society of America and the
          2nd Convention of the European Acoustics
          Association: Forum Acusticum",
  year =     1999,
  organization = "Acoustical Society of America ({\sc
          asa}), and European Acoustics Association ({\sc eaa})",
  address =  "Berlin",
  month =    mar,
  pages =    "{\sc cdrom}",
  note =     "Signal Processing in Acoustics and Psychological and
          Physiological Acoustics: Auditory Displays, 1pSP2",
  abstract =     "Given limited computational resources available for
          the rendering of spatial sound imagery, we seek to
          determine effective means for choosing what
          components of the rendering will provide the most
          audible differences in the results. Rather than
          begin with an analytic approach that attempts to
          predict audible differences on the basis of
          objective parameters, we chose to begin with
          subjective tests of how audibly different the
          rendering result may be heard to be when that result
          includes two types of sound obstruction: reflectors
          and occluders.  Single-channel recordings of 90
          short speech sounds were made in an anechoic chamber
          in the presence and absence of these two types of
          obstructions, and as the angle of those obstructions
          varied over a 90 degree range.  These recordings
          were reproduced over a single loudspeaker in that
          anechoic chamber, and listeners were asked to rate
          how confident they were that the recording of each
          of these 90 stimuli included an obstruction.  These
          confidence ratings can be used as an integral
          component in the evaluation function used to
          determine which reflectors and occluders are most
          important for rendering."
}

@Article{herder-perc-obstr-abstract-jasa:1999,
  author =   "William L. Martens and Jens Herder",
  title =    "Perceptual criteria for eliminating reflectors and
          occluders from the rendering of environmental
          sound",
  journal =      JASA,
  volume =       105,
  number =       2,
  year =     1999,
  organization = "Acoustical Society of America ({\sc
          asa}), and European Acoustics Association ({\sc eaa})",
  address =  "Berlin",
  month =    feb,
  pages =    "979",
  note =     "Proc.\ Joint Meeting of the 137th Regular
          Meeting of the Acoustical Society of America and the
          2nd Convention of the European Acoustics
          Association: Forum Acusticum; Signal Processing in
          Acoustics and Psychological and Physiological
          Acoustics: Auditory Displays, 1pSP2",
  abstract =     "Given limited computational resources available for
          the rendering of spatial sound imagery, we seek to
          determine effective means for choosing what
          components of the rendering will provide the most
          audible differences in the results. Rather than
          begin with an analytic approach that attempts to
          predict audible differences on the basis of
          objective parameters, we chose to begin with
          subjective tests of how audibly different the
          rendering result may be heard to be when that result
          includes two types of sound obstruction: reflectors
          and occluders.  Single-channel recordings of 90
          short speech sounds were made in an anechoic chamber
          in the presence and absence of these two types of
          obstructions, and as the angle of those obstructions
          varied over a 90 degree range.  These recordings
          were reproduced over a single loudspeaker in that
          anechoic chamber, and listeners were asked to rate
          how confident they were that the recording of each
          of these 90 stimuli included an obstruction.  These
          confidence ratings can be used as an integral
          component in the evaluation function used to
          determine which reflectors and occluders are most
          important for rendering."
}

@Article{herder-perc-obstr-abstract-acoustica:1999,
  author =   "William L. Martens and Jens Herder",
  title =    "Perceptual criteria for eliminating reflectors and
          occluders from the rendering of environmental
          sound",
  journal =      ACUSTICA,
  volume =       85,
  number =       "Suppl.\,1",
  year =     1999,
  organization = "Acoustical Society of America ({\sc
          asa}), and European Acoustics Association ({\sc eaa})",
  address =  "Berlin",
  month =    mar,
  pages =    "S53",
  note =     "Proc.\ Joint Meeting of the 137th Regular
          Meeting of the Acoustical Society of America and the
          2nd Convention of the European Acoustics
          Association: Forum Acusticum; Signal Processing in
          Acoustics and Psychological and Physiological
          Acoustics: Auditory Displays, 1pSP2",
  abstract =     "Given limited computational resources available for
          the rendering of spatial sound imagery, we seek to
          determine effective means for choosing what
          components of the rendering will provide the most
          audible differences in the results. Rather than
          begin with an analytic approach that attempts to
          predict audible differences on the basis of
          objective parameters, we chose to begin with
          subjective tests of how audibly different the
          rendering result may be heard to be when that result
          includes two types of sound obstruction: reflectors
          and occluders.  Single-channel recordings of 90
          short speech sounds were made in an anechoic chamber
          in the presence and absence of these two types of
          obstructions, and as the angle of those obstructions
          varied over a 90 degree range.  These recordings
          were reproduced over a single loudspeaker in that
          anechoic chamber, and listeners were asked to rate
          how confident they were that the recording of each
          of these 90 stimuli included an obstruction.  These
          confidence ratings can be used as an integral
          component in the evaluation function used to
          determine which reflectors and occluders are most
          important for rendering."
}

@InProceedings{herder-filter-obstr:1999,
  author =   "William L. Martens and Jens Herder and Yoshiki Shiba",
  title =    "A filtering model for efficient rendering of
the spatial image of an occluded virtual sound source",
  booktitle =    "Proc.\ Joint Meeting of the 137th Regular
          Meeting of the Acoustical Society of America and the
          2nd Convention of the European Acoustics
          Association: Forum Acusticum",
  year =     1999,
  organization = "Acoustical Society of America ({\sc
          asa}), and European Acoustics Association ({\sc eaa})",
  address =  "Berlin",
  month =    mar,
  pages =    "{\sc cdrom}",
  note =     "Signal Processing in Acoustics and Psychological and
          Physiological Acoustics: Auditory Displays, 1pSP7",
  abstract =     "Rendering realistic spatial sound imagery for
          complex virtual environments must take into account
          the effects of obstructions such as reflectors and
          occluders. It is relatively well understood how to
          calculate the acoustical consequence that would be
          observed at a given observation point when an
          acoustically opaque object occludes a sound
          source. But the interference patterns generated by
          occluders of various geometries and orientations
          relative to the virtual source and receiver are
          computationally intense if accurate results are
          required. In many applications, however, it is
          sufficient to create a spatial image that is
          recognizable by the human listener as the sound of
          an occluded source. In the interest of improving
          audio rendering efficiency, a simplified filtering
          model was developed and its audio output submitted
          to psychophysical evaluation. Two perceptually
          salient components of occluder acoustics were
          identified that could be directly related to the
          geometry and orientation of a simple
          occluder. Actual occluder impulse responses measured
          in an anechoic chamber resembled the responses of a
          model incorporating only a variable duration delay
          line and a low-pass filter with variable cutoff frequency."
}

@Article{herder-filter-obstr-abstract-jasa:1999,
  author =   "William L. Martens and Jens Herder and Yoshiki Shiba",
  title =    "A filtering model for efficient rendering of
the spatial image of an occluded virtual sound source",
  year =     1999,
  journal =      JASA,
  volume =       105,
  number =       2,
  organization = "Acoustical Society of America ({\sc
          asa}), and European Acoustics Association ({\sc eaa})",
  address =  "Berlin",
  month =    feb,
  pages =    "980",
  note =     "Proc.\ Joint Meeting of the 137th Regular
          Meeting of the Acoustical Society of America and the
          2nd Convention of the European Acoustics
          Association: Forum Acusticum; Signal Processing in Acoustics and Psychological and
          Physiological Acoustics: Auditory Displays, 1pSP7",
  abstract =     "Rendering realistic spatial sound imagery for
          complex virtual environments must take into account
          the effects of obstructions such as reflectors and
          occluders. It is relatively well understood how to
          calculate the acoustical consequence that would be
          observed at a given observation point when an
          acoustically opaque object occludes a sound
          source. But the interference patterns generated by
          occluders of various geometries and orientations
          relative to the virtual source and receiver are
          computationally intense if accurate results are
          required. In many applications, however, it is
          sufficient to create a spatial image that is
          recognizable by the human listener as the sound of
          an occluded source. In the interest of improving
          audio rendering efficiency, a simplified filtering
          model was developed and its audio output submitted
          to psychophysical evaluation. Two perceptually
          salient components of occluder acoustics were
          identified that could be directly related to the
          geometry and orientation of a simple
          occluder. Actual occluder impulse responses measured
          in an anechoic chamber resembled the responses of a
          model incorporating only a variable duration delay
          line and a low-pass filter with variable cutoff frequency."
}

@Article{herder-filter-obstr-abstract-acoustica:1999,
  author =   "William L. Martens and Jens Herder and Yoshiki Shiba",
  title =    "A filtering model for efficient rendering of
the spatial image of an occluded virtual sound source",
  year =     1999,
  journal =      ACUSTICA,
  volume =       85,
  number =       "Suppl.\,1",
  organization = "Acoustical Society of America ({\sc
          asa}), and European Acoustics Association ({\sc eaa})",
  address =  "Berlin",
  month =    mar,
  pages =    "S54",
  note =     "Proc.\ Joint Meeting of the 137th Regular
          Meeting of the Acoustical Society of America and the
          2nd Convention of the European Acoustics
          Association: Forum Acusticum; Signal Processing in Acoustics and Psychological and
          Physiological Acoustics: Auditory Displays, 1pSP7",
  abstract =     "Rendering realistic spatial sound imagery for
          complex virtual environments must take into account
          the effects of obstructions such as reflectors and
          occluders. It is relatively well understood how to
          calculate the acoustical consequence that would be
          observed at a given observation point when an
          acoustically opaque object occludes a sound
          source. But the interference patterns generated by
          occluders of various geometries and orientations
          relative to the virtual source and receiver are
          computationally intense if accurate results are
          required. In many applications, however, it is
          sufficient to create a spatial image that is
          recognizable by the human listener as the sound of
          an occluded source. In the interest of improving
          audio rendering efficiency, a simplified filtering
          model was developed and its audio output submitted
          to psychophysical evaluation. Two perceptually
          salient components of occluder acoustics were
          identified that could be directly related to the
          geometry and orientation of a simple
          occluder. Actual occluder impulse responses measured
          in an anechoic chamber resembled the responses of a
          model incorporating only a variable duration delay
          line and a low-pass filter with variable cutoff frequency."
}

@article{herder-sa:1998,
  author    = {Jens Herder},
  title     = {Tools and {Widgets} for {Spatial} {Sound} {Authoring}},
  journal   = {Computer Networks \& ISDN Systems},
  publisher = {Elsevier Science},
  volume    = {30},
  number    = {20-21},
  pages     = {1933--1940},
  year      = 1998,
  month     = oct,
  abstract  = {Broader use of virtual reality environments and sophisticated
    animations spawn a need for spatial sound. Until now, spatial sound
    design has been based very much on experience and trial and error. Most
    effects are hand-crafted, because good design tools for spatial sound
    do not exist. This paper discusses spatial sound authoring and its
    applications, including shared virtual reality environments based on
    VRML. New utilities introduced by this research are an
    inspector for sound sources, an interactive resource manager, and a
    visual soundscape manipulator. The tools are part of a sound
    spatialization framework and allow a designer/author of multimedia
    content to monitor and debug sound events. Resource constraints
    like limited sound spatialization channels can also be simulated.}
}

@article{herder-ssf-3d-forum:1998,
  author   = {Jens Herder},
  title    = {Sound {S}patialization {F}ramework: {A}n {A}udio {T}oolkit for {V}irtual {E}nvironments},
  journal  = {Journal of the 3D-Forum Society, Japan},
  year     = 1998,
  volume   = {12},
  number   = {3},
  pages    = {17--22},
  month    = sep,
  abstract = {The Sound Spatialization Framework is a C++ toolkit
          and development environment for providing advanced
          sound spatialization for virtual reality and
          multimedia applications. The Sound Spatialization
          Framework provides many powerful display and
          user-interface features not found in other sound
          spatialization software packages. It provides
          facilities that go beyond simple sound source
          spatialization: visualization and editing of the
          soundscape, multiple sinks, clustering of sound
          sources, monitoring and controlling resource
          management, support for various spatialization
          backends, and classes for {\sc midi} animation and
          handling.}
}

@phdthesis{herder-phd:1999,
  author   = {Jens Herder},
  title    = {A {S}ound {S}patialization {R}esource {M}anagement {F}ramework},
  school   = {University of Tsukuba},
  address  = {Tsukuba, Japan},
  year     = 1999,
  month    = jul,
  abstract = {In a virtual reality environment, users are immersed in a scene with
    objects which might produce sound. The responsibility of a VR
    environment is to present these objects, but a practical system has only
    limited resources, including spatialization channels (mixels),
    MIDI/audio channels, and processing power. A sound
    spatialization resource manager, introduced in this thesis,
    controls sound resources and optimizes fidelity
    (presence) under given conditions, using a priority scheme based
    on psychoacoustics. Objects which are
    spatially close together can be coalesced by a novel
    clustering algorithm, which considers listener  localization
    errors. Application programmers and VR scene designers are
    freed from the burden of assigning mixels and predicting sound
    source locations. The framework includes an abstract interface
    for sound spatialization backends, an API for the VR
    environments, and multimedia authoring tools.}
}


@article{herder-jasj-ja:1999,
  author  = {Michael Cohen and Jens Herder and William L. Martens},
  title   = {{Cyberspatial {A}udio {T}echnology}},
  journal = JASJ,
  year    = 1999,
  volume  = {55},
  number  = {10},
  month   = oct,
  pages   = {730--731},
  note    = {(Japanese)}
}

@article{herder-jaesj-en:1999,
  author   = {Michael Cohen and Jens Herder and William L. Martens},
  title    = {{Cyberspatial {A}udio {T}echnology}},
  journal  = JAESJ,
  year     = 1999,
  volume   = {20},
  number   = {6},
  pages    = {389--395},
  month    = nov,
  abstract = {Cyberspatial audio applications are distinguished from the broad range of spatial audio
applications in a number of important ways that help to focus this review.
Most significant is that cyberspatial audio is most often designed to be
responsive to user inputs.
In contrast to non-interactive auditory displays, cyberspatial auditory
displays
typically allow active exploration of the virtual environment in which users
find themselves.  Thus, at least some portion of the audio presented in a
cyberspatial environment must be selected, processed, or otherwise rendered
with minimum delay relative to user input.  Besides the technological demands
associated with realtime delivery of spatialized sound, the type
and quality of auditory experiences supported are also very different from
those associated with displays that support stationary sound localization.}
}

@InProceedings{herder-clustering-hc:1999,
  author =   "Jens Herder",
  title =    "Optimization of {S}ound {S}patialization {R}esource {M}anagement through {C}lustering",
  booktitle =    "Second Int. Conf. on Human and Computer",
  year =     1999,
  organization = "University of Aizu",
  address =  "Aizu-Wakamatsu, Japan",
  month =    sep,
  pages =        "21:1--7",
  abstract =     "Level-of-detail is a concept well-known in computer graphics to reduce
the number of rendered polygons. Depending on the distance to the
subject (viewer), the
objects' representation is changed. A similar concept is the clustering
of sound sources for sound spatialization.
Clusters can be used to hierarchically organize mixels
and to optimize the use of resources, by grouping multiple sources
together into a single representative source.
Such a clustering process should minimize the error of position allocation of
elements, perceived as angle and distance, and also differences between
velocity relative to the sink (i.e., Doppler shift).
Objects with similar direction of motion
and speed (relative to sink) in the same acoustic resolution
cone and with similar distance to a
sink can be grouped together.
"
}

@InProceedings{herder-visualization-hc:1999,
  author =   "Jens Herder",
  title =    "Visualization of a {C}lustering {A}lgorithm of {S}ound {S}ources based on {L}ocalization {E}rrors",
  booktitle =    "Second Int.\ Conf.\ on Human and Computer",
  year =     1999,
  organization = "University of Aizu",
  address =  "Aizu-Wakamatsu, Japan",
  month =    sep,
  pages =        "22:1--5",
  abstract =     "A module for soundscape monitoring and visualizing resource management processes was extended for presenting clusters, generated by a novel sound source clustering algorithm. This algorithm groups multiple sound sources together into a single representative source, considering localization errors depending on listener orientation. Localization errors are visualized for each cluster using resolution cones. Visualization is done in runtime and allows understanding and evaluation of the clustering algorithm."
}

@article{herder-clustering-3d-forum:1999,
  author   = {Jens Herder},
  title    = {Optimization of {S}ound {S}patialization {R}esource {M}anagement through {C}lustering},
  journal  = {Journal of the 3D-Forum Society, Japan},
  year     = 1999,
  volume   = {13},
  number   = {3},
  month    = sep,
  pages    = {59--65},
  abstract = {Level-of-detail is a concept well-known in computer graphics to reduce
the number of rendered polygons. Depending on the distance to the
subject (viewer), the
objects' representation is changed. A similar concept is the clustering
of sound sources for sound spatialization.
Clusters can be used to hierarchically organize mixels
and to optimize the use of resources, by grouping multiple sources
together into a single representative source.
Such a clustering process should minimize the error of position allocation of
elements, perceived as angle and distance, and also differences between
velocity relative to the sink (i.e., Doppler shift).
Objects with similar direction of motion
and speed (relative to sink) in the same acoustic resolution
cone and with similar distance to a
sink can be grouped together.
}
}

@article{herder-visualization-3d-forum:1999,
  author   = {Jens Herder},
  title    = {Visualization of a {C}lustering {A}lgorithm of {S}ound {S}ources based on {L}ocalization {E}rrors},
  journal  = {Journal of the 3D-Forum Society, Japan},
  year     = 1999,
  volume   = {13},
  number   = {3},
  month    = sep,
  pages    = {66--70},
  abstract = {A module for soundscape monitoring and visualizing resource management processes was extended for presenting clusters, generated by a novel sound source clustering algorithm. This algorithm groups multiple sound sources together into a single representative source, considering localization errors depending on listener orientation. Localization errors are visualized for each cluster using resolution cones. Visualization is done in runtime and allows understanding and evaluation of the clustering algorithm.}
}

@Article{herder-mmc-ja:2000,
  author =       {Jens Herder},
  title =    {Interactive {Sound} {Spatialization} - a {Primer}},
  journal =      {MM News, University of Aizu Multimedia Center},
  year =     {2000},
  volume =   {8},
  pages =    {8--12},
  month =    mar,
  note =     {(Japanese)},
  abstract =     "Sound spatialization is a technology which puts sound into the
                  three dimensional space, so that it has a perceivable direction
                  and distance. Interactive means mutually or reciprocally active.
                  Interaction is when one action (e.g., user moves mouse) has direct
                  or immediate influence to other actions (e.g., processing by a
                  computer: graphics change in size). Based on this definition an
                  introduction to sound reproduction using DVD and virtual
                  environments is given and illustrated by applications (e.g.,
                  virtual concerts)."
}


@InProceedings{herder-cve:2000,
  author =   {Yasuhiro Yamazaki and Jens Herder},
  title =    {Exploring Spatial Audio Conferencing Functionality in Multiuser Virtual Environments},
  booktitle =    {The Third Int.\ Conf.\ on Collaborative Virtual Environments},
  pages =    {207--208},
  year =     2000,
  address =  {San Francisco, USA},
  month =    sep,
  organization = {ACM},
  abstract = "A chatspace was developed that allows conversation
with {\sc 3d} sound using networked streaming in a shared virtual
environment. The system provides an interface to advanced audio
features, such as a ``whisper function'' for conveying a confided audio
stream. This study explores the use of spatial audio to
enhance a user's experience in multiuser virtual environments."
}

@InProceedings{herder-mm:2000,
  author =   {Kenji Suzuki and Yuji Nishoji and Jens Herder},
  title =    {Implementation of Aural Attributes for Simulation of Room Effects in
Virtual Environments},
  booktitle =    {ACM Multimedia 2000},
  pages =    {439--441},
  year =     2000,
  address =  {Los Angeles, USA},
  month =    oct,
  organization = {ACM},
  abstract = "The audio design for virtual environments includes
              simulation of acoustical room properties besides
              specifying sound sources and sinks and their behavior.
              Virtual environments supporting room reverberation
              not only gain realism but also provide additional
              information to the user about surrounding space.
              Catching the different
              sound properties by the different spaces requires
              partitioning the space by the properties of aural spaces.
              We define soundscape and aural attributes as an
              application and multimedia content interface.
              Calculated data on an abstract level is sent
              to spatialization backends. Part of this research
              was the implementation of a device driver for the
              Roland Sound Space Processor. This device
              not only directionalizes sound sources, but also
              controls room effects like reverberation."
}
@InProceedings{herder-hc-chat:2000,
  author =   {Jens Herder and Yasuhiro Yamazaki},
  title =    {A Chatspace Deploying Spatial Audio for Enhanced Conferencing},
  booktitle =    {Third Int.\ Conf.\ on Human and Computer},
  year =     2000,
  organization = "University of Aizu",
  address =  "Aizu-Wakamatsu, Japan",
  month =    sep,
  pages =        {197--202},
}

@InProceedings{herder-hc-psfc:2000,
  author =   {Kuniaki Honno and Kenji Suzuki and Jens Herder},
  title =    {Distance and Room Effects Control for the PSFC, an Auditory Display using a Loudspeaker Array},
  booktitle =    {Third Int.\ Conf.\ on Human and Computer},
  year =     2000,
  organization = "University of Aizu",
  address =  "Aizu-Wakamatsu, Japan",
  month =    sep,
  pages =        {71--76},
  abstract =     "The Pioneer Sound Field Controller (PSFC), a loudspeaker
 array system, features realtime configuration of an entire sound field,
 including sound source direction, virtual distance, and context of
 simulated environment (room characteristics: room size and liveness)
 for each of two sound sources. In the PSFC system, there is
 no native parameter to specify the distance between the sound source
 and sound sink (listener) and also no function to control it
 directly. This paper suggests the method to control virtual distance
 using basic parameters: volume, room size and liveness. The
 implementation of distance cue is an important aspect of 3D
 sounds. Virtual environments supporting room effects like
 reverberation not only gain realism but also provide additional
 information to users about surrounding space. The context switch of
 different aural attributes is done by using an API of the
 Sound Spatialization Framework. Therefore, when the sound sink moves
 through two rooms, like a small bathroom and a large living room, the
 context of the sink switches and different sound is obtained."
}

@InProceedings{herder-vs:2000,
  author =   {Jens Herder},
  title =    {Challenges of Virtual Sets: From Broadcasting to Interactive Media},
  booktitle =    {Seventh Int.\ Workshop on Human Interface Technology},
  pages =    {13--17},
  year =     2000,
  address =      {Aizu-Wakamatsu, Japan},
  month =    nov,
  organization = {University of Aizu},
  abstract =    "Virtual sets have evolved from computer-generated, prerendered 2D backgrounds
to realtime, responsive 3D computer graphics and are nowadays standard
repertoire of broadcasting divisions. The graphics, which are combined
with real video feed becoming more
sophisticated, real looking and more responsive. We will look
at the recent developments and suggest further developments like
integration of spatial audio into the studio production
and generating interactive media streams. Educational institutes
recognize the demands of the rising media industry and established new courses
on media technology like the Duesseldorf University of Applied Sciences."
}

@Article{herder-psfc-3d-forum:2000,
  author =   {Kuniaki Honno and Kenji Suzuki and Jens Herder},
  title =    {Distance and Room Effects Control for the PSFC, an Auditory Display using a Loudspeaker Array},
  journal =  "Journal of the 3D-Forum Society, Japan",
  volume =   "14",
  number =   "4",
  year =     2000,
  month =    dec,
  pages =        {146--151},
  abstract =     "The Pioneer Sound Field Controller (PSFC), a loudspeaker
 array system, features realtime configuration of an entire sound field,
 including sound source direction, virtual distance, and context of
 simulated environment (room characteristics: room size and liveness)
 for each of two sound sources. In the PSFC system, there is
 no native parameter to specify the distance between the sound source
 and sound sink (listener) and also no function to control it
 directly. This paper suggests the method to control virtual distance
 using basic parameters: volume, room size and liveness. The
 implementation of distance cue is an important aspect of 3D
 sounds. Virtual environments supporting room effects like
 reverberation not only gain realism but also provide additional
 information to users about surrounding space. The context switch of
 different aural attributes is done by using an API of the
 Sound Spatialization Framework. Therefore, when the sound sink moves
 through two rooms, like a small bathroom and a large living room, the
 context of the sink switches and different sound is obtained."
}

@Article{herder-chat-3d-forum:2000,
  author =   {Jens Herder and Yasuhiro Yamazaki},
  title =    {A Chatspace Deploying Spatial Audio for Enhanced Conferencing},
  journal =  "Journal of the 3D-Forum Society, Japan",
  year =     2000,
  volume =   "15",
  number =   "1",
}

@inproceedings{herder-ieee-vr:2001,
  author       = {Michael Cohen and Jens Herder and William Martens},
  title        = {Panel: Eartop computing and cyberspatial audio technology},
  booktitle    = {IEEE-VR2001: IEEE Virtual Reality},
  pages        = {322--323},
  year         = {2001},
  address      = {Yokohama},
  month        = mar,
  organization = {IEEE},
  note         = {{\sc issn} 1087-8270; {\sc isbn} 0-7695-0948-7},
}

@InProceedings{herder-icc-hc:2001,
  author =   {Jens Herder},
  title =    {Interactive Content Creation with Virtual Set Environments},
  booktitle =    {Fourth Int.\ Conf.\ on Human and Computer},
  year =     2001,
  organization = "University of Aizu",
  address =  "Aizu-Wakamatsu, Japan",
  month =    sep,
  abstract =     {Digital broadcasting enables interactive {\sc tv}, which presents new challenges for interactive content creation. Besides the technology for streaming and viewing, tools and systems are under development that extend traditional {\sc tv} studios with virtual set environments. This presentation reviews current technology and describes the requirements for such systems. Interoperability over the production, streaming, and viewer levels requires open interfaces. As the technology allows more interaction, it becomes inherently difficult to control the quality of the viewer's experience.}
}

@Article{herder-icc-3d-forum:2001,
  author =   {Jens Herder},
  title =    {Interactive Content Creation with Virtual Set Environments},
  journal =      {Journal of the 3D-Forum Society, Japan},
  year =     2001,
  volume =   15,
  number =   4,
  pages =    {53--56},
  month =    dec,
  abstract =     {Digital broadcasting enables interactive {\sc tv}, which presents new challenges for interactive content creation. Besides the technology for streaming and viewing, tools and systems are under development that extend traditional {\sc tv} studios with virtual set environments. This presentation reviews current technology and describes the requirements for such systems. Interoperability over the production, streaming, and viewer levels requires open interfaces. As the technology allows more interaction, it becomes inherently difficult to control the quality of the viewer's experience.}
}


@inproceedings{herder-art:2001,
  author       = {Jens Herder},
  title        = {Applications of Spatial Auditory Displays in the Context of Art and Music},
  booktitle    = {Human Supervision and Control in Engineering and Music},
  year         = {2001},
  address      = {Kassel, Germany},
  month        = sep,
  organization = {Universit{\"a}t Kassel},
  abstract     = {Auditory displays with the ability to place virtual sound sources into the space under realtime conditions enable advanced applications for art and music. The listener can be immersed on a high level while interacting and even participating in the experience.
We review some of those applications and discuss the required technology.},
  keywords     = {spatial auditory displays, interaction, spatial events, soundscape},
}

@Article{herder-3d-forum:2002,
  author =   {Jens Herder and Ralf W{\"o}rzberger and Uwe Twelker and Stefan Albertz},
  title =    {Use of Virtual Environments in the Promotion and Evaluation of Architectural Designs},
  journal =  "Journal of the 3D-Forum Society, Japan",
  volume =   "16",
  number =   "4",
  year =     2002,
  month =    dec,
  pages =    {117--122},
  note =     {{\sc issn} 1342-2189},
  abstract =     "Virtual environments can create a realistic impression of an
architectural space during the architectural design process,
providing a powerful tool for evaluation and promotion
during a project's early stages. In comparison to pre-rendered animations,
such as walkthroughs based on {\sc cad} models, virtual environments can
offer intuitive interaction and a more life like experience.
Advanced virtual environments allow users to change realtime
rendering features with a few manipulations, switching between
different versions while still maintaining sensory immersion.
This paper reports on an experimental project in which architectural models
are being integrated into interactive virtual environments, and
includes demonstrations of both the possibilities and limitations
of such applications in evaluating, presenting and promoting architectural designs."
}


@InProceedings{herder-vr:2003,
  author =       {Jens Herder and Thomas Novotny},
  title =        {Spatial Sound Design and Interaction for Virtual Environments in the Promotion of Architectural Designs},
  booktitle =    {Third International Workshop on Spatial Media},
  pages =        {7--11},
  year =         2003,
  address =      {Aizu-Wakamatsu, Japan},
  month =        mar,
  organization = {University of Aizu},
  abstract = {Virtual environment walkthrough applications are generally enhanced by a user's interactions within a simulated architectural space, but the enhancement that stems from changes in spatial sound that are coupled with a user's behavior are particularly important, especially with regard to creating a sense of place. When accompanied by stereoscopic image synthesis, spatial sound can immerse the user in a high-realism virtual copy of the real world. An advanced virtual environment that allows users to change realtime rendering features with a few manipulations has been shown to enable switching between different versions of a modeled space while maintaining sensory immersion. This paper reports on an experimental project in which an architectural model is being integrated into such an interactive virtual environment. The focus is on the spatial sound design for supporting interaction, including demonstrations of both the possibilities and limitations of such applications in presenting and promoting architectural designs, as well as in three-dimensional sketching.
}
}

@article{herder-3d-forum:2003,
  author   = {Arnfried Griesert and Oliver Walczak and Jens Herder},
  title    = {Interactive Realtime Terrain Visualization for Virtual Set Applications},
  journal  = {Journal of the 3D-Forum Society, Japan},
  volume   = {17},
  number   = {4},
  year     = {2003},
  month    = dec,
  pages    = {20--26},
  note     = {{\sc issn} 1342-2189},
  abstract = {Virtual set environments for broadcasting become more sophisticated as well as the visual quality improves. Realtime interaction and production-specific visualization implemented through plugin mechanism enhance the existing systems like the virtual studio software 3DK. This work presents an algorithm which can dynamically manage textures of high resolution by prefetching them depending on their requirement in memory and map them on a procedural mesh in realtime. The main goal application of this work is the virtual representation of a flight over a landscape as part of weather reports in virtual studios and the interaction by the moderator. 
},
}

@InProceedings{herder-arvr:2004,
  author = 	 {Jens Herder and Kai Jaensch and Bruno Horst and Thomas Novotny},
  title = 	 {Testm{\"a}rkte in einer {Virtuellen} {Umgebung} - {Die} {Bestimmung} von {Preisabsatzfunktionen} zur {Unterst\"utzung} des {Innovationsmanagements}},
  booktitle = 	 {Augmented \& {Virtual} {Reality} in der {Produktentstehung}},
  pages =	 {97--110},
  year =	 2004,
  editor =	 {J{\"u}rgen Gausemeier and Michael Grafe},
  series =	 {HNI-Schriftenreihe},
  volume =       {149},
  address =	 {Paderborn},
  month =	 jun,
  publisher =    {Heinz Nixdorf Institut, Universit{\"a}t Paderborn},
  note =	 {{\sc isbn} 3-935433-58-1},
  abstract =     {Multimediale Technologien werden in der Marktforschung immer
 st{\"a}rker eingesetzt, um flexible und kosteng{\"u}nstige Studien durchzuf{\"u}hren. Im Innovationsprozess kann dabei auf die langj{\"a}hrigen Erfahrungen zur{\"u}ckgegriffen werden, die durch den Einsatz der Computersimulation in der technischen Produktentwicklung zustande gekommen sind. In sehr fr{\"u}hen Phasen des Innovationsprozesses k{\"o}nnen durch Einsatz der neuen Technologien die Markteinf{\"u}hrungskonzepte f{\"u}r neue Produkte getestet werden. Die Applikationen der virtuellen Realit{\"a}t bieten ein einzigartiges Potential, neue Produkte einschlie{\ss}lich des Marketingkonzeptes zu testen, ohne dass dieses Produkt bereits physisch vorhanden sein muss. Am Beispiel eines Elementes des Marketingkonzeptes, der Preispolitik, zeigt die vorliegende Studie auf, welches Potential die virtuelle Kaufsituation von Produkten bietet. Der Fokus des Projektes liegt auf der interaktiven Produktpr{\"a}sentation in einer virtuellen Umgebung, die in eine Online-Befragung mit zus{\"a}tzlichen Werbefilmen eingebettet ist. Visuell hochwertige 3D-Produktpr{\"a}sentationen versetzen den Probanden in eine virtuelle Einkaufsumgebung, die einem realen Szenario entspricht. Die virtuellen Produkte werden in mehreren Kaufentscheidungsrunden zu unterschiedlichen Preisen angeboten. Der Preisuntersuchung geht eine Pr{\"a}sentation ausgew{\"a}hlter Werbespots sowie eine produktbezogene Befragung voraus. Im Anschluss an die virtuellen Preisentscheidungen werden die Eindr{\"u}cke sowie einige Kontrollgr{\"o}\ss{}en abgefragt. In weitergehenden Studien dieser Art k{\"o}nnen die Wirkungen mehrerer Marketing-Instrumente zu einem Zeitpunkt untersucht werden, in dem sich die Produkte noch im Entwicklungsprozess befinden. Auf diesem Weg lassen sich auch Wettbewerbsvorteile bestehender Produkte effizienter erkennen und nutzen. 
Mit den hoch entwickelten Computer- und Visualisierungstechnologien ist ein m{\"a}chtiges Werkzeug entstanden, das bereits f{\"u}r kommerzielle Pr{\"a}sentationen und Produktstudien eingesetzt wird. Zuk{\"u}nftig kann es auch in Kombination mit Internetanwendungen und klassischen Methoden der Marktforschung zu einem sehr fr{\"u}hen Zeitpunkt umfassende Erkenntnisse {\"u}ber ein Produkt liefern.
}
}

@InProceedings{herder-ivs-hc:2004,
  author =   {Jens Herder and Wolfgang Vonolfen and Arnfried Griesert and Stefan Heuer and Ansgar Hoffmann and Bernd H{\"o}ppner},
  title =    {Interactive Virtual Set Applications for Post Production},
  booktitle =    {Seventh Int.\ Conf.\ on Human and Computer},
  year =     2004,
  organization = "University of Aizu",
  address =  "Aizu-Wakamatsu, Japan",
  month =    sep,
  abstract =     {Virtual set environments for broadcasting become
more sophisticated as well as the visual quality improves.
Realtime interaction and production-specific
visualization implemented through plugin mechanism
enhance the existing systems like the 3DK.
This work presents the integration of the Intersense
IS-900 SCT camera tracking and 3D interaction into
the 3DK virtual studio environment. The main goal of
this work is the design of a virtual studio environment
for post productions, which includes video output as
well as media streaming formats such as MPEG-4. The
system allows high quality offline rendering during
post production and 3D interaction by the moderator
during the recording.}
}

@InProceedings{herder-arvr:2005,
  author = 	 {Jens Herder and Ralf W{\"o}rzberger and Carsten Juttner and Uwe Twelker},
  title = 	 {Verwendung von {Grafikkarten}-{Prozessoren} ({GPU}s) f{\"u}r eine interaktive {Produkt}visualisierung in {Echtzeit} unter {Verwendung} von {Shadern} und {Video}texturen},
  booktitle = 	 {Augmented \& Virtual Reality in der Produktentstehung},
  pages =	 {23--36},
  year =	 2005,
  editor =	 {J{\"u}rgen Gausemeier and Michael Grafe},
  series =	 {HNI-Schriftenreihe},
  address =	 {Paderborn},
  volume =       {167},
  month =	 may,
  publisher =    {Heinz Nixdorf Institut, Universit{\"a}t Paderborn},
  note =	 {{\sc isbn} 3-935433-76-X},
  abstract =     {Die Visualisierung von Produkten in Echtzeit ist in vielen Bereichen ein hilfreicher Schritt, um potentiellen Kunden eine Vorstellung vom Einsatzgebiet und einen {\"U}berblick {\"u}ber die finale Anwendung zu erlauben. In den letzten Jahren haben neue Technologien in der Grafikkartenindustrie dazu gef{\"u}hrt, dass fr{\"u}her nur auf teuren Grafikworkstations verf{\"u}gbare M{\"o}glichkeiten nun auch mit relativ kosteng{\"u}nstigen Karten, welche f{\"u}r den Einsatz in Standard-PCs konzipiert wurden, realisierbar sind.
Es wird an einem Modellentwurf des Innenraums des People Cargo Movers gezeigt, wie die Beleuchtung innerhalb einer Echtzeitvisualisierung durch Shader realisiert werden kann. Als Lichtquelle wird dabei eine Landschaftsaufnahme herangezogen, welche als eine von mehreren Videotexturen eingebunden wurde. Au{\ss}erdem werden real im virtuellen Studio gefilmte Personen im Innenraum gleicherma{\ss}en {\"u}ber Videotexturen dargestellt und ebenfalls durch die Landschaft beleuchtet.
}
}


@Article{herder-3d-forum:2005,
  author =   {Tom Novotny and Kai Jaensch and Jens Herder},
  title =    {A Database Driven and Virtual Reality supported Environment for Marketing Studies},
  journal =  "Journal of the 3D-Forum Society, Japan",
  volume =   "19",
  number =   "4",
  year =     2005,
  month =    dec,
  pages =    {95--101},
  note =     {{\sc issn} 1342-2189},
  abstract =     "In today's market research mechanisms multi modal
technologies are significant tools to perform flexible
and price efficient studies for not only consumer
products but also consumer goods. Current appraisal
mechanisms in combination with applied computer
graphics can improve the assessment of a product's
launch in the very early design phase or an innovation
process. The combination of online questionnaires,
Virtual Reality (VR) applications and a database
management system offers a powerful tool to
let a consumer judge products as well as innovated
goods even without having produced a single article.
In this paper we present an approach of consumer
good studies consisting of common as well as interactive
VR product presentations and online questionnaires
bases on a bidirectional database management
solution to configure and manage numerous studies,
virtual sets, goods and participants in an effective
way to support the estimation of the received
data. Non-programmers can create their test environment
including a VR scenario very quickly without
any effort. Within the extensive knowledge of
consumer goods, marketing instruments can be
defined to shorten and improve the rollout process in
the early product stages.
"
}

@article{herder-3d-forum-videolight:2006,
  author   = {Carsten Juttner and Jens Herder},
  title    = {Lighting an Interactive Scene in Real-time with a GPU and Video
Textures},
  journal  = {Journal of the 3D-Forum Society, Japan},
  volume   = {20},
  number   = {1},
  year     = {2006},
  month    = apr,
  pages    = {22--28},
  note     = {{\sc issn} 1342-2189},
  abstract = {The presentation of virtual environments in real time
has always been a demanding task. Specially designed
graphics hardware is necessary to deal with the large
amounts of data these applications typically produce.
For several years the chipsets that were used allowed
only simple lighting models and fixed algorithms. But
recent development has produced new graphics processing
units (GPUs) that are much faster and more
programmable than their predecessors. This paper
presents an approach to take advantage of these new
features. It uses a video texture as part of the lighting
calculations for the passenger compartment of a virtual
train and was run on the GPU of a recent PC graphics
card. The task was to map the varying illumination
of a filmed landscape onto the virtual objects and also
onto another video texture (showing two passengers),
thereby enhancing the realism of the scene.
},
}

@article{herder-3d-forum-soundradiation:2006,
  author   = {Holger Struchholz and Jens Herder and Dieter Leckschat},
  title    = {Sound radiation simulation of musical instruments based on
interpolation and filtering of multi-channel recordings},
  journal  = {Journal of the 3D-Forum Society, Japan},
  volume   = {20},
  number   = {1},
  year     = {2006},
  month    = apr,
  pages    = {41--47},
  note     = {{\sc issn} 1342-2189},
  abstract = {With the virtual environment developed here, the characteristic
sound radiation patterns of musical instruments can be experienced
in real-time.
The user may freely move around a musical instrument, thereby
receiving acoustic and visual feedback in real-time. The perception
of auditory and visual effects is
intensified by the combination of acoustic and visual elements, as
well as the option of user interaction. The simulation of
characteristic sound radiation patterns is based on interpolating
the intensities of a multichannel recording and offers a near-natural
mapping of the sound radiation patterns. Additionally, a simple
filter has been developed, enabling the qualitative simulation of
an instrument's characteristic sound radiation patterns to be
easily implemented within real-time 3D applications. Both methods
of simulating sound radiation patterns have been evaluated for a
saxophone with respect to their functionality and validity by
means of spectral analysis and an auditory experiment.
},
}


@InProceedings{herder-arvr:2006,
  author = 	 {Jens Herder and Kai Jaensch and Katharina Garbe},
  title = 	 {Haptische {Interaktionen} in {Test}umgebungen f{\"u}r {Produkt}pr{\"a}sentation in {Virtuellen} {Umgebungen}},
  booktitle = 	 {Augmented \& Virtual Reality in der Produktentstehung},
  pages =	 {87--99},
  year =	 2006,
  editor =	 {J{\"u}rgen Gausemeier and Michael Grafe},
  series =	 {HNI-Schriftenreihe},
  address =	 {Paderborn},
  volume =       {188},
  month =	 may,
  publisher =    {Heinz Nixdorf Institut, Universit{\"a}t Paderborn},
  note =	 {{\sc isbn} 3-939350-07-9},
  abstract =     {Durch den vermehrten Einsatz von multimedialen Technologien werden in der Marktforschung die M{\"o}glichkeiten der Durchf{\"u}hrung flexibler und kosteng{\"u}nstiger Studien gegeben. In sehr fr{\"u}hen Phasen des Innovationsprozesses als Teil der Marktforschung k{\"o}nnen durch Einsatz von Virtuellen Umgebungen die Markteinf{\"u}hrungskonzepte f{\"u}r neue Produkte getestet werden. Mittels Anwendungen der Virtuellen Realit{\"a}t k{\"o}nnen neue Produkte einschlie{\ss}lich des Marketingkonzeptes auch haptisch getestet werden, ohne dass dieses Produkt bereits physisch vorhanden sein muss.
Informationen werden dem Benutzer in Virtuellen Umgebungen haupts{\"a}chlich visuell und erg{\"a}nzend auditiv {\"u}bermittelt. Verbreitete Benutzerschnittstellen sind Interaktionsger{\"a}te wie Stylus und Wand. Durch die haptische Wahrnehmung werden Informationen menschengerechter, effektiver und intuitiver wahrgenommen. Objekte in einer virtuellen Umgebung k{\"o}nnen durch den Einsatz haptischer Interaktionsger{\"a}te ertastet und erf{\"u}hlt werden und machen dadurch eine differenziertere Beurteilung und Einsch{\"a}tzung durch den Benutzer eben dieser Objekte m{\"o}glich. Der Fokus des vorliegenden Projektes liegt daher auf der interaktiven haptischen Produktpr{\"a}sentation in einer virtuellen Einkaufsumgebung, die in Online-Befragungen mit zus{\"a}tzlichen Werbefilmen eingebettet ist.
Als Nebenprodukt wurde das Werkzeug Open Inventor um Knoten zur Modellierung von haptischen Szeneneigenschaften erweitert.
}
}

@InProceedings{herder-mc:2006,
  author = 	 {Jens Herder and Ralf Kronenwett and Simone Lambertz and Georg Kiefer and Johann Freihoff},
  title = 	 {Interaktive {Echtzeit}-{3D}-{Visualisierung} {Webbasierte} {Darstellung}: {Mobilisierung} und {Homing} von {Blut}stammzellen},
  booktitle = 	 {Mensch \& Computer 2006: Mensch und Computer im StrukturWandel},
  pages =	 {405--409},
  year =	 2006,
  editor =	 {A. M. Heinecke and H. Paul},
  address =	 {M{\"u}nchen},
  month =	 sep,
  publisher =	 {Oldenbourg Verlag},
  abstract = {
	  Die interaktive Echtzeit 3D-Visualisierung Mobilisierung und Homing
	  von Blutstammzellen wurde konzipiert, um ein sehr komplexes
	  medizinisches Wissen mit den Mitteln der 3-dimensionalen
	  Visualisierung in Echtzeit und des Internets sowie der daraus
	  resultierenden Interaktivit{\"a}t aufzubereiten. Dies musste auf einer
	  Ebene geschehen, die es hinterher auch jedem Nicht-Mediziner
	  erlaubt, die grundlegenden biologischen und medizinischen
	  Sachverhalte nachzuvollziehen. Das Resultat: Eine informative und
	  didaktische Anwendung, aus einer Mischung von interaktiven
	  3D-Stationen und erkl{\"a}renden 3D-Animationen. Diskutiert werden
	  die Methodik der Konzeptionsphase und die Interaktionstechniken.
}
}

@InProceedings{herder-hc:2006,
  author =   {Jens Herder},
  title =    {Matching Light for Virtual Studio TV Productions},
  booktitle =    {9th Int.\ Conf.\ on Human and Computer},
  pages =    {158--162},
  year =     2006,
  organization = "University of Aizu",
  address =  "Aizu-Wakamatsu, Japan",
  month =    sep,
  abstract =     {High dynamic range ({\sc hdr}) environment maps based on still images or
video streams are used for computer animation or interactive systems.
The task of realistic light setup of scenes using captured environment maps might be eased as well as the visual quality improves. In this article, we discuss the light setting problem for virtual studio ({\sc tv}) productions which have mixed scenes of real objects, actors, virtual objects and virtual backgrounds as well as light set up at the studio with blue box and light set up for the computer graphics objects. Benefits of {\sc hdr} interactive light control are that the real light in the studio does not have to be remodeled and the artistic impression by using the light in the studio will be also captured. The virtual studio ({\sc tv}) layout and system become more complex to handle this new feature of studio light capturing. The analysis of system requirements identifies the technical challenges.}
}
@InProceedings{herder-gis:2007,
  author =   {Jochen Schirrwagen and Jens Herder and Michael Uwe M{\"o}bius and Katharina Regulski},
  title =    {Charakteristiken einer netzgest{\"u}tzten wissenschaftlichen {Kommunikation} und {Umsetzung} in {Infrastruktur} und {Publikationsformen}},
  booktitle =    {German e-Science Conference 2007},
  year =     2007,
  organization = "University of Aizu",
  internal-note = {organization looks copied from the Aizu entries above -- the conference took place in Baden-Baden; verify and correct},
  address =  "Baden-Baden",
  month =    may,
  note = {{\tt http://www.ges2007.de}},
  abstract =     {
	Neue Formen der wissenschaftlichen Kommunikation basieren
	auf Fortschritten in den Informations-
	und Kommunikationstechnologien. Das dadurch m{\"o}gliche
	kollaborative wissenschaftliche Arbeiten liefert Ergebnisse,
	die in vielf{\"a}ltigen Formaten, als Text, Simulationsdaten
	oder multimediale Elemente vorliegen. Daraus ergeben sich
	besondere Anforderungen an Publikations- und
	Kommunikatonsinfrastrukturen, wie Interoperabilit{\"a}t,
	Repr{\"a}sentation, Verteilung und Archivierung derartiger
	komplexer digitaler Objekte. Mit der Initiative Digital Peer
	Publishing existiert eine Infrastruktur f{\"u}r das Publizieren
	in elektronischen Zeitschriften. Dieses Publikationsformat
	erlaubt neben einem schnellen Wissenstransfer eine umfassende
	Repr{\"a}sentation wissenschaftlicher Ergebnisse. Das Journal
	of Virtual Reality and Broadcasting als Teil dieser Initiative
	zeigt am Beispiel des elektronischen Publikationsprozesses
	den Stand der Wissensvernetzung in seiner Community, sowie
	aktuelle Entwicklungen um die Erweiterung innovativer Funktionen.
}
}


@InProceedings{herder-arvr:2007,
  author =    {Oliver Rattay and Christian Geiger and Jens Herder and Gernot Goebbels and Igor Nikitin},
  title =     {Zweih{\"a}ndige {I}nteraktion in {VR}-{Umgebungen}},
  booktitle = {Augmented \& Virtual Reality in der Produktentstehung},
  pages =     {315--332},
  year =      2007,
  editor =    {Gausemeier, J{\"u}rgen and Grafe, Michael},
  series =    {HNI-Schriftenreihe},
  address =   {Paderborn},
  volume =    {209},
  month =     jun,
  publisher = {Heinz Nixdorf Institut, Universit{\"a}t Paderborn},
  isbn =      {978-3-939350-28-6},
  abstract =     {
	Einfach benutzbare VR-Anwendungen erfordern andere
	Interaktionstechniken als konventionelle Desktop-Anwendungen
	mit Maus, Tastatur und Desktop-Metapher zur Verf{\"u}gung stellen.
	Da solche Ans{\"a}tze in Konzeption und Realisierung deutlicher
	komplexer sind, m{\"u}ssen diese mit Sorgfalt ausgew{\"a}hlt
	werden. Folgt man der Argumentation, dass VR eine nat{\"u}rliche
	Interaktion mit virtuellen Objekten erm{\"o}glicht, so f{\"u}hrt
	dies fast zwangsl{\"a}ufig zu zweih{\"a}ndigen Interaktionstechniken
	f{\"u}r virtuelle Umgebungen, da Benutzer in realen Umgebungen
	gewohnt sind, fast ausschlieï¬‚lich zweih{\"a}ndig zu agieren. In
	diesem Beitrag geben wir eine {\"U}bersicht {\"u}ber den Stand
	der Technik im Bereich zweih{\"a}ndiger Interaktion, leiten
	Anforderungen an eine Entwicklung zweih{\"a}ndiger
	Interaktionstechniken in VR ab und beschreiben einen eigenen Ansatz.
	Dabei geht es um die zweih{\"a}ndige Interaktion bei der Simulation
	flexibler biegeschlaffer Bauteile (z. B. Schlauchverbindungen).
}
}


@InProceedings{herder-hdr-vs:2007,
  author =       {Jens Herder and Christian Neider and Shinichi Kinuwaki},
  title =        {{HDR}-based Lighting Estimation for Virtual Studio ({TV}) Environments},
  booktitle =    {10th Int.\ Conf.\ on Human and Computer},
  pages =        {111--117},
  year =         2007,
  organization = {FH D{\"u}sseldorf, University of Aizu},
  address =      {D{\"u}sseldorf, Germany, Aizu-Wakamatsu, Japan},
  month =        dec,
  abstract =     {
Two high dynamic range ({\sc hdr}) environments maps based on
video streams from fish-eye lens cameras are used for generating virtual lights in a virtual set renderer.
The task of realistic virtual light setup of scenes using captured environment maps might be eased as well as visual quality improves. We discuss the light setting problem for virtual studio ({\sc tv}) productions which have mixed scenes of real objects, actors, virtual objects and virtual backgrounds.  Benefits of {\sc hdr} interactive light control are that the real light in the studio does not have to be remodeled and the artistic impression by using the light in the studio is also captured. An analysis of system requirements identifies technical challenges. We discuss the properties of a prototype system including test production.
}
}

@InProceedings{herder-ar-audio:2007,
  author =       {Katharina Garbe and Iris Herbst and Jens Herder},
  title =        {Spatial Audio for Augmented Reality},
  booktitle =    {10th Int.\ Conf.\ on Human and Computer},
  pages =        {53--58},
  year =         2007,
  organization = {FH D{\"u}sseldorf, University of Aizu},
  address =      {D{\"u}sseldorf, Germany, Aizu-Wakamatsu, Japan},
  month =        dec,
  abstract =     {
Using spatial audio successfully for augmented reality (AR) applications is a challenge, but is awarded with an improved user experience. Thus, we have extended the AR/VR framework {\sc Morgan} with spatial audio to improve users' orientation in an AR application. In this paper, we investigate the users' capability to localize and memorize spatial sounds (registered with virtual or real objects). We discuss two scenarios. In the first scenario, the user localizes only sound sources and in the second scenario the user memorizes the location of audio-visual objects. Our results reflect spatial audio performance within the application domain and show which technology pitfalls still exist. Finally, we provide design recommendations for spatial audio AR environments.
}
}

@InProceedings{herder-depth-vs:2009,
  author =       {Jens Herder and Michael Wilke and Julia Heimbach and Sebastian G{\"o}bel and Dionysios Marinos},
  title =        {Simple Actor Tracking for Virtual {TV} Studios Using a Photonic Mixing Device},
  booktitle =    {12th Int.\ Conf.\ on Human and Computer},
  year =         2009,
  organization = {University of Aizu},
  address =      {Hamamatsu / Aizu-Wakamatsu / D{\"u}sseldorf},
  month =        dec,
  abstract =     {
Virtual TV studios use actor tracking systems for resolving the occlusion of computer graphics and studio camera image. The actor tracking delivers the distance between actor and studio camera. We deploy a photonic mixing device, which captures a depth map and a luminance image at low resolution. The renderer engines gets one depth value per actor using the OSC protocol. We describe the actor recognition algorithm based on the luminance image and the depth value calculation. We discuss technical issues like noise and calibration.
}
}

@inproceedings{herder-vibro-vs:2009,
 author =    {W{\"o}ldecke, Bj{\"o}rn and Vierjahn, Tom and Flasko, Matthias and Herder, Jens and Geiger, Christian},
 title =     {Steering Actors Through a Virtual Set Employing Vibro-Tactile Feedback},
 booktitle = {TEI '09: Proceedings of the 3rd International Conference on Tangible and Embedded Interaction},
 year =      {2009},
 isbn =      {978-1-60558-493-5},
 pages =     {169--174},
 location =  {Cambridge, United Kingdom},
 doi =       {10.1145/1517664.1517703},
 publisher = {ACM},
 address =   {New York, NY, USA},
 abstract = {
Actors in virtual studio productions are faced with the challenge that they have to interact with invisible virtual objects because these elements are rendered separately and combined with the real image later in the production process. Virtual sets typically use static virtual elements or animated objects with predefined behavior so that actors can practice their performance and errors can be corrected in the post production. With the demand for inexpensive live recording and interactive TV productions, virtual objects will be dynamically rendered at arbitrary positions that cannot be predicted by the actor. Perceptive aids have to be employed to support a natural interaction with these objects. In our work we study the effect of haptic feedback for a simple form of interaction. Actors are equipped with a custom built haptic belt and get vibrotactile feedback during a small navigational task (path following). We present a prototype of a wireless vibrotactile feedback device and a small framework for evaluating haptic feedback in a virtual set environment. Results from an initial pilot study indicate that vibrotactile feedback is a suitable non-visual aid for interaction that is at least comparable to audio-visual alternatives used in virtual set productions.
 }
}
@inproceedings{herder-vibro-direction:2009,
  author =    {Tom Vierjahn and W{\"o}ldecke, Bj{\"o}rn and Christian Geiger and Jens Herder},
  title =     {Improved Direction Signalization Technique Employing Vibrotactile Feedback},
  booktitle = {11th Virtual Reality International Conference, VRIC'2009},
  location =  {Laval, France},
  month =     apr,
  year =      {2009},
  abstract = {
Vibrotactile feedback via body-worn vibrating belts is a common means of direction signalization - e.g. for navigational tasks. Consequently such feedback devices are used to guide blind or visually impaired people but can also be used to support other wayfinding tasks - for instance, guiding actors in virtual studio productions. Recent effort has been made to simplify this task by integrating vibrotactile feedback into virtual studio applications. In this work we evaluate the accuracy of an improved direction signalization technique, utilizing a body-worn vibrotactile belt with a limited number of tactors, and compare it to other work. The results from our user study indicate that it is possible to signalize different directions accurately, even with a small number of tactors spaced by $90^{\circ}$.},
}
% 2010
@inproceedings{herder-archery-vs:2010,
 author =    {Christian Geiger and Jens Herder and Sebastian G{\"o}bel and Christin Heinze and Dionysios Marinos},
 title =     {Design and Virtual Studio Presentation of a Traditional Archery Simulator},
 booktitle = {Proceedings of the Entertainment Interfaces Track 2010 at Interaktive Kulturen 2010},
 location =  {Duisburg},
 note =      {CEUR-WS.org/Vol-634, ISSN 1613-0073, urn:nbn:de:0074-634-4},
 year =      2010,
 abstract = {
In this paper we describe the design of a virtual reality simulator for traditional intuitive archery. Traditional archers aim without a target figure. Good shooting results require an excellent body-eye coordination that allows the user to perform identical movements when drawing the bow. Our simulator provides a virtual archery experience and supports the user to learn and practice the motion sequence of traditional archery in a virtual environment. We use an infrared tracking system to capture the user's movements in order to correct his movement. To provide a realistic haptic feedback a real bow is used as interaction device. Our system provides a believable user experience and supports the user to learn how to shoot in the traditional way. Following a user-centered iterative design approach we developed a number of prototypes and evaluated them for refinement in sequent iteration cycles. For illustration purposes we created a short video clip in our virtual studio about this project that presents the main ideas in an informative yet entertaining way.},
}

@InProceedings{herder-vibrotactile-vs:2010,
  author =       {Monika Klapdohr and Bj{\"o}rn W{\"o}ldecke and Dionysios Marinos and Jens Herder and Christian Geiger and Wolfgang Vonolfen},
  title =        {Vibrotactile Pitfalls: Arm Guidance for Moderators in Virtual {TV} Studios},
  booktitle =    {13th Int.\ Conf.\ on Human and Computer},
  year =         2010,
  organization = {University of Aizu},
  address =      {Hamamatsu / Aizu-Wakamatsu / D{\"u}sseldorf},
  month =        dec,
  abstract =     {
For this study, an experimental vibrotactile feedback system was developed to help actors with the task of moving their arm to a certain place in a virtual tv studio under live conditions. Our intention is to improve interaction with virtual objects in a virtual set, which are usually not directly visible to the actor, but only on distant displays. Vibrotactile feedback might improve the appearance on tv because an actor is able to look in any desired direction (camera or virtual object) or to read text on a teleprompter while interacting with a virtual object. Visual feedback in a virtual studio lacks spatial relation to the actor, which impedes the adjustment of the desired interaction. The five tactors of the implemented system which are mounted on the tracked arm give additional information like collision, navigation and activation. The user study for the developed system shows that the duration for reaching a certain target is much longer in case no visual feedback is given, but the accuracy is similar. In this study, subjects reported that an activation signal indicating the arrival at the target of a drag \& drop task was helpful. In this paper, we discuss the problems we encountered while developing such a vibrotactile display. Keeping these pitfalls in mind could lead to better feedback systems for actors in virtual studio environments.
}
}
@InProceedings{herder-softshadows-vs:2010,
  author =       {H{\"u}seyin Ayten and Jens Herder and Wolfgang Vonolfen},
  title =        {Visual Acceptance Evaluation of Soft Shadow Algorithms for Virtual {TV} Studios},
  booktitle =    {13th Int.\ Conf.\ on Human and Computer},
  year =         2010,
  organization = {University of Aizu},
  address =      {Hamamatsu / Aizu-Wakamatsu / D{\"u}sseldorf},
  month =        dec,
  abstract =     {
Shadows in computer graphics are an important rendering aspect for spatial objects. For realtime computer applications such as games, it is essential to represent shadows as accurate as possible. Also, various tv stations work with virtual studio systems instead of real studio sets. Especially for those systems, a realistic impression of the rendered and mixed scene is important. One challenge, hence, is the creation of a natural shadow impression. This paper presents the results of an empirical study to compare the performance and quality of different shadow mapping methods. For this test, a prototype studio renderer was developed. A percentage closer filter (pcf) with a number of specific resolutions is used to minimize the aliasing issue. More advanced algorithms which generate smooth shadows like the percentage closer soft shadow (pcss) method as well as the variance shadow maps (vsm) method are analysed. Different open source apis are used to develop the virtual studio renderer, giving the benefit of permanent enhancement. The Ogre 3D graphic engine is used to implement the rendering system, benefiting from various functions and plugins. The transmission of the tracking data is accomplished with the vrpn server/client and the Intersense api. The different shadow algorithms are compared in a virtual studio environment which also casts real shadows and thus gives a chance for a direct comparison throughout the empirical user study. The performance is measured in frames per second.}
}
@INPROCEEDINGS{herder-iterative-mr:2010,
  author =    {Patrick Pogscheba and J{\"o}rg St{\"o}cklein and Jens Herder and Christian Geiger},
  title =     {Iteratives Mixed-Reality-Prototyping und virtuelle Studiopr{\"a}sentation einer Steuerung f{\"u}r ein Indoor-Luftschiff},
  address =   {Fellbach-Stuttgart},
  booktitle = {7. Workshop Virtuelle und Erweiterte Realit{\"a}t der GI-Fachgruppe VR/AR},
  year =      2011,
  internal-note = {review: citation key says 2010 but year is 2011 -- the 7th GI VR/AR workshop; confirm the year and keep the key for existing citations},
  abstract = {Dieser Beitrag beschreibt einen Ansatz zum iterativen Entwurf von Interaktionstechniken f{\"u}r Mixed-Reality-Applikationen. Wir pr{\"a}sentieren die Entwicklung eines SW-Frameworks f{\"u}r die Entwicklung derartiger Anwendungen. Ausgehend vom Anwendungsszenario der Steuerung eines Indoor-Zeppelin entwickeln wir Anforderungen an den prototypischen Entwurf von Interaktionstechniken auf unterschiedlichen Abstraktionsebenen. Ein zentraler Gedanke ist dabei der Einsatz unterschiedlicher Kombinationen von virtuellen und realen Elementen wie dies im Mixed-Reality-Kontinuum nach Milgram vorgeschlagen wird. Neben der effizienten Entwicklung iterativer Prototypen dienen die Zwischenergebnisse auch der technischen Dokumentation des Projekts auf Basis aktueller Virtual-Studio-Technologien.
  }
}
% 2011
@INPROCEEDINGS{herder-touchplanvs:2011,
    author =    {Jens Herder and Constantin Brosda and Sascha Djuderija and Daniel Drochtert and {\"O}mer Genc and Stephan Joeres and Patrick Kellerberg and Simon Looschen and Christian Geiger and Bj{\"o}rn W{\"o}ldecke},
    title =     {{TouchPlanVS} - A Tangible Multitouch Planning System for Virtual {TV} Studio Productions},
    booktitle = {Proc. 3DUI 2011 (IEEE Symp. on 3D User Interfaces)},
    year =      2011,
    month =     mar,
    pages =     {103--104},
    address =   {Singapore},
    abstract = {
    This article presents a new approach of integrating tangible user feedback in today's virtual {\sc tv} studio productions. We describe a tangible multitouch planning system, enabling multiple users to prepare and customize scene flow and settings. Users can collaboratively view and interact with virtual objects by using a tangible user interface on a shared multitouch surface. The in a {\sc 2d} setting created {\sc tv} scenes are simultaneously rendered on an external monitor, using a production/target renderer in {\sc 3d}. Thereby the user experiences a closer reproduction of a final production. Subsequently, users are able to join together the scenes into one complex plot. Within the developing process, a video prototype of the system shows the user interaction and enables early reviews and evaluations. The requirement analysis is based on expert interviews.
    }
}

@INPROCEEDINGS{herder-radarTHEREMIN:2011,
    author =    {Bj{\"o}rn W{\"o}ldecke and Dionysios Marinos and Patrick Pogscheba and Chris Geiger and Jens Herder and Tobias Schwirten},
    title =     {{radarTHEREMIN} - Creating Musical Expressions in a Virtual Studio Environment},
    booktitle = {Proc. ISVRI 2011 (International Symposium on VR Innovation)},
    year =      2011,
    month =     mar,
    pages =     {345--346},
    address =   {Singapore},
    abstract = {
    In this paper we describe a prototypical system for live musical performance in a  virtual studio environment. The performer stands in front of the studio camera and interacts with an infrared-laser-based multitouch device. The final TV image shows the performer interacting with a virtual screen which is augmented in front of herself. To overcome the problem of the performer not seeing this virtual screen in reality, we use a special hexagonal grid to facilitate the performer's awareness of this novel Theremin-like virtual musical instrument.
    }
}

@INPROCEEDINGS{herder-binocular:2011,
  author =    {Matthias Flasko and Patrick Pogscheba and Jens Herder and Wolfgang Vonolfen},
  title =     {Heterogeneous Binocular Camera-Tracking in a Virtual Studio},
  address =   {Wedel},
  booktitle = {8. Workshop Virtuelle und Erweiterte Realit{\"a}t der GI-Fachgruppe VR/AR},
  year =      2011,
  abstract = {
This paper presents a tracking of parts of a human body in a virtual TV studio environment. The tracking is based on a depth camera and a HD studio camera and aims at a realistic interaction between the actor and the computer generated environment. Stereo calibration methods are used to match corresponding pixels of both cameras (HD color and depth image). Hence the images were rectified and column aligned. The disparity is used to correct the depth image pixel by pixel. This image registration results in row and column aligned images where ghost regions are in the depth image resulting from occlusion. Both images are used to generate foreground masks with chroma and depth keying. The color image is taken for skin color segmentation to determine and distinguish the actor's hands and face. In the depth image the flesh colored regions were used to determine their spatial position. The extracted positions were augmented by virtual objects. The scene is rendered correctly with virtual camera parameters which were calculated from the camera calibration parameters. Generated computer graphics with alpha value are combined with the HD color images. This compositing shows interaction with augmented objects for verification. The additional depth information results in changing the size of objects next to the hands when the actor moves around.
}
}
% 2012	
@inproceedings{herder-moderator-tracking:2012,
  author    = {Dionysios Marinos and Chris Geiger and Jens Herder},
  title     = {Large-Area Moderator Tracking and Demonstrational Configuration of Position Based Interactions for Virtual Studios},
  booktitle = {10th European Interactive TV Conference},
  year      = 2012,
  month     = jul,
  address   = {Berlin},
  abstract  = {
In this paper we introduce a system for tracking persons walking or standing on a large planar surface and for using the acquired data to easily configure position based interactions for virtual studio productions. The tracking component of the system, radarTRACK, is based on a laser scanner device capable of delivering interaction points on a large configurable plane. By using the device on the floor it is possible to use the delivered data to detect feet positions and derive the position and orientation of one or more users in real time. The second component of the system, named OscCalibrator, allows for the easy creation of multidimensional linear mappings between input and output parameters and the routing of OSC messages within a single modular design environment. We demonstrate the use of our system to flexibly create position-based interactions in a virtual studio environment.
}
}

@INPROCEEDINGS{herder-touchplanvslite:2012,
    author =    {Constantin Brosda and Jeff Daemen and Sascha Djuderija and Stephan Joeres and Oleg Langer and Andre Schweitzer and Andreas Wilhelm and Jens Herder},
    title =     {{TouchPlanVS Lite} -- A Tablet-based Tangible Multitouch Planning System for Virtual {TV} Studio Productions},
    booktitle = {Proceedings of the 2012 Joint International Conference on Human-Centered Computer Environments},
    series =    {HCCE '12},
    year =      2012,
    isbn =      {978-1-4503-1191-5},
    location =  {Aizu-Wakamatsu and Hamamatsu and Duesseldorf, Japan and Germany},
    pages =     {64--67},
    numpages =  {4},
    doi =       {10.1145/2160749.2160764},
    acmid =     {2160764},
    publisher = {ACM},
    address =   {New York, NY, USA},
    keywords =  {application streaming, capacitive touch screens, planning tools, tangibles, virtual studio},
    month =     mar,
    abstract = {
    This paper presents a mobile approach of integrating tangible user feedback in today's virtual TV studio productions. We describe a tangible multitouch planning system, enabling a single user to prepare and customize scene flow and settings. Users can view and interact with virtual objects by using a tangible user interface on a capacitive multitouch surface. In a 2D setting created TV scenes are simultaneously rendered as separate view using a production/target renderer in 3D. Thereby the user experiences a closer reproduction of a final production and set assets can be reused. Subsequently, a user can arrange scenes on a timeline while maintaining different versions/sequences. The system consists of a tablet and a workstation, which does all application processing and rendering. The tablet is just an interface connected via wireless LAN.
    }
}

@INPROCEEDINGS{herder-inearguide:2012,
  author =    {Philipp Ludwig and Joachim B{\"u}chel and Jens Herder and Wolfgang Vonolfen},
  title =     {{InEarGuide} - A Navigation and Interaction Feedback System using In Ear Headphones for Virtual {TV} Studio Productions},
  address =   {D{\"u}sseldorf},
  booktitle = {9. Workshop Virtuelle und Erweiterte Realit{\"a}t der GI-Fachgruppe VR/AR},
  year =      2012,
  abstract = {
This paper presents an approach to integrate non-visual user feedback in today's virtual tv studio productions. Since recent studies showed that systems providing vibro-tactile feedback are not sufficient for replacing the common visual feedback, we developed an audio-based solution using an in ear headphone system, enabling a talent to move, avoid and point to virtual objects in a blue or green box. The system consists of an optical head tracking system, a wireless in ear monitor system and a workstation, which performs all application and audio processing. Using head related transfer functions, the talent gets directional and distance cues. Past research showed, that generating reflections of the sounds and simulating the acoustics of the virtual room helps the listener to conceive the acoustical feedback, we included this technique as well. In a user study with 15 participants the performance of the system was evaluated.}
}

@proceedings{herder-editor-gi-vrar:2012,
  title     = {Virtuelle und {Erweiterte} {Realit{\"a}t} -- 9. {Workshop} der {GI}-{Fachgruppe} {VR/AR}},
  editor    = {Christian Geiger and Jens Herder and Tom Vierjahn},
  url       = {https://www.shaker.de/de/content/catalogue/index.asp?lang=de&ID=8&ISBN=978-3-8440-1309-2},
  isbn      = {978-3-8440-1309-2},
  month     = sep,
  year      = {2012},
  location  = {D{\"u}sseldorf, Germany},
  publisher = {Shaker Verlag},
  address   = {Herzogenrath, Germany},
  keywords  = {VR Systeme; Virtuelle Realit{\"a}t; Interaction Design; Visualisierung; Mixed Reality; Interaktion; Rendering und Simulation; Tracking},
  abstract = {
 Der neunte Workshop Virtuelle und Erweiterte Realit{\"a}t der Fachgruppe VR/AR der Gesellschaft f{\"u}r Informatik e.V. wurde an der FH D{\"u}sseldorf vom 19.09. - 20.09.2012 durchgef{\"u}hrt. Dies ist der Tagungsband des neunten Workshops zum Thema Virtuelle und Erweiterte Realit{\"a}t, die von der Fachgruppe VR/AR der Gesellschaft f{\"u}r Informatik e.V. ins Leben gerufen wurde. Als etablierte Plattform f{\"u}r den Informations- und Ideenaustausch der deutschsprachigen VR/AR-Szene bot der Workshop den idealen Rahmen, aktuelle Ergebnisse und Vorhaben aus Forschung und Entwicklung -- auch provokative Ideen -- im Kreise eines fachkundigen Publikums zur Diskussion zu stellen. Insbesondere wollten wir auch jungen Nachwuchswissenschaftlern die M{\"o}glichkeit geben, ihre Arbeiten zu pr{\"a}sentieren; darunter fielen laufende Promotionsvorhaben oder auch herausragende studentische Arbeiten von Hochschulen und Forschungseinrichtungen. Eine Auswahl der besten Artikel aus dem Workshop werden in einer Sonderausgabe des Journal of Virtual Reality and Broadcasting (JVRB) ver{\"o}ffentlicht. F{\"u}r einen Beitrag eigneten sich alle Themenfelder der Virtuellen und Erweiterten Realit{\"a}t, insbesondere:
Eingabeger{\"a}te und Interaktionstechniken,
3D Audio,
Avatare,
Displaytechnologien,
VR Tabletops,
Simulatoren,
Echtzeitrendering,
Haptische Interaktion,
Human Factors,
Industrielle Einsatzszenarien,
Innovative Anwendungen,
Intelligente Umgebungen,
Modellierung und Simulation,
Systemarchitekturen,
Tracking,
Verteilte und kooperative VR/AR-L{\"o}sungen,
VR und Gesellschaft,
VR und Kunst,
VR-Spiele,
Lehrkonzepte zur VR/AR.
  }
}

% 2013
@INPROCEEDINGS{herder-osvs:2013,
    author =    {Jeff Daemen and Peter Haufs-Brusberg and Jens Herder},
    title =     {Markerless Actor Tracking for Virtual ({TV}) Studio Applications},
    booktitle = {International Joint Conference on Awareness Science and Technology \& Ubi-Media Computing},
    series =    {iCAST 2013 \& UMEDIA 2013},
    year =      2013,
    location =  {Aizu-Wakamatsu and Hamamatsu and Duesseldorf, Japan and Germany},
    numpages =  {6},
    url =       {http://vsvr.medien.hs-duesseldorf.de/publications/hc2013-osvs-abstract.html},
    doi =       {10.1109/ICAwST.2013.6765544},
    publisher = {IEEE},
    month =     nov,
    internal-note = {review: removed empty isbn/pages and an acmid copied from an ACM entry (publisher here is IEEE) -- add real page numbers if known},
    abstract = {
    Virtual (TV) studios gain much more acceptance through improvements in computer graphics and camera tracking. Still commercial studios cannot have full interaction between actors and virtual scene because actors data are not completely digital available as well as the feedback for actors is still not sufficient. Markerless full body tracking might revolutionize virtual studio technology as it allows better interaction between real and virtual world. This article reports about using a markerless actor tracking in a virtual studio with a tracking volume of nearly 40 cubic meter enabling up to three actors within the green box. The tracking is used for resolving the occlusion between virtual objects and actors so that the renderer can output automatically a mask for virtual objects in the foreground in case the actor is behind. It is also used for triggering functions scripted within the renderer engine, which are attached to virtual objects, starting any kind of action (e.g., animation). Last but not least the system is used for controlling avatars within the virtual set. All tracking and rendering is done within a studio frame rate of 50 Hz with about 3 frames delay. The markerless actor tracking within virtual studios is evaluated by experts using an interview approach. The statistical evaluation is based on a questionnaire.
    }
}
@INPROCEEDINGS{herder-vron:2013,
    author = {Jose Burga and Jeff Daemen and Sascha Charlie Djuderija and Maren Gnehr and Lars Goossens
    		 and Sven Hartz and Peter Haufs-Brusberg and Jens Herder and Mohammed Ibrahim and Nikolas Koop
		 and Christophe Leske and Laurid Meyer and Antje M{\"u}ller and Bj{\"o}rn Salgert and Richard Schroeder and Simon Thiele},
   booktitle = {10th International Conference on Visual Media Production (CVMP 2013)},
   title =     {Four Metamorphosis States in a Distributed Virtual ({TV}) Studio: Human, Cyborg, Avatar, and Bot},
   note =      {short paper},
   address =   {London},
   month =     nov,
   year =      2013,
   abstract = {
The major challenge in virtual studio technology is the interaction between the actor and virtual objects. Within a distributed live production, two locally separated markerless tracking systems where used simultaneously alongside a virtual studio. The production was based on a fully tracked actor, cyborg (half actor, half graphics), avatar, and a bot. All participants could interact and throw a virtual disc. This setup is compared and mapped to Milgram's continuum and technical challenges are described.
   }
}
% 2014
@INPROCEEDINGS{herder-ace-spiderfeedback:2014,
    author =   {Jonathan Simsch and Jens Herder},
   title =     {SpiderFeedback - Visual Feedback for Orientation in Virtual {\sc TV} Studios},
   booktitle = {ACE'14, 11th Advances in Computer Entertainment Technology Conference},
   address =   {Funchal, Portugal},
   month =     nov,
   year =      2014,
   doi =       {10.1145/2663806.2663830},
   publisher = {ACM},
   abstract = {
A visual and spatial feedback system for orientation in virtual sets of virtual \textsc{tv} studios was developed and evaluated. It is based on a green proxy object, which moves around in the acting space by way of four transparent wires. A separate unit controls four winches and is connected to an engine, which renders the virtual set. A new developed plugin registers a virtual object's position with the proxy object which imitates the virtual object's movement on stage. This will allow actors to establish important eye contact with a virtual object and feel more comfortable in a virtual set. Furthermore, interaction with the virtual object and its proxy can be realised through a markerless actor tracking system. Several possible scenarios for user application were recorded and presented to experts in the broadcast industry, who evaluated the potential of SpiderFeedback in interviews and by questionnaires.
   }
}

@InProceedings{herder-subtle-animations:2014,
  author =       {Jens Herder and Fabian B{\"u}ntig and Jeff Daemen and Jaroslaw Lang and Florian L{\"u}ck and Mitja S{\"a}ger and Roluf S{\"o}rensen and Markus Hermanni and Wolfgang Vonolfen},
  title =        {Subtle Animations using Talent Tracking in a Virtual ({TV}) Studio},
  booktitle =    {17th Int.\ Conf.\ on Human and Computer},
  year =         2014,
  organization = {Shizuoka University},
  address =      {Hamamatsu / Aizu-Wakamatsu / D{\"u}sseldorf},
  url =          {http://vsvr.medien.hs-duesseldorf.de/productions/sa2014/},
  month =        dec,
  abstract =     {Markerless talent tracking is widely used for interactions and animations within virtual environments. In a virtual (tv) studio talents could be overburden by interaction tasks because camera and text require extensive attention. We take a look into animations and interactions within a studio, which do not require any special attention or learning. We show the generation of an artificial shadow from a talent, which ease the keying process, where separation of real shadows from the background is a difficult task. We also demonstrate animations of footsteps and dust. Furthermore, capturing talents' height can also be used to adjust the parameters of elements in the virtual environment, like the position and scaling of a virtual display. In addition to the talents, a rigid body was tracked as placeholder for graphics, easing the interaction tasks for a talent. Two test productions show the possibilities, which subtle animations offer. In the second production, the rendering was improved (shadows, filtering, normal maps, ...) and instead of using the rigid body to move an object (a flag), the animation was only controlled by the hand's position.
}
}

@InProceedings{herder-spider-ar:2014,
  author =   {Jonathan Simsch and Jens Herder},
  title =    {Spider {AR} - {Markerless} Augmented Reality System for Stage Flight Systems},
  booktitle =    {17th Int.\ Conf.\ on Human and Computer},
  year =     2014,
  organization =  {Shizuoka University},
  address =  {Hamamatsu / Aizu-Wakamatsu / D{\"u}sseldorf},
  url ={http://vsvr.medien.hs-duesseldorf.de/productions/sf2014/},
  month =    dec,
  abstract =     {}
}
% 2015
@INPROCEEDINGS{herder-grapp-modal:2015,
    author = {Jannik Fiedler and Stefan Rilling and Manfred Bogen and Jens Herder}, 
   title = {Multimodal Interaction Techniques in Scientific Data Visualization: An Analytical Survey},
   booktitle = {{\emph{GRAPP 2015}}, 10th International Conference on Computer Graphics Theory and Applications},
   address = {Berlin, Germany},
   month = mar,
   year = 2015,
   pages={431--437},
   doi={10.5220/0005296404310437},
   isbn={978-989-758-087-1},
   abstract = {
The interpretation process of complex data sets makes the integration of effective interaction techniques crucial. Recent work in the field of human-computer interaction has shown that there is strong evidence that multimodal user interaction, i.e. the integration of various input modalities and interaction techniques into one comprehensive user interface, can improve human performance when interacting with complex data sets. However, it is still unclear which factors make these user interfaces superior to unimodal user interfaces. The contribution of this work is an analytical comparison of a multimodal and a unimodal user interface for a scientific visualization application. We show that multimodal user interaction with simultaneously integrated speech and gesture input improves user performance regarding efficiency and ease of use. 
   }
}


@incollection{herder-virtualities:2015,
year={2015},
isbn={978-3-319-17042-8},
booktitle={Virtual Realities},
volume={8844},
series={Lecture Notes in Computer Science},
editor={Brunnett, Guido and Coquillart, Sabine and van Liere, Robert and Welch, Gregory and V\'{a}\v{s}a, Libor},
doi={10.1007/978-3-319-17043-5_2},
title={Four Metamorphosis States in a Distributed Virtual (TV) Studio: Human, Cyborg, Avatar, and Bot - Markerless Tracking and Feedback for Realtime Animation Control},
url={http://dx.doi.org/10.1007/978-3-319-17043-5_2},
publisher={Springer International Publishing},
keywords={Markerless tracking; Virtual studio; Avatars; Virtual characters; Interaction feedback},
author={Herder, Jens and Daemen, Jeff and Haufs-Brusberg, Peter and Abdel Aziz, Isis},
pages={16--32},
language={English},
abstract = {
The major challenge in virtual studio technology is the interaction between actors and virtual objects. Virtual studios differ from other virtual environments because there always exist two concurrent views: The view of the tv consumer and the view of the talent in front of the camera. This paper illustrates the interaction and feedback in front of the camera and compares different markerless person tracking systems, which are used for realtime animations. Entertaining animations are required, but sensors usually provide only a limited number of parameters. Additional information based on the context allows the generation of appealing animations, which might be partly prefabricated. As main example, we use a distributed live production in a virtual studio with two locally separated markerless tracking systems. The production was based on a fully tracked actor, cyborg (half actor, half graphics), avatar, and a bot. All participants could interact and throw a virtual disc. This setup is compared and mapped to Milgram's continuum and technical challenges are described in detail.
}
}
% 2016
@INPROCEEDINGS{herder-tvx-rob:2016,
    author = {Jeff Daemen and Jens Herder and Cornelius Koch and Philipp Ladwig and Roman Wiche and Kai Wilgen}, 
   title = {Semi-Automatic Camera and Switcher Control for Live Broadcast},
   booktitle = {{\emph{ TVX'16}}, International Conference on Interactive Experiences for Television and Online Video},
   address = {Chicago, IL, USA},
   month = jun,
   year = 2016,
   pages = {129--134},
   doi = {10.1145/2932206.2933559},
   isbn = {978-1-4503-4067-0},
   publisher = {ACM},
   abstract = {
   Live video broadcasting requires a multitude of professional expertise to enable multi-camera productions. Robotic systems allow the automation of common and repeated tracking shots. However, predefined camera shots do not allow quick adjustments when required due to unpredictable events. We introduce a modular automated robotic camera control and video switch system, based on fundamental cinematographic rules. The actors' positions are provided by a markerless tracking system. In addition, sound levels of actors' lavalier microphones are used to analyse the current scene. An expert system determines appropriate camera angles and decides when to switch from one camera to another. A test production was conducted to observe the developed prototype in a live broadcast scenario and served as a video-demonstration for an evaluation.
   }
 }
 
@INPROCEEDINGS{herder-gesture:2016,
  author = {Marina Ballester Ripoll and Jens Herder and Philipp Ladwig and Kai Vermeegen},
  title = {Comparison of two Gesture Recognition Sensors for Virtual TV Studios},
  location = {Bielefeld, Germany},
  booktitle = {13. Workshop Virtuelle und Erweiterte Realit{\"a}t der GI-Fachgruppe VR/AR},
  year = 2016,
  month = sep,
  isbn = {978-3-8440-4718-9},
  editor = {Thies Pfeiffer and Julia Fr\"ohlich and Rolf Kruse},
  publisher = {Shaker Verlag},
 address = {Herzogenrath, Germany},
  abstract = {
In order to improve the interactivity between users and computers, recent technologies focus on incorporating gesture recognition into interactive systems. The aim of this article is to evaluate the effectiveness of using a Myo control armband and the Kinect 2 for recognition of gestures in order to interact with virtual objects in a weather report scenario. The Myo armband has an inertial measurement unit and is able to read electrical activity produced by skeletal muscles, which can be recognized as gestures, which are trained by machine learning. A Kinect sensor was used to build up a dataset which contains motion recordings of 8 different gestures and was also build up by a gesture training machine learning algorithm. Both input methods, the Kinect 2 and the Myo armband, were evaluated with the same interaction patterns in a user study, which allows a direct comparison and reveals benefits and limits of each technique.}
}
% 2017
@inproceedings {herder-egve.20171360,
booktitle = {ICAT-EGVE 2017 - International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments},
editor = {Robert W. Lindeman and Gerd Bruder and Daisuke Iwai},
title = {{Towards Precise, Fast and Comfortable Immersive Polygon Mesh Modelling: Capitalising the Results of Past Research and Analysing the Needs of Professionals}},
author = {Ladwig, Philipp and Herder, Jens and Geiger, Christian},
year = {2017},
publisher = {The Eurographics Association},
ISSN = {1727-530X},
ISBN = {978-3-03868-038-3},
DOI = {10.2312/egve.20171360},
abstract ={More than three decades of ongoing research in immersive modelling has revealed many advantages of creating objects in virtual environments. Even though there are many benefits, the potential of immersive modelling has only been partly exploited due to unresolved problems such as ergonomic problems, numerous challenges with user interaction and the inability to perform exact, fast and progressive refinements. This paper explores past research, shows alternative approaches and proposes novel interaction tools for pending problems. An immersive modelling application for polygon meshes is created from scratch and tested by professional users of desktop modelling tools, such as Autodesk Maya, in order to assess the efficiency, comfort and speed of the proposed application with direct comparison to professional desktop modelling tools.
}
}

@inproceedings{herder-rot-streamspace:2017:ART:3132787.3132813,
 author = {Ryskeldiev, Bektur and Cohen, Michael and Herder, Jens},
 title = {Applying Rotational Tracking and Photospherical Imagery to Immersive Mobile Telepresence and Live Video Streaming Groupware},
 booktitle = {SIGGRAPH Asia 2017 Mobile Graphics \& Interactive Applications},
 series = {SA '17},
 year = {2017},
 isbn = {978-1-4503-5410-3},
 location = {Bangkok, Thailand},
 pages = {5:1--5:2},
 articleno = {5},
 numpages = {2},
 url = {http://doi.acm.org/10.1145/3132787.3132813},
 doi = {10.1145/3132787.3132813},
 acmid = {3132813},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {groupware, live streaming, mixed reality, mobile computing, photospherical imagery, rotational tracking, social media, spatial media, telepresence},
} 

@article{herder-fktg-rob:2017,
 author = {Jeff Daemen and Jens Herder and Cornelius Koch and Philipp Ladwig and Roman Wiche and Kai Wilgen},
 title = {Halbautomatische {Steuerung} von {Kamera} und {Bildmischer} bei {Live}-{{\"U}bertragungen}},
 journal = {Fachzeitschrift f\"ur Fernsehen, Film und Elektronische Medien},
 publisher = {FKTG, Fernseh- und Kinotechnische Gesellschaft e.V.},
 address = {Harbach, Deutschland},
 pages = {501--505},
 month = nov,
 year =2017,
 abstract = {
Live-Video-Broadcasting mit mehreren Kameras erfordert eine Vielzahl von Fachkenntnissen. Robotersysteme erm\"oglichen zwar die Automatisierung von g\"angigen und wiederholten Tracking-Aufnahmen, diese erlauben jedoch keine kurzfristigen Anpassungen aufgrund von unvorhersehbaren Ereignissen. In diesem Beitrag wird ein modulares, automatisiertes Kamerasteuerungs- und Bildschnitt-System eingef\"uhrt, das auf grundlegenden kinematografischen Regeln basiert. Die Positionen der Akteure werden durch ein markerloses Tracking-System bereitgestellt. Dar{\"u}ber hinaus werden Tonpegel der Lavaliermikrofone der Akteure zur Analyse der aktuellen Szene verwendet. Ein Expertensystem ermittelt geeignete Kamerawinkel und entscheidet, wann von einer Kamera auf eine andere umgeschaltet werden soll. Eine Testproduktion wurde durchgef\"uhrt, um den entwickelten Prototyp in einem Live-Broadcast-Szenario zu beobachten und diente als Videodemonstration f\"ur eine Evaluierung.
},
 note = {Der Artikel ist eine \"Ubersetzung von dem Konferenzbeitrag Semi-Automatic Camera and Switcher Control for Live Broadcast, International Conference on Interactive Experiences for Television and Online Video, TVX'2016, Chicago, IL, USA, ACM, DOI=10.1145/2932206.2933559, June 22-24, 2016.},
 keywords = {Halbautomatische Roboterkamerasteuerung, Verfolgen von Darstellern, Bildmischer, Szenenanalyse, Filmregeln, Automatische Bildauswahl}
}
% 2018
@article{herder-StreamSpace2:2018,
	author = "Bektur Ryskeldiev and Michael Cohen and Jens Herder",
	title = "StreamSpace: Pervasive Mixed Reality Telepresence for Remote Collaboration on Mobile Devices",
	journal = "Journal of Information Processing",
	year = 2018,
	month = feb,
	volume = 26,
	number = 1,
	pages = "177--185",
	doi = "10.2197/ipsjjip.26.177",
	abstract = "We present a system that takes advantage of mobile rotational tracking and photospherical imagery to allow
users to share their environment with remotely connected peers ``on the go.'' We examined similar solutions and
developed an application that creates a mixed reality space with spatially-oriented live video feeds. Users can collaborate
through audio, video, and three-dimensional drawings in virtual space. The developed system was tested in a
preliminary user study, where we detected an increase in spatial and situational awareness among viewers. Believing
that our system provides a novel style of collaboration in mixed reality environments, we discuss future applications
and extensions of our prototype.",
	url = "http://www.ipsj.or.jp/english/index.html"
}

@inproceedings{herder-grapp-mre:2018,
author={Jens Herder and Philipp Ladwig and Kai Vermeegen and Dennis Hergert and Florian Busch and Kevin Klever and Sebastian Holthausen and Bektur Ryskeldiev},
title={Mixed Reality Experience - How to Use a Virtual (TV) Studio for Demonstration of Virtual Reality Applications},
booktitle={Proceedings of the 13th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications - Volume 1: GRAPP},
  address = {Santa Cruz, Portugal},
  year=2018,
  month = jan,
pages={281--287},
publisher={SciTePress},
organization={INSTICC},
doi={10.5220/0006637502810287},
isbn={978-989-758-287-5},
abstract = {
The article discusses the question of ``How to convey the experience in a virtual environment to third parties?'' and explains the different technical implementations which can be used for live streaming and recording of a mixed reality experience. The real-world applications of our approach include education, entertainment, e-sports, tutorials, and cinematic trailers, which can benefit from our research by finding a suitable solution for their needs. We explain and outline our Mixed Reality systems as well as discuss the experience of recorded demonstrations of different VR applications, including the need for calibrated camera lens parameters based on realtime encoder values.
   }
}

@inproceedings{herder-blockchain:2018:DMC:3174910.3174952,
 author = {Ryskeldiev, Bektur and Ochiai, Yoichi and Cohen, Michael and Herder, Jens},
 title = {Distributed Metaverse: Creating Decentralized Blockchain-based Model for Peer-to-peer Sharing of Virtual Spaces for Mixed Reality Applications},
 booktitle = {Proceedings of the 9th Augmented Human International Conference},
 series = {AH '18},
 year = {2018},
 isbn = {978-1-4503-5415-8},
 location = {Seoul, Republic of Korea},
 pages = {39:1--39:3},
 articleno = {39},
 numpages = {3},
 url = {http://doi.acm.org/10.1145/3174910.3174952},
 doi = {10.1145/3174910.3174952},
 acmid = {3174952},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {Blockchain, Groupware, Mixed Reality, Mobile Computing, Photospherical Imagery, Social Media, Spatial Media, Telepresence},
 abstract = {Mixed reality telepresence is becoming an increasingly popular form of interaction in social and collaborative applications. We are interested in how created virtual spaces can be archived, mapped, shared, and reused among different applications. Therefore, we propose a decentralized blockchain-based peer-to-peer model of distribution, with virtual spaces represented as blocks. We demonstrate the integration of our system in a collaborative mixed reality application and discuss the benefits and limitations of our approach.
 }
} 
@inproceedings{herder-spotility:2018:SCT:3272973.3274100,
 author = {Ryskeldiev, Bektur and Igarashi, Toshiharu and Zhang, Junjian and Ochiai, Yoichi and Cohen, Michael and Herder, Jens},
 title = {Spotility: Crowdsourced Telepresence for Social and Collaborative Experiences in Mobile Mixed Reality},
 booktitle = {Companion of the 2018 ACM Conference on Computer Supported Cooperative Work and Social Computing},
 series = {CSCW '18},
 year = {2018},
 isbn = {978-1-4503-6018-0},
 location = {Jersey City, NJ, USA},
 pages = {373--376},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/3272973.3274100},
 doi = {10.1145/3272973.3274100},
 acmid = {3274100},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {groupware, mixed reality, mobile computing, remote collaboration, spatial media, telepresence, video streaming},
 abstract = {
 Live video streaming is becoming increasingly popular as a form of interaction in social applications. One of its main advantages is an ability to immediately create and connect a community of remote users on the spot. In this paper we discuss how this feature can be used for crowdsourced completion of simple visual search tasks (such as finding specific objects in libraries and stores, or navigating around live events) and social interactions through mobile mixed reality telepresence interfaces. We present a prototype application that allows users to create a mixed reality space with a photospherical imagery as a background and interact with other connected users through viewpoint, audio, and video sharing, as well as realtime annotations in mixed reality space. Believing in the novelty of our system, we conducted a short series of interviews with industry professionals on the possible applications of our system. We discuss proposed use-cases for user evaluation, as well as outline future extensions of our system.
 }
} 

@proceedings{herder-editor-gi-vrar:2018,
  title     = {Virtuelle und {Erweiterte} {Realit{\"a}t} -- 15. {Workshop} der {GI}-{Fachgruppe} {VR/AR}},
  editor    = {Jens Herder and Christian Geiger and Ralf D{\"o}rner and Paul Grimm},
  doi       = {10.2370/9783844062151},
  url       = {https://www.shaker.de/de/content/catalogue/index.asp?lang=de\&ID=8\&ISBN=978-3-8440-6215-1},
  isbn = {978-3-8440-6215-1},
  month = oct,
  year      = {2018},
  location = {D{\"u}sseldorf, Germany},
  publisher = {Shaker Verlag},
  address = {Herzogenrath, Germany},
  keywords = {VR Systeme; Virtuelle Realit{\"a}t; Interaction Design; Visualisierung; Mixed Reality; Interaktion; Rendering und Simulation; Tracking},
  abstract = {
  Der f\"unfzehnte Workshop Virtuelle und Erweiterte Realit\"at der Fachgruppe VR/AR der Gesellschaft f{\"u}r Informatik e.V. wurde an der Hochschule D{\"u}sseldorf vom 10.-11.10.2018 durchgef\"uhrt. Als etablierte Plattform f{\"u}r den Informations- und Ideenaustausch der deutschsprachigen VR/AR-Szene bot der Workshop den idealen Rahmen, aktuelle Ergebnisse und Vorhaben aus Forschung und Entwicklung -- auch provokative Ideen -- im Kreise eines fachkundigen Publikums zur Diskussion zu stellen. Insbesondere wurde auch jungen Nachwuchswissenschaftlern die M\"oglichkeit gegeben, ihre Arbeiten zu pr\"asentieren; darunter fielen laufende Promotionsvorhaben oder auch herausragende studentische Arbeiten von Hochschulen und Forschungseinrichtungen. Das Programmkomitee hat 12 Lang- und 6 Kurzbeitr{\"a}ge aus {\"u}ber 29 Gesamteinreichungen ausgew\"ahlt. Die Beitr\"age decken das Spektrum der Virtuellen und Erweiterten Realit\"at ab. Der Workshop fand am 10. Oktober gleichzeitig mit dem Innovationstag des Innovationshubs in D\"usseldorf statt. F{\"u}r einen Beitrag eigneten sich alle Themenfelder der Virtuellen und Erweiterten Realit\"at, insbesondere:
3D Eingabeger\"ate und Interaktionstechniken,
Avatare und Agenten,
Displaytechnologien und Tracking,
(Echtzeit-)Rendering,
Education und Edutainment,
Entertainment und Experiences,
Gesellschaft und soziotechnische Aspekte,
Human Factors,
Industrielle Einsatzszenarien,
Innovative Anwendungen,
K{\"u}nstlerische Anwendungen,
Modellierung und Simulation,
Multimodale Interaktion,
Systemarchitekturen und Intelligente Umgebungen,
Verteilte und kooperative VR/AR-Umgebungen.
  }
}

@inproceedings{herder-steamvr-camera-tracking:2018,
  author    = {Vermeegen, Kai and Herder, Jens},
  title     = {A Lighthouse-based Camera Tracking System for Professional Virtual Studios},
  booktitle = {Workshop Proceedings / Tagungsband: Virtuelle und Erweiterte Realit{\"a}t -- 15. Workshop der GI-Fachgruppe VR/AR},
  editor    = {Jens Herder and Christian Geiger and Ralf D{\"o}rner and Paul Grimm},
  pages     = {19--26},
  doi       = {10.2370/9783844062151},
  isbn = {978-3-8440-6215-1},
  url       = {http://vsvr.medien.hs-duesseldorf.de/publications/gi-vrar2018-lighthouse-vs/},
  month = oct,
  year      = {2018},
  location = {D{\"u}sseldorf, Germany},
  publisher = {Shaker Verlag},
 address = {Herzogenrath, Germany},
  abstract = {
  This article describes the possibilities and problems that occur using the SteamVR tracking 2.0 system as a camera tracking system in a virtual studio and explains an approach for implementation and calibration within a professional studio environment. The tracking system allows for cost effective deployment. Relevant application fields are also mixed reality recording and streaming of AR and VR experiences.
  },
  keywords = {Camera Tracking, SteamVR, Vive, Virtual (TV) Studio, Augmented Virtuality, Virtual Reality, Mixed Reality}
}

@inproceedings{herder-filter:2018,
  author    = {Felix Paul and Jens Herder},
  title     = {A model-based filtering approach for real-time human motion data},
  booktitle = {Workshop Proceedings / Tagungsband: Virtuelle und Erweiterte Realit{\"a}t -- 15. Workshop der GI-Fachgruppe VR/AR},
  editor    = {Jens Herder and Christian Geiger and Ralf D{\"o}rner and Paul Grimm},
  pages     = {37--44},
  doi       = {10.2370/9783844062151},
  isbn = {978-3-8440-6215-1},
  url       = {http://vsvr.medien.hs-duesseldorf.de/publications/gi-vrar2018-filter/},
  month = oct,
  year      = {2018},
  location = {D{\"u}sseldorf, Germany},
  publisher = {Shaker Verlag},
 address = {Herzogenrath, Germany},
  abstract = {
  Acquiring human motion data from video images plays an important role in the field of computer vision. Ground truth tracking systems require markers to create high quality motion data. But in many applications it is desired to work without markers. In recent years affordable hardware for markerless tracking systems was made available at a consumer level. Efficient depth camera systems based on Time-of-Flight sensors and structured light systems have made it possible to record motion data in real time. However, the gap between the quality of marker-based and markerless systems is high. The error sources of a markerless motion tracking pipeline are discussed and a model-based filter is proposed, which adapts depending on spatial location. The proposed method is then proven to be more robust and accurate than the unfiltered data stream and can be used to visually enhance the presence of an actor within a virtual environment in live broadcast productions.
  },
  keywords = {time of flight, markerless motion tracking, smoothing filter, virtual studio, shadows}
}

@inproceedings{herder-ar-in-ndt:2018,
  author    = {Robert Deppe and Oliver Nemitz and Jens Herder},
  title     = {Augmented reality for supporting manual non-destructive ultrasonic testing of metal pipes and plates},
  booktitle = {Workshop Proceedings / Tagungsband: Virtuelle und Erweiterte Realit{\"a}t -- 15. Workshop der GI-Fachgruppe VR/AR},
  editor    = {Jens Herder and Christian Geiger and Ralf D{\"o}rner and Paul Grimm},
  pages     = {45--52},
  doi       = {10.2370/9783844062151},
  isbn = {978-3-8440-6215-1},
  url       = {http://vsvr.medien.hs-duesseldorf.de/publications/gi-vrar2018-ar-in-ndt/},
  month = oct,
  year      = {2018},
  location = {D{\"u}sseldorf, Germany},
  publisher = {Shaker Verlag},
 address = {Herzogenrath, Germany},
  abstract = {
  We describe an application of augmented reality technology for non-destructive testing of products in the metal-industry. The prototype is created with hard- and software, that is usually employed in the gaming industry, and delivers positions for creating ultra- sonic material scans (C-scans). Using a stereo camera in combination with an hmd enables realtime visualisation of the probes path, as well as the setting of virtual markers on the specimen. As a part of the implementation the downhill simplex optimization algorithm is implemented to fit the specimen to a cloud of recorded surface points. The accuracy is statistically tested and evaluated with the result, that the tracking system is accurate up to ca. 1-2 millimeters in well set-up conditions. This paper is of interest not only for research institutes of the metal-industry, but also for any areas of work, in which the enhancement with augmented reality is possible and a precise tracking is necessary.
  },
  keywords = {Nondestructive Testing, Ultrasonic, Augmented Reality, Tracking, Stereo camera, head mounted display, NDT, AR}
}

@inproceedings{herder-ar-space:2018,
  author    = {Artur Baranowski and Sebastian Utzig and Philipp Fischer and Andreas Gerndt and Jens Herder},
  title     = {{3D} spacecraft configuration using immersive {AR} technology},
  booktitle = {Workshop Proceedings / Tagungsband: Virtuelle und Erweiterte Realit{\"a}t -- 15. Workshop der GI-Fachgruppe VR/AR},
  editor    = {Jens Herder and Christian Geiger and Ralf D{\"o}rner and Paul Grimm},
  pages     = {71--82},
  doi       = {10.2370/9783844062151},
  isbn = {978-3-8440-6215-1},
  url       = {http://vsvr.medien.hs-duesseldorf.de/publications/gi-vrar2018-ar-space/},
  month = oct,
  year      = {2018},
  location = {D{\"u}sseldorf, Germany},
  publisher = {Shaker Verlag},
 address = {Herzogenrath, Germany},
  abstract = {
  In this paper we propose an integrated immersive augmented reality solution for a software tool supporting spacecraft design and verification. The spacecraft design process relies on expertise in many domains, such as thermal and structural engineering. The various subsystems of a spacecraft are highly interdependent and have differing requirements and constraints. In this context, interactive visualizations play an important role in making expert knowledge accessible. Recent immersive display technologies offer new ways of presenting and interacting with computer-generated content. Possibilities and challenges for spacecraft configuration employing these technologies are explored and discussed. A user interface design for an application using the Microsoft HoloLens is proposed. To this end, techniques for selecting a spacecraft component and manipulating its position and orientation in 3D space are developed and evaluated. Thus, advantages and limitations of this approach to spacecraft configuration are revealed and discussed. },
  keywords = {spacecraft configuration, human computer interaction, augmented reality, 3D user interfaces, HoloLens}
}

@inproceedings{herder-ieeevr-avatar:2019,
	author = {Jens Herder and Nico Brettschneider and Jeroen de Mooij and Bektur Ryskeldiev},
	title = {Avatars for Co-located Collaborations in HMD-based Virtual Environments},
	booktitle = {IEEE VR 2019, 26th IEEE Conference on Virtual Reality and 3D User Interfaces},
	address = {Osaka},
	month = mar,
	year = 2019,
	abstract = {
Multi-user virtual reality is transforming towards a social activity that is no longer only used by remote users, but also in large-scale location-based experiences. Usage of realtime-tracked avatars in co-located business-oriented applications with a ``guide-user-scenario'' is examined for user-related factors of Spatial Presence, Social Presence, User Experience and Task Load. A user study was conducted in order to compare both techniques of a realtime-tracked avatar and a non-visualised guide. Results reveal that the avatar-guide enhanced and stimulated communicative processes while facilitating interaction possibilities and creating a higher sense of mental immersion for users and engagement.},
	keywords = {Virtual Reality, Co-located Collaborations, Head-mounted Display, Avatars, Social Presence}
}
