From c25cffe0329dd944c6cccd0cd4743c5d22ffb241 Mon Sep 17 00:00:00 2001
From: Ruofei Du
Date: Tue, 8 Oct 2024 12:16:23 -0700
Subject: [PATCH] Added UIST 2024.

---
 DuSigchiParser.py              |    27 +-
 sigchi2/UIST_2024_program.html |  1382 ++
 sigchi2/UIST_2024_program.json | 38546 +++++++++++++++++++++++++++++++
 sigchi2/UIST_2024_program.md   |  2944 +++
 4 files changed, 42889 insertions(+), 10 deletions(-)
 create mode 100644 sigchi2/UIST_2024_program.html
 create mode 100644 sigchi2/UIST_2024_program.json
 create mode 100644 sigchi2/UIST_2024_program.md

diff --git a/DuSigchiParser.py b/DuSigchiParser.py
index 592fed0..8d44e1d 100644
--- a/DuSigchiParser.py
+++ b/DuSigchiParser.py
@@ -2,6 +2,7 @@
 import json
 import markdown
 
+FOLDER_NAME = 'sigchi2'
 
 def convert_markdown_to_html(file_path):
   # Read the Markdown file
@@ -19,24 +20,30 @@ def convert_markdown_to_html(file_path):
   print(f"Converted {file_path} to {output_file_path}")
 
 
-def parse(filename_raw, data, indent=0, program='chi', year='2024'):
-  filename = 'sigchi/' + filename_raw[:-4] + 'md'
+def parse(filename_raw, data, indent=0, program='uist', year='2024'):
+  filename = FOLDER_NAME + '/' + filename_raw[:-4] + 'md'
   # with open(filename + 'session.md', 'w', encoding='utf-8') as ff:
   with open(filename, 'w', encoding='utf-8') as f:
     # Iterate over sessions and extract paper titles, session names, and author names
     for session in data['sessions']:
       session_name = session['name']
-      if not (session['typeId'] == 13269):
+      # if not (session['typeId'] == 13269):  # CHI 2024
+      if not (session['typeId'] == 13748):  # UIST 2024
        continue  # Checks if it's a paper session.
       f.write(f"\n## {session_name}\n")
       # ff.write(f"\n## {session_name}\n")
       for content_id in session['contentIds']:
         content = next((c for c in data['contents'] if c['id'] == content_id), None)
+        # if content and content['typeId'] in [
+        #     13269, 13341
+        # ]:  # Check if the content is a paper or journal in CHI
+
         if content and content['typeId'] in [
-            13269, 13341
-        ]:  # Check if the content is a paper or journal
+            13748
+        ]:  # Check if the content is a paper in UIST
+
           paper_title = content['title']
           authors = []
           for author in content['authors']:
@@ -55,10 +62,10 @@ def parse(filename_raw, data, indent=0, program='chi', year='2024'):
           f.write(f"[Link]({link})\n\n")
           KEYWORDS = [
               'Reality', 'XR', 'Virtual', 'LLM', 'AI', 'Large', 'Immersive',
-              'Communication', 'Gaze'
+              'Communication', 'Gaze', 'Dynamic', 'Perception', 'realities'
           ]
-          if any(keyword in session_name for keyword in KEYWORDS):
-            f.write(f"Abstract: {abstract}\n\n")
+          # if any(keyword in session_name for keyword in KEYWORDS):
+          f.write(f"Abstract: {abstract}\n\n")
     f.write('\n\n')
   convert_markdown_to_html(filename)
@@ -68,7 +75,7 @@ def read_and_process_json_files(directory_path):
   # Loop through all files in the specified directory
   for filename in os.listdir(directory_path):
     # Check if the file is a JSON file and contains 'CHI_2024' in the filename
-    if filename.endswith(".json") and "CHI_2024" in filename:
+    if filename.endswith(".json"):
       filename_raw = filename
       file_path = os.path.join(directory_path, filename)
       # Open and read the JSON file with UTF-8 encoding
@@ -81,5 +88,5 @@
 # Get the current working directory
 current_directory = os.getcwd()
 # Specify the directory containing JSON files
-directory_path = os.path.join(current_directory, "sigchi/")
+directory_path = os.path.join(current_directory, FOLDER_NAME)
 read_and_process_json_files(directory_path)
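The patch references several fields of the SIGCHI program export (`sessions`, `contentIds`, `contents`, `typeId`, `authors`) without showing the schema. Below is a minimal sketch of the JSON shape that `parse()` assumes; the key names come from the code above, while the concrete IDs and the author sub-fields are illustrative assumptions and may not match the real export.

```python
# Minimal sketch of the program JSON consumed by parse() above.
# Keys mirror those referenced in the patch; the IDs and author
# sub-fields are assumptions for illustration only.
example_program = {
    "sessions": [
        {
            "name": "Manipulating Text",
            "typeId": 13748,   # UIST 2024 paper-session type, per the patch
            "contentIds": [170001],
        }
    ],
    "contents": [
        {
            "id": 170001,
            "typeId": 13748,   # UIST 2024 paper type, per the patch
            "title": "Beyond the Chat: Executable and Verifiable Text-Editing with LLMs",
            "authors": [
                {"firstName": "Philippe", "lastName": "Laban"}  # assumed field names
            ],
        }
    ],
}

# parse() walks example_program["sessions"], keeps only sessions with
# typeId == 13748, and resolves each contentId against example_program["contents"].
```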
diff --git a/sigchi2/UIST_2024_program.html b/sigchi2/UIST_2024_program.html
new file mode 100644
index 0000000..d2a6ac9
--- /dev/null
+++ b/sigchi2/UIST_2024_program.html
@@ -0,0 +1,1382 @@

Manipulating Text

+

Beyond the Chat: Executable and Verifiable Text-Editing with LLMs

+

Authors: Philippe Laban, Jesse Vig, Marti Hearst, Caiming Xiong, Chien-Sheng Wu

+

Link

+

Abstract: Conversational interfaces powered by Large Language Models (LLMs) have recently become a popular way to obtain feedback during document editing. However, standard chat-based conversational interfaces cannot explicitly surface the editing changes that they suggest. To give the author more control when editing with an LLM, we present InkSync, an editing interface that suggests executable edits directly within the document being edited. Because LLMs are known to introduce factual errors, InkSync also supports a 3-stage approach to mitigate this risk: Warn authors when a suggested edit introduces new information, help authors Verify the new information's accuracy through external search, and allow a third party to Audit with a-posteriori verification via a trace of all auto-generated content. Two usability studies confirm the effectiveness of InkSync's components when compared to standard LLM-based chat interfaces, leading to more accurate and more efficient editing, and improved user experience.

+

ScriptViz: A Visualization Tool to Aid Scriptwriting based on a Large Movie Database

+

Authors: Anyi Rao, Jean-Peïc Chou, Maneesh Agrawala

+

Link

+

Abstract: Scriptwriters usually rely on their mental visualization to create a vivid story by using their imagination to see, feel, and experience the scenes they are writing. Besides mental visualization, they often refer to existing images or scenes in movies and analyze the visual elements to create a certain mood or atmosphere. In this paper, we develop a new tool, ScriptViz, to provide external visualization based on a large movie database for the screenwriting process. It retrieves reference visuals on the fly based on scripts’ text and dialogue from a large movie database. The tool provides two types of control on visual elements that enable writers to 1) see exactly what they want with fixed visual elements and 2) see variances in uncertain elements. User evaluation among 15 scriptwriters shows that ScriptViz is able to present scriptwriters with consistent yet diverse visual possibilities, aligning closely with their scripts and helping their creation.

+

SkipWriter: LLM-Powered Abbreviated Writing on Tablets

+

Authors: Zheer Xu, Shanqing Cai, Mukund Varma T, Subhashini Venugopalan, Shumin Zhai

+

Link

+

Abstract: Large Language Models (LLMs) may offer transformative opportunities for text input, especially for physically demanding modalities like handwriting. We studied a form of abbreviated handwriting by designing, developing, and evaluating a prototype, named SkipWriter, that converts handwritten strokes of a variable-length prefix-based abbreviation (e.g., "ho a y" as handwritten strokes) into the intended full phrase (e.g., "how are you" in the digital format) based on the preceding context. SkipWriter consists of an in-production handwriting recognizer and an LLM fine-tuned on this task. With flexible pen input, SkipWriter allows the user to add and revise prefix strokes when predictions do not match the user's intent. A user evaluation demonstrated a 60% reduction in motor movements with an average speed of 25.78 WPM. We also showed that this reduction is close to the ceiling of our model in an offline simulation.
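SkipWriter itself pairs a handwriting recognizer with a fine-tuned LLM, neither of which is reproduced here. The toy sketch below only illustrates the prefix-abbreviation idea ("ho a y" expanding to "how are you") using a tiny hand-picked vocabulary and no context model.

```python
# Toy illustration of prefix-based abbreviation expansion (not SkipWriter's
# actual recognizer/LLM pipeline): each abbreviated token is matched against
# a small vocabulary by prefix, ignoring the preceding context.
VOCAB = ["how", "hot", "are", "art", "you", "your"]

def expand(abbrev: str, vocab=VOCAB) -> list[str]:
    """Expand each prefix token to its first matching vocabulary word."""
    words = []
    for token in abbrev.split():
        match = next((w for w in vocab if w.startswith(token)), token)
        words.append(match)
    return words

print(expand("ho a y"))  # ['how', 'are', 'you'] with this toy vocabulary
```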

+

Bluefish: Composing Diagrams with Declarative Relations

+

Authors: Josh Pollock, Catherine Mei, Grace Huang, Elliot Evans, Daniel Jackson, Arvind Satyanarayan

+

Link

+

Abstract: Diagrams are essential tools for problem-solving and communication as they externalize conceptual structures using spatial relationships. But when picking a diagramming framework, users are faced with a dilemma. They can either use a highly expressive but low-level toolkit, whose API does not match their domain-specific concepts, or select a high-level typology, which offers a recognizable vocabulary but supports a limited range of diagrams. To address this gap, we introduce Bluefish: a diagramming framework inspired by component-based user interface (UI) libraries. Bluefish lets users create diagrams using relations: declarative, composable, and extensible diagram fragments that relax the concept of a UI component. Unlike a component, a relation does not have sole ownership over its children nor does it need to fully specify their layout. To render diagrams, Bluefish extends a traditional tree-based scenegraph to a compound graph that captures both hierarchical and adjacent relationships between nodes. To evaluate our system, we construct a diverse example gallery covering many domains including mathematics, physics, computer science, and even cooking. We show that Bluefish's relations are effective declarative primitives for diagrams. Bluefish is open source, and we aim to shape it into both a usable tool and a research platform.

+

Future Fabrics

+

ScrapMap: Interactive Color Layout for Scrap Quilting

+

Authors: Mackenzie Leake, Ross Daly

+

Link

+

Abstract: Scrap quilting is a popular sewing process that involves combining leftover pieces of fabric into traditional patchwork designs. Imagining the possibilities for these leftovers and arranging the fabrics in such a way that achieves visual goals, such as high contrast, can be challenging given the large number of potential fabric assignments within the quilt's design. We formulate the task of designing a scrap quilt as a graph coloring problem with domain-specific coloring and material constraints. Our interactive tool called ScrapMap helps quilters explore these potential designs given their available materials by leveraging the hierarchy of scrap quilt construction (e.g., quilt blocks and motifs) and providing user-directed automatic block coloring suggestions. Our user evaluation indicates that quilters find ScrapMap useful for helping them consider new ways to use their scraps and create visually striking quilts.
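The abstract frames scrap-quilt layout as constrained graph coloring. The sketch below is a generic greedy coloring over quilt blocks, shown only to illustrate that formulation; ScrapMap's actual solver, constraints, and block hierarchy are not described here and are not reproduced.

```python
# Generic greedy graph coloring over quilt blocks (illustration only, not
# ScrapMap's solver). Nodes are blocks, edges join blocks that should not
# share a fabric, and "colors" are the available fabrics.
def greedy_fabric_assignment(adjacency: dict[str, set[str]],
                             fabrics: list[str]) -> dict[str, str]:
    assignment: dict[str, str] = {}
    for block in adjacency:
        used = {assignment[n] for n in adjacency[block] if n in assignment}
        choice = next((f for f in fabrics if f not in used), None)
        if choice is None:
            raise ValueError(f"not enough fabrics for block {block}")
        assignment[block] = choice
    return assignment

blocks = {"A": {"B"}, "B": {"A", "C"}, "C": {"B"}}  # assumed adjacency
print(greedy_fabric_assignment(blocks, ["red scrap", "blue scrap"]))
```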

+

What's in a cable? Abstracting Knitting Design Elements with Blended Raster/Vector Primitives

+

Authors: Hannah Twigg-Smith, Yuecheng Peng, Emily Whiting, Nadya Peek

+

Link

+

Abstract: In chart-based programming environments for machine knitting, patterns are specified at a low level by placing operations on a grid. This highly manual workflow makes it challenging to iterate on design elements such as cables, colorwork, and texture. While vector-based abstractions for knitting design elements may facilitate higher-level manipulation, they often include interdependencies which require stitch-level reconciliation. To address this, we contribute a new way of specifying knits with blended vector and raster primitives. Our abstraction supports the design of interdependent elements like colorwork and texture. We have implemented our blended raster/vector specification in a direct manipulation design tool where primitives are layered and rasterized, allowing for simulation of the resulting knit structure and generation of machine instructions. Through examples, we show how our approach enables higher-level manipulation of various knitting techniques, including intarsia colorwork, short rows, and cables. Specifically, we show how our tool supports the design of complex patterns including origami pleat patterns and capacitive sensor patches.

+

Embrogami: Shape-Changing Textiles with Machine Embroidery

+

Authors: Yu Jiang, Alice Haynes, Narjes Pourjafarian, Jan Borchers, Jürgen Steimle

+

Link

+

Abstract: Machine embroidery is a versatile technique for creating custom and entirely fabric-based patterns on thin and conformable textile surfaces. However, existing machine-embroidered surfaces remain static, limiting the interactions they can support. We introduce Embrogami, an approach for fabricating textile structures with versatile shape-changing behaviors. Inspired by origami, we leverage machine embroidery to form finger-tip-scale mountain-and-valley structures on textiles with customized shapes, bistable or elastic behaviors, and modular composition. The structures can be actuated by the user or the system to modify the local textile surface topology, creating interactive elements like toggles and sliders or textile shape displays with an ultra-thin, flexible, and integrated form factor. We provide a dedicated software tool and report results of technical experiments to allow users to flexibly design, fabricate, and deploy customized Embrogami structures. With four application cases, we showcase Embrogami’s potential to create functional and flexible shape-changing textiles with diverse visuo-tactile feedback.

+

KODA: Knit-program Optimization by Dependency Analysis

+

Authors: Megan Hofmann

+

Link

+

Abstract: Digital knitting machines have the capability to reliably manufacture seamless, textured, and multi-material garments, but these capabilities are obscured by limiting CAD tools. Recent innovations in computational knitting build on emerging programming infrastructure that gives full access to the machine's capabilities but requires an extensive understanding of machine operations and execution. In this paper, we contribute a critical missing piece of the knitting-machine programming pipeline--a program optimizer. Program optimization allows programmers to focus on developing novel algorithms that produce desired fabrics while deferring concerns of efficient machine operations to the optimizer. We present KODA, the Knit-program Optimization by Dependency Analysis method. KODA re-orders and reduces machine instructions to reduce knitting time, increase knitting reliability, and manage boilerplate operations that adjust the machine state. The result is a system that enables programmers to write readable and intuitive knitting algorithms while producing efficient and verified programs.
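KODA's optimizer is not shown in this program listing. As a generic illustration of the underlying idea of dependency-driven reordering (not KODA's algorithm), instructions can be re-sequenced with a topological sort so that every instruction runs after everything it depends on; the example instructions and dependencies below are assumptions.

```python
# Generic dependency-respecting reordering (illustrates dependency analysis
# in the abstract sense; not KODA's optimizer or instruction set).
from graphlib import TopologicalSorter

# instruction -> set of instructions it depends on (assumed example data)
deps = {
    "knit row 2": {"knit row 1"},
    "transfer stitches": {"knit row 1"},
    "knit row 3": {"knit row 2", "transfer stitches"},
    "knit row 1": set(),
}

order = list(TopologicalSorter(deps).static_order())
print(order)  # e.g. ['knit row 1', 'knit row 2', 'transfer stitches', 'knit row 3']
```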

+

X-Hair: 3D Printing Hair-like Structures with Multi-form, Multi-property and Multi-function

+

Authors: Guanyun Wang, Junzhe Ji, Yunkai Xu, Lei Ren, Xiaoyang Wu, Chunyuan Zheng, Xiaojing Zhou, Xin Tang, Boyu Feng, Lingyun Sun, Ye Tao, Jiaji Li

+

Link

+

Abstract: In this paper, we present X-Hair, a method that enables 3D-printed hair with various forms, properties, and functions. We developed a two-step suspend printing strategy to fabricate hair-like structures in different forms (e.g., fluff, bristle, barb) by adjusting parameters including Extrusion Length Ratio and Total Length. Moreover, a design tool is also established for users to customize hair-like structures with various properties (e.g., pointy, stiff, soft) on imported 3D models, which virtually shows the results for previewing and generates G-code files for 3D printing. We demonstrate the design space of X-Hair and evaluate the properties of these structures under different parameters. Through a series of applications with hair-like structures, we validate X-Hair's practical usage of biomimicry, decoration, heat preservation, adhesion, and haptic interaction.

+

TouchpadAnyWear: Textile-Integrated Tactile Sensors for Multimodal High Spatial-Resolution Touch Inputs with Motion Artifacts Tolerance

+

Authors: Junyi Zhao, Pornthep Preechayasomboon, Tyler Christensen, Amirhossein H. Memar, Zhenzhen Shen, Nick Colonnese, Michael Khbeis, Mengjia Zhu

+

Link

+

Abstract: This paper presents TouchpadAnyWear, a novel family of textile-integrated force sensors capable of multi-modal touch input, encompassing micro-gesture detection, two-dimensional (2D) continuous input, and force-sensitive strokes. This thin (less than 1.5 mm) and conformal device features high spatial resolution sensing and motion artifact tolerance through its unique capacitive sensor architecture. The sensor consists of a knitted textile compressive core, sandwiched by stretchable silver electrodes, and conductive textile shielding layers on both sides. With a high-density sensor pixel array (25/cm²), TouchpadAnyWear can detect touch input locations and sizes with millimeter-scale spatial resolution and a wide range of force inputs (0.05 N to 20 N). The incorporation of miniature polymer domes, referred to as "poly-islands", onto the knitted textile locally stiffens the sensing areas, thereby reducing motion artifacts during deformation. These poly-islands also provide passive tactile feedback to users, allowing for eyes-free localization of the active sensing pixels. Design choices and sensor performance are evaluated using in-depth mechanical characterization. Demonstrations include an 8-by-8 grid sensor as a miniature high-resolution touchpad and a T-shaped sensor for thumb-to-finger micro-gesture input. User evaluations validate the effectiveness and usability of TouchpadAnyWear in daily interaction contexts, such as tapping, forceful pressing, swiping, 2D cursor control, and 2D stroke-based gestures. This paper further discusses potential applications and explorations for TouchpadAnyWear in wearable smart devices, gaming, and augmented reality devices.
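TouchpadAnyWear reports touch location and size from a dense capacitive pixel array. As a generic illustration of grid-to-position estimation only (not the paper's signal-processing pipeline), a weighted centroid over baseline-subtracted readings looks like this; the raw values below are made up.

```python
# Illustration only (not TouchpadAnyWear's firmware): estimate a touch
# location from a grid of capacitance readings via a weighted centroid.
def touch_centroid(grid: list[list[float]], baseline: float = 0.0):
    total = x_acc = y_acc = 0.0
    for y, row in enumerate(grid):
        for x, value in enumerate(row):
            w = max(value - baseline, 0.0)   # ignore readings below baseline
            total += w
            x_acc += w * x
            y_acc += w * y
    return None if total == 0 else (x_acc / total, y_acc / total)

readings = [[0, 0, 0], [0, 5, 1], [0, 1, 0]]   # assumed raw sensor values
print(touch_centroid(readings))                # roughly (1.14, 1.14)
```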

+

Storytime

+

Story-Driven: Exploring the Impact of Providing Real-time Context Information on Automated Storytelling

+

Authors: Jan Henry Belz, Lina Weilke, Anton Winter, Philipp Hallgarten, Enrico Rukzio, Tobias Grosse-Puppendahl

+

Link

+

Abstract: Stories have long captivated the human imagination with narratives that enrich our lives. Traditional storytelling methods are often static and not designed to adapt to the listener’s environment, which is full of dynamic changes. For instance, people often listen to stories in the form of podcasts or audiobooks while traveling in a car. Yet, conventional in-car storytelling systems do not embrace the adaptive potential of this space. The advent of generative AI is the key to creating content that is not just personalized but also responsive to the changing parameters of the environment. We introduce a novel system for interactive, real-time story narration that leverages environment and user context in correspondence with estimated arrival times to adjust the generated story continuously. Through two comprehensive real-world studies with a total of 30 participants in a vehicle, we assess the user experience, level of immersion, and perception of the environment provided by the prototype. Participants' feedback shows a significant improvement over traditional storytelling and highlights the importance of context information for generative storytelling systems.

+

Lumina: A Software Tool for Fostering Creativity in Designing Chinese Shadow Puppets

+

Authors: Zhihao Yao, Yao Lu, Qirui Sun, Shiqing Lyu, Hanxuan Li, Xing-Dong Yang, Xuezhu Wang, Guanhong Liu, Haipeng Mi

+

Link

+

Abstract: Shadow puppetry, a culturally rich storytelling art, faces challenges transitioning to the digital realm. Creators in the early design phase struggle with crafting intricate patterns, textures, and basic animations while adhering to stylistic conventions - hindering creativity, especially for novices. This paper presents Lumina, a tool to facilitate the early Chinese shadow puppet design stage. Lumina provides contour templates, animations, scene editing tools, and machine-generated traditional puppet patterns. These features liberate creators from tedious tasks, allowing focus on the creative process. Developed based on a formative study with puppet creators, the web-based Lumina enables wide dissemination. An evaluation with 18 participants demonstrated Lumina's effectiveness and ease of use, with participants successfully creating designs spanning traditional themes to contemporary and science-fiction concepts.

+

PortalInk: 2.5D Visual Storytelling with SVG Parallax and Waypoint Transitions

+

Authors: Tongyu Zhou, Joshua Yang, Vivian Chan, Ji Won Chung, Jeff Huang

+

Link

+

Abstract: Efforts to expand the authoring of visual stories beyond the 2D canvas have commonly mapped flat imagery to 3D scenes or objects. This translation requires spatial reasoning, as artists must think in two spaces. We propose PortalInk, a tool for artists to craft and export 2.5D graphical stories while remaining in 2D space by using SVG transitions. This is achieved via a parallax effect that generates a sense of depth that can be further explored using pan and zoom interactions. Any canvas position can be saved and linked to in a closed drawn stroke, or "portal," allowing the artist to create spatially discontinuous, or even infinitely looping visual trajectories. We provide three case studies and a gallery to demonstrate how artists can naturally incorporate these interactions to craft immersive comics, as well as re-purpose them to support use cases beyond drawing such as animation, slide-based presentations, web design, and digital journalism.

+

DrawTalking: Building Interactive Worlds by Sketching and Speaking

+

Authors: Karl Rosenberg, Rubaiat Habib Kazi, Li-Yi Wei, Haijun Xia, Ken Perlin

+

Link

+

Abstract: We introduce DrawTalking, an approach to building and controlling interactive worlds by sketching and speaking while telling stories. It emphasizes user control and flexibility, and gives programming-like capability without requiring code. An early open-ended study with our prototype shows that the mechanics resonate and are applicable to many creative-exploratory use cases, with the potential to inspire and inform research in future natural interfaces for creative exploration and authoring.

+

Patchview: LLM-powered Worldbuilding with Generative Dust and Magnet Visualization

+

Authors: John Chung, Max Kreminski

+

Link

+

Abstract: Large language models (LLMs) can help writers build story worlds by generating world elements, such as factions, characters, and locations. However, making sense of many generated elements can be overwhelming. Moreover, if the user wants to precisely control aspects of generated elements that are difficult to specify verbally, prompting alone may be insufficient. We introduce Patchview, a customizable LLM-powered system that visually aids worldbuilding by allowing users to interact with story concepts and elements through the physical metaphor of magnets and dust. Elements in Patchview are visually dragged closer to concepts with high relevance, facilitating sensemaking. The user can also steer the generation with verbally elusive concepts by indicating the desired position of the element between concepts. When the user disagrees with the LLM's visualization and generation, they can correct those by repositioning the element. These corrections can be used to align the LLM's future behaviors to the user's perception. With a user study, we show that Patchview supports the sensemaking of world elements and steering of element generation, facilitating exploration during the worldbuilding process. Patchview provides insights on how customizable visual representation can help sensemake, steer, and align generative AI model behaviors with the user's intentions.
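As a loose illustration of the magnet-and-dust metaphor (not Patchview's implementation), an element's 2D position can be computed as a relevance-weighted average of the concept "magnet" positions, so more relevant concepts pull the element closer; the magnets and relevance scores below are assumed.

```python
# Illustration only: place a story element at the relevance-weighted average
# of concept "magnet" positions (not Patchview's layout or LLM pipeline).
def place_element(magnets: dict[str, tuple[float, float]],
                  relevance: dict[str, float]) -> tuple[float, float]:
    total = sum(relevance.values())
    x = sum(magnets[c][0] * w for c, w in relevance.items()) / total
    y = sum(magnets[c][1] * w for c, w in relevance.items()) / total
    return (x, y)

magnets = {"heroic": (0.0, 0.0), "treacherous": (10.0, 0.0)}
relevance = {"heroic": 0.8, "treacherous": 0.2}   # assumed relevance ratings
print(place_element(magnets, relevance))          # (2.0, 0.0): pulled toward "heroic"
```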

+

An Interactive System for Supporting Creative Exploration of Cinematic Composition Designs

+

Authors: Rui He, Huaxin Wei, Ying Cao

+

Link

+

Abstract: Designing cinematic compositions, which involves moving cameras through a scene, is essential yet challenging in filmmaking. Machinima filmmaking provides real-time virtual environments for exploring different compositions flexibly and efficiently. However, producing high-quality cinematic compositions in such environments still requires significant cinematography skills and creativity. This paper presents Cinemassist, a tool designed to support and enhance this creative process by generating a variety of cinematic composition proposals at both keyframe and scene levels, which users can incorporate into their workflows and achieve more creative results. At the crux of our system is a deep generative model trained on real movie data, which can generate plausible, diverse camera poses conditioned on 3D animations and additional input semantics. Our model enables an interactive cinematic composition design workflow where users can co-design with the model by being inspired by model-generated suggestions while having control over the generation process. Our user study and expert rating find Cinemassist can facilitate the design process for users of different backgrounds and enhance the design quality especially for users with animation expertise, demonstrating its potential as an invaluable tool in the context of digital filmmaking.

+

Beyond mobile

+

picoRing: battery-free rings for subtle thumb-to-index input

+

Authors: Ryo Takahashi, Eric Whitmire, Roger Boldu, Shiu Ng, Wolf Kienzle, Hrvoje Benko

+

Link

+

Abstract: Smart rings for subtle, reliable finger input offer an attractive path for ubiquitous interaction with wearable computing platforms. However, compared to ordinary rings worn for cultural or fashion reasons, smart rings are much bulkier and less comfortable, largely due to the space required for a battery, which also limits the space available for sensors. This paper presents picoRing, a flexible sensing architecture that enables a variety of battery-free smart rings paired with a wristband. By inductively connecting a wristband-based sensitive reader coil with a ring-based fully-passive sensor coil, picoRing enables the wristband to stably detect the passive response from the ring via a weak inductive coupling. We demonstrate four different rings that support thumb-to-finger interactions like pressing, sliding, or scrolling. When users perform these interactions, the corresponding ring converts each input into a unique passive response through a network of passive switches. Combining the coil-based sensitive readout with the fully-passive ring design enables a tiny ring that weighs as little as 1.5 g and achieves a 13 cm stable readout despite finger bending and proximity to metal.

+

WatchLink: Enhancing Smartwatches with Sensor Add-Ons via ECG Interface

+

Authors: Anandghan Waghmare, Ishan Chatterjee, Vikram Iyer, Shwetak Patel

+

Link

+

Abstract: We introduce a low-power communication method that lets smartwatches leverage existing electrocardiogram (ECG) hardware as a data communication interface. Our unique approach enables the connection of external, inexpensive, and low-power "add-on" sensors to the smartwatch, expanding its functionalities. These sensors cater to specialized user needs beyond those offered by pre-built sensor suites, at a fraction of the cost and power of traditional communication protocols, including Bluetooth Low Energy. To demonstrate the feasibility of our approach, we conduct a series of exploratory and evaluative tests to characterize the ECG interface as a communication channel on commercial smartwatches. We design a simple transmission scheme using commodity components, demonstrating cost and power benefits. Further, we build and test a suite of add-on sensors, including UV light, body temperature, buttons, and breath alcohol, all of which achieved testing objectives at low material cost and power usage. This research paves the way for personalized and user-centric wearables by offering a cost-effective solution to expand their functionalities.

+

PrISM-Observer: Intervention Agent to Help Users Perform Everyday Procedures Sensed using a Smartwatch

+

Authors: Riku Arakawa, Hiromu Yakura, Mayank Goel

+

Link

+

Abstract: We routinely perform procedures (such as cooking) that include a set of atomic steps. Often, inadvertent omission or misordering of a single step can lead to serious consequences, especially for those experiencing cognitive challenges such as dementia. This paper introduces PrISM-Observer, a smartwatch-based, context-aware, real-time intervention system designed to support daily tasks by preventing errors. Unlike traditional systems that require users to seek out information, the agent observes user actions and intervenes proactively. This capability is enabled by the agent's ability to continuously update its belief about the user's behavior in real time through multimodal sensing and to forecast optimal intervention moments and methods. We first validated the step-tracking performance of our framework through evaluations across three datasets with different complexities. Then, we implemented a real-time agent system using a smartwatch and conducted a user study in a cooking task scenario. The system generated helpful interventions, and we gained positive feedback from the participants. The general applicability of PrISM-Observer to daily tasks promises broad applications, including, for instance, support for users requiring more involved interventions, such as people with dementia or post-surgical patients.

+

Validation in AI/ML

+

Natural Expression of a Machine Learning Model's Uncertainty Through Verbal and Non-Verbal Behavior of Intelligent Virtual Agents

+

Authors: Susanne Schmidt, Tim Rolff, Henrik Voigt, Micha Offe, Frank Steinicke

+

Link

+

Abstract: Uncertainty cues are inherent in natural human interaction, as they signal to communication partners how much they can rely on conveyed information. Humans subconsciously provide such signals both verbally (e.g., through expressions such as "maybe" or "I think") and non-verbally (e.g., by diverting their gaze). In contrast, artificial intelligence (AI)-based services and machine learning (ML) models such as ChatGPT usually do not disclose the reliability of answers to their users. In this paper, we explore the potential of combining ML models as powerful information sources with human means of expressing uncertainty to contextualize the information. We present a comprehensive pipeline that comprises (1) the human-centered collection of (non-)verbal uncertainty cues, (2) the transfer of cues to virtual agent videos, (3) the annotation of videos for perceived uncertainty, and (4) the subsequent training of a custom ML model that can generate uncertainty cues in virtual agent behavior. In a final step (5), the trained ML model is evaluated in terms of both fidelity and generalizability of the generated (non-)verbal uncertainty behavior.

+

Who Validates the Validators? Aligning LLM-Assisted Evaluation of LLM Outputs with Human Preferences

+

Authors: Shreya Shankar, J.D. Zamfirescu-Pereira, Bjoern Hartmann, Aditya Parameswaran, Ian Arawjo

+

Link

+

Abstract: Due to the cumbersome nature of human evaluation and limitations of code-based evaluation, Large Language Models (LLMs) are increasingly being used to assist humans in evaluating LLM outputs. Yet LLM-generated evaluators simply inherit all the problems of the LLMs they evaluate, requiring further human validation. We present a mixed-initiative approach to “validate the validators”— aligning LLM-generated evaluation functions (be it prompts or code) with human requirements. Our interface, EvalGen, provides automated assistance to users in generating evaluation criteria and implementing assertions. While generating candidate implementations (Python functions, LLM grader prompts), EvalGen asks humans to grade a subset of LLM outputs; this feedback is used to select implementations that better align with user grades. A qualitative study finds overall support for EvalGen but underscores the subjectivity and iterative nature of alignment. In particular, we identify a phenomenon we dub criteria drift: users need criteria to grade outputs, but grading outputs helps users define criteria. What is more, some criteria appear dependent on the specific LLM outputs observed (rather than independent and definable a priori), raising serious questions for approaches that assume the independence of evaluation from observation of model outputs. We present our interface and implementation details, a comparison of our algorithm with a baseline approach, and implications for the design of future LLM evaluation assistants.
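EvalGen's selection of evaluator implementations based on human grades is described only at a high level above. The snippet below is a minimal, generic sketch of that selection step, picking the candidate assertion whose pass/fail judgments best agree with a handful of human-graded outputs; it is not EvalGen's actual algorithm, criteria, or interface, and the example candidates are invented.

```python
# Generic sketch: choose the candidate assertion whose judgments best agree
# with human grades on a small graded sample (not EvalGen's algorithm).
def select_assertion(candidates, graded_outputs):
    """candidates: {name: fn(output) -> bool}; graded_outputs: [(output, human_ok)]."""
    def agreement(fn):
        return sum(fn(o) == ok for o, ok in graded_outputs) / len(graded_outputs)
    return max(candidates, key=lambda name: agreement(candidates[name]))

candidates = {
    "short_enough": lambda o: len(o) < 80,
    "mentions_source": lambda o: "source:" in o.lower(),
}
graded = [("Short answer, source: docs", True), ("Short but unsupported claim", False)]
print(select_assertion(candidates, graded))  # 'mentions_source' aligns better here
```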

+

LlamaTouch: A Faithful and Scalable Testbed for Mobile UI Task Automation

+

Authors: Li Zhang, Shihe Wang, Xianqing Jia, Zhihan Zheng, Yunhe Yan, Longxi Gao, Yuanchun Li, Mengwei Xu

+

Link

+

Abstract: The emergent large language/multimodal models facilitate the evolution of mobile agents, especially in mobile UI task automation. However, existing evaluation approaches, which rely on human validation or established datasets to compare agent-predicted actions with predefined action sequences, are unscalable and unfaithful. To overcome these limitations, this paper presents LlamaTouch, a testbed for on-device mobile UI task execution and faithful, scalable task evaluation. By observing that the task execution process only transfers UI states, LlamaTouch employs a novel evaluation approach that only assesses whether an agent traverses all manually annotated, essential application/system states. LlamaTouch comprises three key techniques: (1) On-device task execution that enables mobile agents to interact with realistic mobile environments for task execution. (2) Fine-grained UI component annotation that merges pixel-level screenshots and textual screen hierarchies to explicitly identify and precisely annotate essential UI components with a rich set of designed annotation primitives. (3) A multi-level application state matching algorithm that utilizes exact and fuzzy matching to accurately detect critical information in each screen, even with unpredictable UI layout/content dynamics. LlamaTouch currently incorporates four mobile agents and 496 tasks, encompassing both tasks in the widely-used datasets and our self-constructed ones to cover more diverse mobile applications. Evaluation results demonstrate LlamaTouch’s high faithfulness of evaluation in real-world mobile environments and its better scalability than human validation. LlamaTouch also enables easy task annotation and integration of new mobile agents. Code and dataset are publicly available at https://github.com/LlamaTouch/LlamaTouch.
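LlamaTouch's multi-level matching combines exact and fuzzy checks over annotated UI states. As a generic illustration only (not the testbed's matching pipeline), the snippet below checks one annotated text component against an observed screen with an exact match followed by a difflib-based fuzzy fallback; the threshold and screen contents are assumptions.

```python
# Generic exact-then-fuzzy match of an annotated UI text component against an
# observed screen (illustration only; not LlamaTouch's matching algorithm).
from difflib import SequenceMatcher

def component_matches(expected: str, observed_texts: list[str],
                      threshold: float = 0.8) -> bool:
    if expected in observed_texts:                      # exact match first
        return True
    return any(SequenceMatcher(None, expected.lower(), t.lower()).ratio() >= threshold
               for t in observed_texts)                 # fuzzy fallback

screen = ["Wi-Fi", "Airplane mode", "Bluetooth settings"]
print(component_matches("Bluetooth Settings", screen))  # True via fuzzy match
```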

+

Clarify: Improving Model Robustness With Natural Language Corrections

+

Authors: Yoonho Lee, Michelle Lam, Helena Vasconcelos, Michael Bernstein, Chelsea Finn

+

Link

+

Abstract: The standard way to teach models is by feeding them lots of data. However, this approach often teaches models incorrect ideas because they pick up on misleading signals in the data. To prevent such misconceptions, we must necessarily provide additional information beyond the training data. Prior methods incorporate additional instance-level supervision, such as labels for misleading features or additional labels for debiased data. However, such strategies require a large amount of labeler effort. We hypothesize that people are good at providing textual feedback at the concept level, a capability that existing teaching frameworks do not leverage. We propose Clarify, a novel interface and method for interactively correcting model misconceptions. Through Clarify, users need only provide a short text description of a model's consistent failure patterns. Then, in an entirely automated way, we use such descriptions to improve the training process. Clarify is the first end-to-end system for user model correction. Our user studies show that non-expert users can successfully describe model misconceptions via Clarify, leading to increased worst-case performance in two datasets. We additionally conduct a case study on a large-scale image dataset, ImageNet, using Clarify to find and rectify 31 novel hard subpopulations.

+

"The Data Says Otherwise" – Towards Automated Fact-checking and Communication of Data Claims

+

Authors: Yu Fu, Shunan Guo, Jane Hoffswell, Victor S. Bursztyn, Ryan Rossi, John Stasko

+

Link

+

Abstract: Fact-checking data claims requires data evidence retrieval and analysis, which can become tedious and intractable when done manually. This work presents Aletheia, an automated fact-checking prototype designed to facilitate data claims verification and enhance data evidence communication. For verification, we utilize a pre-trained LLM to parse the semantics for evidence retrieval. To effectively communicate the data evidence, we design representations in two forms: data tables and visualizations, tailored to various data fact types. Additionally, we design interactions that showcase a real-world application of these techniques. We evaluate the performance of two core NLP tasks with a curated dataset comprising 400 data claims and compare the two representation forms regarding viewers’ assessment time, confidence, and preference via a user study with 20 participants. The evaluation offers insights into the feasibility and bottlenecks of using LLMs for data fact-checking tasks, potential advantages and disadvantages of using visualizations over data tables, and design recommendations for presenting data evidence.

+

A11y

+

ProgramAlly: Creating Custom Visual Access Programs via Multi-Modal End-User Programming

+

Authors: Jaylin Herskovitz, Andi Xu, Rahaf Alharbi, Anhong Guo

+

Link

+

Abstract: Existing visual assistive technologies are built for simple and common use cases, and have few avenues for blind people to customize their functionalities. Drawing from prior work on DIY assistive technology, this paper investigates end-user programming as a means for users to create and customize visual access programs to meet their unique needs. We introduce ProgramAlly, a system for creating custom filters for visual information, e.g., 'find NUMBER on BUS', leveraging three end-user programming approaches: block programming, natural language, and programming by example. To implement ProgramAlly, we designed a representation of visual filtering tasks based on scenarios encountered by blind people, and integrated a set of on-device and cloud models for generating and running these programs. In user studies with 12 blind adults, we found that participants preferred different programming modalities depending on the task, and envisioned using visual access programs to address unique accessibility challenges that are otherwise difficult with existing applications. Through ProgramAlly, we present an exploration of how blind end-users can create visual access programs to customize and control their experiences.

+

Accessible Gesture Typing on Smartphones for People with Low Vision

+

Authors: Dan Zhang, Zhi Li, Vikas Ashok, William H Seiple, IV Ramakrishnan, Xiaojun Bi

+

Link

+

Abstract: While gesture typing is widely adopted on touchscreen keyboards, its support for low vision users is limited. We have designed and implemented two keyboard prototypes, layout-magnified and key-magnified keyboards, to enable gesture typing for people with low vision. Both keyboards facilitate uninterrupted access to all keys while the screen magnifier is active, allowing people with low vision to input text with one continuous stroke. Furthermore, we have created a kinematics-based decoding algorithm to accommodate the typing behavior of people with low vision. This algorithm can decode the gesture input even if the gesture trace deviates from a pre-defined word template, and the starting position of the gesture is far from the starting letter of the target word. Our user study showed that the key-magnified keyboard achieved 5.28 words per minute, 27.5% faster than a conventional gesture typing keyboard with voice feedback.
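The paper's decoder is kinematics-based and tolerant of traces that drift from the word template; that algorithm is not reproduced here. As a generic template-matching illustration only, the sketch below scores word templates against a drawn trace with dynamic time warping and picks the closest; the key coordinates and trace points are made up.

```python
# Generic template-matching sketch for gesture typing (not the paper's
# kinematics-based decoder): score each word template by DTW distance
# to the drawn trace and pick the closest word.
def dtw(a: list[tuple[float, float]], b: list[tuple[float, float]]) -> float:
    inf = float("inf")
    d = [[inf] * (len(b) + 1) for _ in range(len(a) + 1)]
    d[0][0] = 0.0
    for i, (ax, ay) in enumerate(a, 1):
        for j, (bx, by) in enumerate(b, 1):
            cost = ((ax - bx) ** 2 + (ay - by) ** 2) ** 0.5
            d[i][j] = cost + min(d[i - 1][j], d[i][j - 1], d[i - 1][j - 1])
    return d[len(a)][len(b)]

templates = {                      # assumed key-center coordinates per word
    "hi": [(6.0, 1.0), (7.5, 0.0)],
    "ho": [(6.0, 1.0), (8.5, 0.0)],
}
trace = [(6.1, 1.1), (7.4, 0.2)]   # noisy user gesture samples
print(min(templates, key=lambda w: dtw(trace, templates[w])))  # 'hi'
```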

+

AccessTeleopKit: A Toolkit for Creating Accessible Web-Based Interfaces for Tele-Operating an Assistive Robot

+

Authors: Vinitha Ranganeni, Varad Dhat, Noah Ponto, Maya Cakmak

+

Link

+

Abstract: Mobile manipulator robots, which can move around and physically interact with their environments, can empower people with motor limitations to independently carry out many activities of daily living. While many interfaces have been developed for tele-operating complex robots, most of them are not accessible to people with severe motor limitations. Further, most interfaces are rigid with limited configurations and are not readily available to download and use. To address these barriers, we developed AccessTeleopKit: an open-source toolkit for creating custom and accessible robot tele-operation interfaces based on cursor-and-click input for the Stretch 3 mobile-manipulator. With AccessTeleopKit users can add, remove, and rearrange components such as buttons and camera views, and select between a variety of control modes. We describe the participatory and iterative design process that led to the current implementation of AccessTeleopKit, involving three long-term deployments of the robot in the home of a quadriplegic user. We demonstrate how AccessTeleopKit allowed the user to create different interfaces for different tasks and the diversity of tasks it allowed the user to carry out. We also present two studies involving six additional users with severe motor limitations, demonstrating the power of AccessTeleopKit in creating custom interfaces for different user needs and preferences.

+

Memory Reviver: Supporting Photo-Collection Reminiscence for People with Visual Impairment via a Proactive Chatbot

+

Authors: Shuchang Xu, Chang Chen, Zichen LIU, Xiaofu Jin, Linping Yuan, Yukang Yan, Huamin Qu

+

Link

+

Abstract: Reminiscing with photo collections offers significant psychological benefits but poses challenges for people with visual impairment (PVI). Their current reliance on sighted help restricts the flexibility of this activity. In response, we explored using a chatbot in a preliminary study. We identified two primary challenges that hinder effective reminiscence with a chatbot: the scattering of information and a lack of proactive guidance. To address these limitations, we present Memory Reviver, a proactive chatbot that helps PVI reminisce with a photo collection through natural language communication. Memory Reviver incorporates two novel features: (1) a Memory Tree, which uses a hierarchical structure to organize the information in a photo collection; and (2) a Proactive Strategy, which actively delivers information to users at proper conversation rounds. Evaluation with twelve PVI demonstrated that Memory Reviver effectively facilitated engaging reminiscence, enhanced understanding of photo collections, and delivered natural conversational experiences. Based on our findings, we distill implications for supporting photo reminiscence and designing chatbots for PVI.

+

VizAbility: Enhancing Chart Accessibility with LLM-based Conversational Interaction

+

Authors: Joshua Gorniak, Yoon Kim, Donglai Wei, Nam Wook Kim

+

Link

+

Abstract: Traditional accessibility methods like alternative text and data tables typically underrepresent data visualization's full potential. Keyboard-based chart navigation has emerged as a potential solution, yet efficient data exploration remains challenging. We present VizAbility, a novel system that enriches chart content navigation with conversational interaction, enabling users to use natural language for querying visual data trends. VizAbility adapts to the user's navigation context for improved response accuracy and facilitates verbal command-based chart navigation. Furthermore, it can address queries for contextual information, designed to address the needs of visually impaired users. We designed a large language model (LLM)-based pipeline to address these user queries, leveraging chart data & encoding, user context, and external web knowledge. We conducted both qualitative and quantitative studies to evaluate VizAbility's multimodal approach. We discuss further opportunities based on the results, including improved benchmark testing, incorporation of vision models, and integration with visualization workflows.

+

Computational Trichromacy Reconstruction: Empowering the Color-Vision Deficient to Recognize Colors Using Augmented Reality

+

Authors: Yuhao Zhu, Ethan Chen, Colin Hascup, Yukang Yan, Gaurav Sharma

+

Link

+

Abstract: We propose an assistive technology that helps individuals with Color Vision Deficiencies (CVD) to recognize/name colors. A dichromat's color perception is a reduced two-dimensional (2D) subset of a normal trichromat's three-dimensional (3D) color perception, leading to confusion when visual stimuli that appear identical to the dichromat are referred to by different color names. Using our proposed system, CVD individuals can interactively induce distinct perceptual changes to originally confusing colors via a computational color space transformation. By combining their original 2D percepts for colors with the discriminative changes, a three-dimensional color space is reconstructed, where the dichromat can learn to resolve color name confusions and accurately recognize colors. Our system is implemented as an Augmented Reality (AR) interface on smartphones, where users interactively control the rotation through swipe gestures and observe the induced color shifts in the camera view or in a displayed image. Through psychophysical experiments and a longitudinal user study, we demonstrate that such rotational color shifts have discriminative power (initially confusing colors become distinct under rotation) and exhibit structured perceptual shifts dichromats can learn with modest training. The AR app is also evaluated in two real-world scenarios (building with Lego blocks and interpreting artistic works); users all report positive experience in using the app to recognize object colors that they otherwise could not.
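The core interaction is a user-controlled rotation applied in color space. As a loose, generic illustration only (the paper's transform is calibrated to the dichromat's confusion axis and is not reproduced here), a plain hue rotation of two colors in HLS space looks like this:

```python
# Generic illustration of rotating colors in a chromatic plane so a
# confusable pair can be separated interactively (not the paper's
# calibrated CVD transform).
import colorsys

def rotate_hue(rgb: tuple[float, float, float], degrees: float) -> tuple[float, float, float]:
    """Rotate an RGB color's hue; components are in [0, 1]."""
    h, l, s = colorsys.rgb_to_hls(*rgb)
    h = (h + degrees / 360.0) % 1.0
    return colorsys.hls_to_rgb(h, l, s)

red, green = (0.8, 0.2, 0.2), (0.2, 0.8, 0.2)   # a classic red/green confusion pair
for angle in (0, 30, 60):                        # e.g. swipe-controlled rotation steps
    print(angle,
          [round(c, 2) for c in rotate_hue(red, angle)],
          [round(c, 2) for c in rotate_hue(green, angle)])
```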

+

Contextual Augmentations

+

StreetNav: Leveraging Street Cameras to Support Precise Outdoor Navigation for Blind Pedestrians

+

Authors: Gaurav Jain, Basel Hindi, Zihao Zhang, Koushik Srinivasula, Mingyu Xie, Mahshid Ghasemi, Daniel Weiner, Sophie Ana Paris, Xin Yi Therese Xu, Michael Malcolm, Mehmet Kerem Turkcan, Javad Ghaderi, Zoran Kostic, Gil Zussman, Brian Smith

+

Link

+

Abstract: Blind and low-vision (BLV) people rely on GPS-based systems for outdoor navigation. GPS's inaccuracy, however, causes them to veer off track, run into obstacles, and struggle to reach precise destinations. While prior work has made precise navigation possible indoors via hardware installations, enabling this outdoors remains a challenge. Interestingly, many outdoor environments are already instrumented with hardware such as street cameras. In this work, we explore the idea of repurposing existing street cameras for outdoor navigation. Our community-driven approach considers both technical and sociotechnical concerns through engagements with various stakeholders: BLV users, residents, business owners, and Community Board leadership. The resulting system, StreetNav, processes a camera's video feed using computer vision and gives BLV pedestrians real-time navigation assistance. Our evaluations show that StreetNav guides users more precisely than GPS, but its technical performance is sensitive to environmental occlusions and distance from the camera. We discuss future implications for deploying such systems at scale.

+

WorldScribe: Towards Context-Aware Live Visual Descriptions

+

Best Paper Award

+

Authors: Ruei-Che Chang, Yuxuan Liu, Anhong Guo

+

Link

+

Abstract: Automated live visual descriptions can aid blind people in understanding their surroundings with autonomy and independence. However, providing descriptions that are rich, contextual, and just-in-time has been a long-standing challenge in accessibility. In this work, we develop WorldScribe, a system that generates automated live real-world visual descriptions that are customizable and adaptive to users' contexts: (i) WorldScribe's descriptions are tailored to users' intents and prioritized based on semantic relevance. (ii) WorldScribe is adaptive to visual contexts, e.g., providing consecutively succinct descriptions for dynamic scenes, while presenting longer and detailed ones for stable settings. (iii) WorldScribe is adaptive to sound contexts, e.g., increasing volume in noisy environments, or pausing when conversations start. Powered by a suite of vision, language, and sound recognition models, WorldScribe introduces a description generation pipeline that balances the tradeoffs between their richness and latency to support real-time use. The design of WorldScribe is informed by prior work on providing visual descriptions and a formative study with blind participants. Our user study and subsequent pipeline evaluation show that WorldScribe can provide real-time and fairly accurate visual descriptions to facilitate environment understanding that is adaptive and customized to users' contexts. Finally, we discuss the implications and further steps toward making live visual descriptions more context-aware and humanized.

+

CookAR: Affordance Augmentations in Wearable AR to Support Kitchen Tool Interactions for People with Low Vision

+

Authors: Jaewook Lee, Andrew Tjahjadi, Jiho Kim, Junpu Yu, Minji Park, Jiawen Zhang, Jon Froehlich, Yapeng Tian, Yuhang Zhao

+

Link

+

Abstract: Cooking is a central activity of daily living, supporting independence as well as mental and physical health. However, prior work has highlighted key barriers for people with low vision (LV) to cook, particularly around safely interacting with tools, such as sharp knives or hot pans. Drawing on recent advancements in computer vision (CV), we present CookAR, a head-mounted AR system with real-time object affordance augmentations to support safe and efficient interactions with kitchen tools. To design and implement CookAR, we collected and annotated the first egocentric dataset of kitchen tool affordances, fine-tuned an affordance segmentation model, and developed an AR system with a stereo camera to generate visual augmentations. To validate CookAR, we conducted a technical evaluation of our fine-tuned model as well as a qualitative lab study with 10 LV participants for suitable augmentation design. Our technical evaluation demonstrates that our model outperforms the baseline on our tool affordance dataset, while our user study indicates a preference for affordance augmentations over the traditional whole object augmentations.

+

DesignChecker: Visual Design Support for Blind and Low Vision Web Developers

+

Authors: Mina Huh, Amy Pavel

+

Link

+

Abstract: Blind and low vision (BLV) developers create websites to share knowledge and showcase their work. A well-designed website can engage audiences and deliver information effectively, yet it remains challenging for BLV developers to review their web designs. We conducted interviews with BLV developers (N=9) and analyzed 20 websites created by BLV developers. BLV developers created highly accessible websites but wanted to assess the usability of their websites for sighted users and follow the design standards of other websites. They also encountered challenges using screen readers to identify illegible text, misaligned elements, and inharmonious colors. We present DesignChecker, a browser extension that helps BLV developers improve their web designs. With DesignChecker, users can assess their current design by comparing it to visual design guidelines, a reference website of their choice, or a set of similar websites. DesignChecker also identifies the specific HTML elements that violate design guidelines and suggests CSS changes for improvements. Our user study participants (N=8) recognized more visual design errors than using their typical workflow and expressed enthusiasm about using DesignChecker in the future.

+

Dynamic Objects & Materials

+

MagneDot: Integrated Fabrication and Actuation Methods of Dot-Based Magnetic Shape Displays

+

Authors: Lingyun Sun, Yitao Fan, Boyu Feng, Yifu Zhang, Deying Pan, Yiwen Ren, Yuyang Zhang, Qi Wang, Ye Tao, Guanyun Wang

+

Link

+

Abstract: This paper presents MagneDot, a novel method for making interactive magnetic shape displays through an integrated fabrication process. Magnetic soft materials can potentially create fast, responsive morphing structures for interactions. However, novice users and designers typically do not have access to sophisticated equipment and materials or cannot afford heavy labor to create interactive objects based on this material. Modified from an open-source 3D printer, the fabrication system of MagneDot integrates the processes of mold-making, pneumatic extrusion, magnetization, and actuation, using cost-effective materials only. By providing a design tool, MagneDot allows users to generate G-codes for fabricating and actuating displays of various morphing effects. Finally, a series of design examples demonstrate the possibilities of shape displays enabled by MagneDot.
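MagneDot's design tool emits G-code for a modified 3D printer. The snippet below is only a generic sketch of turning a list of dot positions into G-code moves; the command set, coordinates, and the actuation placeholder are assumptions, not MagneDot's actual output format.

```python
# Generic G-code emission sketch (illustrative only; MagneDot's real output
# covers mold-making, extrusion, magnetization, and actuation steps).
def dots_to_gcode(dots: list[tuple[float, float]], feed: int = 1200) -> str:
    lines = ["G21 ; millimeters", "G90 ; absolute positioning"]
    for x, y in dots:
        lines.append(f"G0 X{x:.2f} Y{y:.2f} F{feed} ; move to dot")
        lines.append("; <extrude/magnetize dot here - machine-specific step>")
    return "\n".join(lines)

print(dots_to_gcode([(10.0, 10.0), (20.0, 10.0)]))
```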

+

CARDinality: Interactive Card-shaped Robots with Locomotion and Haptics using Vibration

+

Authors: Aditya Retnanto, Emilie Faracci, Anup Sathya, Yu-Kai Hung, Ken Nakagaki

+

Link

+

Abstract: This paper introduces a novel approach to interactive robots by leveraging the form-factor of cards to create thin robots equipped with vibrational capabilities for locomotion and haptic feedback. The system is composed of flat-shaped robots with on-device sensing and wireless control, which offer lightweight portability and scalability. This research introduces a hardware prototype to explore the possibility of ‘vibration-based omni-directional sliding locomotion’. Applications include augmented card playing, educational tools, and assistive technology, which showcase CARDinality’s versatility in tangible interaction.

+

PortaChrome: A Portable Contact Light Source for Integrated Re-Programmable Multi-Color Textures

+

Authors: Yunyi Zhu, Cedric Honnet, Yixiao Kang, Junyi Zhu, Angelina Zheng, Kyle Heinz, Grace Tang, Luca Musk, Michael Wessely, Stefanie Mueller

+

Link

+

Abstract: In this paper, we present PortaChrome, a portable light source that can be attached to everyday objects to reprogram the color and texture of surfaces that come in contact with them. When PortaChrome makes contact with objects previously coated with photochromic dye, the UV and RGB LEDs inside PortaChrome create multi-color textures on the objects. In contrast to prior work, which used projectors for the color-change, PortaChrome has a thin and flexible form factor, which allows the color-change process to be integrated into everyday user interaction. Because of the close distance between the light source and the photochromic object, PortaChrome creates color textures in less than 4 minutes on average, which is 8 times faster than prior work. We demonstrate PortaChrome with four application examples, including data visualizations on textiles and dynamic designs on wearables.

+

Augmented Object Intelligence with XR-Objects

+

Authors: Mustafa Doga Dogan, Eric Gonzalez, Karan Ahuja, Ruofei Du, Andrea Colaço, Johnny Lee, Mar Gonzalez-Franco, David Kim

+

Link

+

Abstract: Seamless integration of physical objects as interactive digital entities remains a challenge for spatial computing. This paper explores Augmented Object Intelligence (AOI) in the context of XR, an interaction paradigm that aims to blur the lines between digital and physical by equipping real-world objects with the ability to interact as if they were digital, where every object has the potential to serve as a portal to digital functionalities. Our approach utilizes real-time object segmentation and classification, combined with the power of Multimodal Large Language Models (MLLMs), to facilitate these interactions without the need for object pre-registration. We implement the AOI concept in the form of XR-Objects, an open-source prototype system that provides a platform for users to engage with their physical environment in contextually relevant ways using object-based context menus. This system enables analog objects to not only convey information but also to initiate digital actions, such as querying for details or executing tasks. Our contributions are threefold: (1) we define the AOI concept and detail its advantages over traditional AI assistants, (2) detail the XR-Objects system’s open-source design and implementation, and (3) show its versatility through various use cases and a user study.

+

Generating Visuals

+

ShadowMagic: Designing Human-AI Collaborative Support for Comic Professionals’ Shadowing

+

Authors: Amrita Ganguly, Chuan Yan, John Chung, Tong Sun, YOON KIHEON, Yotam Gingold, Sungsoo Ray Hong

+

Link

+

Abstract: Shadowing allows artists to convey realistic volume and emotion of characters in comic colorization. While AI technologies have the potential to improve professionals’ shadowing experience, current practice is manual and time-consuming. To understand how we can improve their shadowing experience, we conducted interviews with 5 professionals. We found that professionals’ level of engagement can vary depending on semantics, such as characters’ faces or hair. We also found they spent time on shadow “landscaping”—deciding where to place large shadow regions to create a realistic volumetric presentation while the final results can vary dramatically depending on their “staging” and “attention guiding” needs. We discovered they would accept AI suggestions for less engaging semantic parts or landscaping, while needing the capability to adjust details. Based on our observations, we developed ShadowMagic, which (1) generates AI-driven shadows based on commonly used light directions, (2) enables users to selectively choose results depending on semantics, and (3) allows users to complete shadow areas themselves for further perfection. Through a summative evaluation with 5 professionals, we found that they were significantly more satisfied with our AI-driven results compared to a baseline. We also found that ShadowMagic’s “step by step” workflow helps participants more easily adopt AI-driven results. We conclude by providing implications.

+

What's the Game, then? Opportunities and Challenges for Runtime Behavior Generation

+

BEST_PAPER

+

Authors: Nicholas Jennings, Han Wang, Isabel Li, James Smith, Bjoern Hartmann

+

Link

+

Abstract: Procedural content generation (PCG), the process of creating game components algorithmically rather than manually, has been a common tool of game development for decades. Recent advances in large language models (LLMs) enable the generation of game behaviors based on player input at runtime. Such code generation brings with it the possibility of entirely new gameplay interactions that may be difficult to integrate with typical game development workflows. We explore these implications through GROMIT, a novel LLM-based runtime behavior generation system for Unity. When triggered by a player action, GROMIT generates a relevant behavior which is compiled without developer intervention and incorporated into the game. We create three demonstration scenarios with GROMIT to investigate how such a technology might be used in game development. In a system evaluation we find that our implementation is able to produce behaviors that result in significant downstream impacts to gameplay. We then conduct an interview study with n=13 game developers using GROMIT as a probe to elicit their current opinion on runtime behavior generation tools, and enumerate the specific themes curtailing the wider use of such tools. We find that the main themes of concern are quality considerations, community expectations, and fit with developer workflows, and that several of the subthemes are unique to runtime behavior generation specifically. We outline a future work agenda to address these concerns, including the need for additional guardrail systems for behavior generation.

+

StyleFactory: Towards Better Style Alignment in Image Creation through Style-Strength-Based Control and Evaluation

+

Authors: Mingxu Zhou, Dengming Zhang, Weitao You, Ziqi Yu, Yifei Wu, Chenghao Pan, Huiting Liu, Tianyu Lao, Pei Chen

+

Link

+

Abstract: Generative AI models have been widely used for image creation. However, generating images that are well-aligned with users' personal styles on aesthetic features (e.g., color and texture) can be challenging due to the poor style expression and interpretation between humans and models. Through a formative study, we observed that participants showed a clear subjective perception of the desired style and variations in its strength, which directly inspired us to develop style-strength-based control and evaluation. Building on this, we present StyleFactory, an interactive system that helps users achieve style alignment. Our interface enables users to rank images based on their strengths in the desired style and visualizes the strength distribution of other images in that style from the model's perspective. In this way, users can evaluate the understanding gap between themselves and the model, and define well-aligned personal styles for image creation through targeted iterations. Our technical evaluation and user study demonstrate that StyleFactory accurately generates images in specific styles, effectively facilitates style alignment in image creation workflow, stimulates creativity, and enhances the user experience in human-AI interactions.

+

AutoSpark: Supporting Automobile Appearance Design Ideation with Kansei Engineering and Generative AI

+

Authors: Liuqing Chen, Qianzhi Jing, Yixin Tsang, Qianyi Wang, Ruocong Liu, Duowei Xia, Yunzhan Zhou, Lingyun Sun

+

Link

+

Abstract: Rapid creation of novel product appearance designs that align with consumer emotional requirements poses a significant challenge. Text-to-image models, with their excellent image generation capabilities, have demonstrated potential in providing inspiration to designers. However, designers still encounter issues including aligning emotional needs, expressing design intentions, and comprehending generated outcomes in practical applications. To address these challenges, we introduce AutoSpark, an interactive system that integrates Kansei Engineering and generative AI to provide creativity support for designers in creating automobile appearance designs that meet emotional needs. AutoSpark employs a Kansei Engineering engine powered by generative AI and a semantic network to assist designers in emotional need alignment, design intention expression, and prompt crafting. It also facilitates designers' understanding and iteration of generated results through fine-grained image-image similarity comparisons and text-image relevance assessments. The design-thinking map within its interface aids in managing the design process. Our user study indicates that AutoSpark effectively aids designers in producing designs that are more aligned with emotional needs and of higher quality compared to a baseline system, while also enhancing the designers' experience in the human-AI co-creation process.

+

Movement-based UIs

+

Feminist Interaction Techniques: Social Consent Signals to Deter NCIM Screenshots

+

Authors: Li Qiwei, Francesca Lameiro, Shefali Patel, Cristi Isaula-Reyes, Eytan Adar, Eric Gilbert, Sarita Schoenebeck

+

Link

+

Abstract: Non-consensual Intimate Media (NCIM) refers to the distribution of sexual or intimate content without consent. NCIM is common and causes significant emotional, financial, and reputational harm. We developed Hands-Off, an interaction technique for messaging applications that deters non-consensual screenshots. Hands-Off requires recipients to perform a hand gesture in the air, above the device, to unlock media—which makes simultaneous screenshotting difficult. A lab study shows that Hands-Off gestures are easy to perform and reduce non-consensual screenshots by 67%. We conclude by generalizing this approach, introducing the idea of Feminist Interaction Techniques (FIT), interaction techniques that encode feminist values and speak to societal problems, and reflecting on FIT’s opportunities and limitations.

+

Effects of Computer Mouse Lift-off Distance Settings in Mouse Lifting Action

+

Authors: Munjeong Kim, Sunjun Kim

+

Link

+

Abstract: This study investigates the effect of Lift-off Distance (LoD) on a computer mouse, which refers to the height at which a mouse sensor stops tracking when lifted off the surface. Although a low LoD is generally preferred to avoid unintentional cursor movement during mouse lifting (i.e., clutching), especially in first-person shooter games, it may reduce tracking stability. We conducted a psychophysical experiment to measure the perceptible differences between LoD levels and quantitatively measured the unintentional cursor movement error and tracking stability at four levels of LoD while users performed mouse lifting. The results showed a trade-off between movement error and tracking stability at varying levels of LoD. Our findings offer valuable information on optimal LoD settings, which could serve as a guide for choosing a proper mouse device for enthusiastic gamers.

+

DisMouse: Disentangling Information from Mouse Movement Data

+

Authors: Guanhua Zhang, Zhiming Hu, Andreas Bulling

+

Link

+

Abstract: Mouse movement data contain rich information about users, performed tasks, and user interfaces, but separating the respective components remains challenging and unexplored. As a first step to address this challenge, we propose DisMouse – the first method to disentangle user-specific and user-independent information and stochastic variations from mouse movement data. At the core of our method is an autoencoder trained in a semi-supervised fashion, consisting of a self-supervised denoising diffusion process and a supervised contrastive user identification module. Through evaluations on three datasets, we show that DisMouse 1) captures complementary information of mouse input, hence providing an interpretable framework for modelling mouse movements, 2) can be used to produce refined features, thus enabling various applications such as personalised and variable mouse data generation, and 3) generalises across different datasets. Taken together, our results underline the significant potential of disentangled representation learning for explainable, controllable, and generalised mouse behaviour modelling.

+

Wheeler: A Three-Wheeled Input Device for Usable, Efficient, and Versatile Non-Visual Interaction

+

HONORABLE_MENTION

+

Authors: Md Touhidul Islam, Noushad Sojib, Imran Kabir, Ashiqur Rahman Amit, Mohammad Ruhul Amin, Syed Masum Billah

+

Link

+

Abstract: Blind users rely on keyboards and assistive technologies like screen readers to interact with user interface (UI) elements. In modern applications with complex UI hierarchies, navigating to different UI elements poses a significant accessibility challenge. Users must listen to screen reader audio descriptions and press relevant keyboard keys one at a time. This paper introduces Wheeler, a novel three-wheeled, mouse-shaped stationary input device, to address this issue. Informed by participatory sessions, Wheeler enables blind users to navigate up to three hierarchical levels in an app independently using three wheels instead of navigating just one level at a time using a keyboard. The three wheels also offer versatility, allowing users to repurpose them for other tasks, such as 2D cursor manipulation. A study with 12 blind users indicates a significant reduction (40%) in navigation time compared to using a keyboard. Further, a diary study with our blind co-author highlights Wheeler's additional benefits, such as accessing UI elements with partial metadata and facilitating mixed-ability collaboration.

+

Hacking Perception

+

Predicting the Limits: Tailoring Unnoticeable Hand Redirection Offsets in Virtual Reality to Individuals’ Perceptual Boundaries

+

Authors: Martin Feick, Kora Regitz, Lukas Gehrke, André Zenner, Anthony Tang, Tobias Jungbluth, Maurice Rekrut, Antonio Krüger

+

Link

+

Abstract: Many illusion and interaction techniques in Virtual Reality (VR) rely on Hand Redirection (HR), which has proved to be effective as long as the introduced offsets between the position of the real and virtual hand do not noticeably disturb the user experience. Yet calibrating HR offsets is a tedious and time-consuming process involving psychophysical experimentation, and the resulting thresholds are known to be affected by many variables—limiting HR's practical utility. As a result, there is a clear need for alternative methods that allow tailoring HR to the perceptual boundaries of individual users. We conducted an experiment with 18 participants combining movement, eye gaze and EEG data to detect HR offsets Below, At, and Above individuals' detection thresholds. Our results suggest that we can distinguish HR At and Above from no HR. Our exploration provides a promising new direction with potentially strong implications for the broad field of VR illusions.

+

Modulating Heart Activity and Task Performance using Haptic Heartbeat Feedback: A Study Across Four Body Placements

+

Authors: Andreia Valente, Dajin Lee, Seungmoon Choi, Mark Billinghurst, Augusto Esteves

+

Link

+

Abstract: This paper explores the impact of vibrotactile haptic feedback on heart activity when the feedback is provided at four different body locations (chest, wrist, neck, and ankle) and with two feedback rates (50 bpm and 110 bpm). A user study found that the neck placement resulted in higher heart rates and lower heart rate variability, and higher frequencies correlated with increased heart rates and decreased heart rate variability. The chest was preferred in self-reported metrics, and neck placement was perceived as less satisfying, harmonious, and immersive. This research contributes to understanding the interplay between psychological experiences and physiological responses when using haptic biofeedback resembling real body signals.
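
As a rough illustration of the feedback rates used in this study, the sketch below times vibrotactile "heartbeat" pulses at a given bpm; `pulse()` is a stand-in for driving an actual actuator, not part of the study's apparatus.

```python
# Sketch: timing vibrotactile heartbeat pulses at a given rate (e.g. 50 or
# 110 bpm, as in the study above). `pulse()` is a placeholder for a real
# actuator driver; here it just prints timestamps.
import time

def pulse(duration_s: float = 0.05) -> None:
    print(f"pulse at t={time.monotonic():.2f}s")
    time.sleep(duration_s)

def play_heartbeat(bpm: int, beats: int = 5) -> None:
    interval = 60.0 / bpm          # seconds between beats (1.2 s at 50 bpm)
    for _ in range(beats):
        pulse()
        time.sleep(max(interval - 0.05, 0))

if __name__ == "__main__":
    play_heartbeat(bpm=50, beats=3)
    play_heartbeat(bpm=110, beats=3)
```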

+

Augmented Breathing via Thermal Feedback in the Nose

+

Authors: Jas Brooks, Alex Mazursky, Janice Hixon, Pedro Lopes

+

Link

+

Abstract: We propose, engineer, and study a novel method to augment the feeling of breathing—enabling interactive applications to let users feel like they are inhaling more/less air (perceived nasal airflow). We achieve this effect by cooling or heating the nose in sync with the user’s inhalation. Our illusion builds on the physiology of breathing: we perceive our breath predominantly through the cooling of our nasal cavities during inhalation. This is why breathing in a “fresh” cold environment feels easier than in a “stuffy” hot environment, even when the inhaled volume is the same. Our psychophysical study confirmed that our in-nose temperature stimulation significantly influenced breathing perception in both directions: making it feel harder & easier to breathe. Further, we found that ~90% of the trials were described as a change in perceived airflow/breathing, while only ~8% as temperature. Following, we engineered a compact device worn across the septum that uses Peltier elements. We illustrate the potential of this augmented breathing in interactive contexts, such as for virtual reality (e.g., rendering ease of breathing crisp air or difficulty breathing with a deteriorated gas mask) and everyday interactions (e.g., in combination with a relaxation application or to alleviate the perceived breathing resistance when wearing a mask).

+

Thermal In Motion: Designing Thermal Flow Illusions with Tactile and Thermal Interaction

+

Authors: Yatharth Singhal, Daniel Honrales, Haokun Wang, Jin Ryong Kim

+

Link

+

Abstract: This study presents a novel method for creating moving thermal sensations by integrating the thermal referral illusion with tactile motion. Conducted through three experiments on human forearms, the first experiment examined the impact of temperature and thermal actuator placement on perceived thermal motion, finding the clearest perception with a centrally positioned actuator under both hot and cold conditions. The second experiment identified the speed thresholds of perceived thermal motion, revealing a wider detectable range in hot conditions (1.8 cm/s to 9.5 cm/s) compared to cold conditions (2.4 cm/s to 5.0 cm/s). Finally, we integrated our approach into virtual reality (VR) to assess its feasibility through two interaction scenarios. Our results shed light on the comprehension of thermal perception and its integration with tactile cues, promising significant advancements in incorporating thermal motion into diverse thermal interfaces for immersive VR experiences.

+

New realities

+

SIM2VR: Towards Automated Biomechanical Testing in VR

+

Authors: Florian Fischer, Aleksi Ikkala, Markus Klar, Arthur Fleig, Miroslav Bachinski, Roderick Murray-Smith, Perttu Hämäläinen, Antti Oulasvirta, Jörg Müller

+

Link

+

Abstract: Automated biomechanical testing has great potential for the development of VR applications, as initial insights into user behaviour can be gained in silico early in the design process. In particular, it allows prediction of user movements and ergonomic variables, such as fatigue, prior to conducting user studies. However, there is a fundamental disconnect between simulators hosting state-of-the-art biomechanical user models and simulators used to develop and run VR applications. Existing user simulators often struggle to capture the intricacies of real-world VR applications, reducing ecological validity of user predictions. In this paper, we introduce SIM2VR, a system that aligns user simulation with a given VR application by establishing a continuous closed loop between the two processes. This, for the first time, enables training simulated users directly in the same VR application that real users interact with. We demonstrate that SIM2VR can predict differences in user performance, ergonomics and strategies in a fast-paced, dynamic arcade game. In order to expand the scope of automated biomechanical testing beyond simple visuomotor tasks, advances in cognitive models and reward function design will be needed.

+

Hands-on, Hands-off: Gaze-Assisted Bimanual 3D Interaction

+

Authors: Mathias Lystbæk, Thorbjørn Mikkelsen, Roland Krisztandl, Eric Gonzalez, Mar Gonzalez-Franco, Hans Gellersen, Ken Pfeuffer

+

Link

+

Abstract: Extended Reality (XR) systems with hand-tracking support direct manipulation of objects with both hands. A common interaction in this context is for the non-dominant hand (NDH) to orient an object for input by the dominant hand (DH). We explore bimanual interaction with gaze through three new modes of interaction where the input of the NDH, DH, or both hands is indirect based on Gaze+Pinch. These modes enable a new dynamic interplay between our hands, allowing flexible alternation between and pairing of complementary operations. Through applications, we demonstrate several use cases in the context of 3D modelling, where users exploit occlusion-free, low-effort, and fluid two-handed manipulation. To gain a deeper understanding of each mode, we present a user study on an asymmetric rotate-translate task. Most participants preferred indirect input with both hands for lower physical effort, without a penalty on user performance. Otherwise, they preferred modes where the NDH oriented the object directly, supporting preshaping of the hand, which is more challenging with indirect gestures. The insights gained are of relevance for the design of XR interfaces that aim to leverage eye and hand input in tandem.

+

Pro-Tact: Hierarchical Synthesis of Proprioception and Tactile Exploration for Eyes-Free Ray Pointing on Out-of-View VR Menus

+

Authors: Yeonsu Kim, Jisu Yim, Kyunghwan Kim, Yohan Yun, Geehyuk Lee

+

Link

+

Abstract: We introduce Pro-Tact, a novel eyes-free pointing technique for interacting with out-of-view (OoV) VR menus. This technique combines rapid rough pointing using proprioception with fine-grain adjustments through tactile exploration, enabling menu interaction without visual attention. Our user study demonstrated that Pro-Tact allows users to select menu items accurately (95% accuracy for 54 items) in an eyes-free manner, with reduced fatigue and sickness compared to eyes-engaged interaction. Additionally, we observed that participants voluntarily interacted with OoV menus eyes-free when Pro-Tact's tactile feedback was provided in practical VR application usage contexts. This research contributes by introducing the novel interaction technique, Pro-Tact, and quantitatively evaluating its benefits in terms of performance, user experience, and user preference in OoV menu interactions.

+

GradualReality: Enhancing Physical Object Interaction in Virtual Reality via Interaction State-Aware Blending

+

Authors: HyunA Seo, Juheon Yi, Rajesh Balan, Youngki Lee

+

Link

+

Abstract: We present GradualReality, a novel interface enabling a Cross Reality experience that includes gradual interaction with physical objects in a virtual environment and supports both presence and usability. Daily Cross Reality interaction is challenging as the user's physical object interaction state is continuously changing over time, causing their attention to frequently shift between the virtual and physical worlds. As such, presence in the virtual environment and seamless usability for interacting with physical objects should be maintained at a high level. To address this issue, we present an Interaction State-Aware Blending approach that (i) balances immersion and interaction capability and (ii) provides a fine-grained, gradual transition between virtual and physical worlds. The key idea includes categorizing the flow of physical object interaction into multiple states and designing novel blending methods that offer optimal presence and sufficient physical awareness at each state. We performed extensive user studies and interviews with a working prototype and demonstrated that GradualReality provides better Cross Reality experiences compared to baselines.
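
The sketch below illustrates the general shape of interaction-state-aware blending: each physical-object interaction state maps to a passthrough level that is smoothed over time. The state names and blend values are illustrative guesses, not the states or levels used by GradualReality.

```python
# Sketch of interaction-state-aware blending: map the user's physical-object
# interaction state to how much real-world passthrough is blended into the
# virtual scene. States and values are illustrative assumptions only.
BLEND_BY_STATE = {
    "no_interaction": 0.0,   # fully virtual
    "approach":       0.3,   # faint passthrough around the reaching hand
    "contact":        0.6,   # object and hand clearly visible
    "manipulation":   0.9,   # near-full passthrough in the interaction region
}

def blend_factor(state: str, previous: float, smoothing: float = 0.2) -> float:
    """Return a smoothed passthrough level in [0, 1] for the current state."""
    target = BLEND_BY_STATE.get(state, 0.0)
    return previous + smoothing * (target - previous)  # simple exponential smoothing

level = 0.0
for state in ["no_interaction", "approach", "contact", "manipulation"]:
    level = blend_factor(state, previous=level)
    print(state, round(level, 2))
```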

+

StegoType: Surface Typing from Egocentric Cameras

+

Authors: Mark Richardson, Fadi Botros, Yangyang Shi, Pinhao Guo, Bradford Snow, Linguang Zhang, Jingming Dong, Keith Vertanen, Shugao Ma, Robert Wang

+

Link

+

Abstract: Text input is a critical component of any general purpose computing system, yet efficient and natural text input remains a challenge in AR and VR. Headset based hand-tracking has recently become pervasive among consumer VR devices and affords the opportunity to enable touch typing on virtual keyboards. We present an approach for decoding touch typing on uninstrumented flat surfaces using only egocentric camera-based hand-tracking as input. While egocentric hand-tracking accuracy is limited by issues like self occlusion and image fidelity, we show that a sufficiently diverse training set of hand motions paired with typed text can enable a deep learning model to extract signal from this noisy input. Furthermore, by carefully designing a closed-loop data collection process, we can train an end-to-end text decoder that accounts for natural sloppy typing on virtual keyboards. We evaluate our work with a user study (n=18) showing a mean online throughput of 42.4 WPM with an uncorrected error rate (UER) of 7% with our method compared to a physical keyboard baseline of 74.5 WPM at 0.8% UER, showing progress towards unlocking productivity and high throughput use cases in AR/VR.

+

Eye-Hand Movement of Objects in Near Space Extended Reality

+

Authors: Uta Wagner, Andreas Asferg Jacobsen, Tiare Feuchtner, Hans Gellersen, Ken Pfeuffer

+

Link

+

Abstract: Hand-tracking in Extended Reality (XR) enables moving objects in near space with direct hand gestures, to pick, drag and drop objects in 3D. In this work, we investigate the use of eye-tracking to reduce the effort involved in this interaction. As the eyes naturally look ahead to the target for a drag operation, the principal idea is to map the translation of the object in the image plane to gaze, such that the hand only needs to control the depth component of the operation. We have implemented four techniques that explore two factors: the use of gaze only to move objects in X-Y vs. extra refinement by hand, and the use of hand input in the Z axis to directly move objects vs. indirectly via a transfer function. We compared all four techniques in a user study (N=24) against baselines of direct and indirect hand input. We detail user performance, effort and experience trade-offs and show that all eye-hand techniques significantly reduce physical effort over direct gestures, pointing toward effortless drag-and-drop for XR environments.
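
A minimal sketch of the eye-hand split described above, assuming gaze supplies the X-Y position in the view plane while the hand supplies depth, optionally scaled by a gain-style transfer function. The numbers and function shape are illustrative only, not the paper's techniques.

```python
# Sketch: gaze sets the object's X-Y position in the view plane; the hand
# controls depth (Z), either directly or through a gain ("transfer") function.
def transfer(hand_dz: float, gain: float = 2.5) -> float:
    """Map physical hand depth motion to virtual depth motion (illustrative gain)."""
    return gain * hand_dz

def update_object(gaze_xy, hand_dz, obj_pos, indirect_z=True):
    x, y = gaze_xy                       # gaze ray hit point in the image plane
    _, _, z = obj_pos
    dz = transfer(hand_dz) if indirect_z else hand_dz
    return (x, y, z + dz)

pos = (0.0, 0.0, 1.0)
pos = update_object(gaze_xy=(0.2, 0.1), hand_dz=0.05, obj_pos=pos)
print(pos)   # -> (0.2, 0.1, 1.125)
```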

+

Prototyping

+

ProtoDreamer: A Mixed-prototype Tool Combining Physical Model and Generative AI to Support Conceptual Design

+

Authors: Hongbo ZHANG, Pei Chen, Xuelong Xie, Chaoyi Lin, Lianyan Liu, Zhuoshu Li, Weitao You, Lingyun Sun

+

Link

+

Abstract: Prototyping serves as a critical phase in the industrial conceptual design process, enabling exploration of problem space and identification of solutions. Recent advancements in large-scale generative models have enabled AI to become a co-creator in this process. However, designers often consider generative AI challenging due to the necessity to follow computer-centered interaction rules, diverging from their familiar design materials and languages. Physical prototyping is a commonly used design method, offering unique benefits in the prototyping process, such as intuitive understanding and tangible testing. In this study, we propose ProtoDreamer, a mixed-prototype tool that synergizes generative AI with physical prototypes to support conceptual design. ProtoDreamer allows designers to construct preliminary prototypes using physical materials, while AI recognizes these forms and vocal inputs to generate diverse design alternatives. This tool empowers designers to tangibly interact with prototypes, intuitively convey design intentions to AI, and continuously draw inspiration from the generated artifacts. An evaluation study confirms ProtoDreamer’s utility and strengths in time efficiency, creativity support, defects exposure, and detailed thinking facilitation.

+

TorqueCapsules: Fully-Encapsulated Flywheel Actuation Modules for Designing and Prototyping Movement-Based and Kinesthetic Interaction

+

Authors: Willa Yunqi Yang, Yifan Zou, Jingle Huang, Raouf Abujaber, Ken Nakagaki

+

Link

+

Abstract: Flywheels are unique, versatile actuators that store and convert kinetic energy to torque, widely utilized in aerospace, robotics, haptics, and more. However, prototyping interaction using flywheels is not trivial due to safety concerns, unintuitive operation, and implementation challenges. We present TorqueCapsules: self-contained, fully-encapsulated flywheel actuation modules that make the flywheel actuators easy to control, safe to interact with, and quick to reconfigure and customize. By fully encapsulating the actuators with a wireless microcontroller, a battery, and other components, the modules can be readily attached, embedded, or stuck to everyday objects, worn on people’s bodies, or combined with other devices. With our custom GUI, both novices and expert users can easily control multiple modules to design and prototype movements and kinesthetic haptics unique to flywheel actuation. We demonstrate various applications, including actuated everyday objects, wearable haptics, and expressive robots. We also conducted workshops in which novices and experts employed TorqueCapsules, collecting qualitative feedback and further application examples.

+

AniCraft: Crafting Everyday Objects as Physical Proxies for Prototyping 3D Character Animation in Mixed Reality

+

Authors: Boyu Li, Linping Yuan, Zhe Yan, Qianxi Liu, Yulin Shen, Zeyu Wang

+

Link

+

Abstract: We introduce AniCraft, a mixed reality system for prototyping 3D character animation using physical proxies crafted from everyday objects. Unlike existing methods that require specialized equipment to support the use of physical proxies, AniCraft only requires affordable markers, webcams, and daily accessible objects and materials. AniCraft allows creators to prototype character animations through three key stages: selection of virtual characters, fabrication of physical proxies, and manipulation of these proxies to animate the characters. This authoring workflow is underpinned by diverse physical proxies, manipulation types, and mapping strategies, which ease the process of posing virtual characters and mapping user interactions with physical proxies to animated movements of virtual characters. We provide a range of cases and potential applications to demonstrate how diverse physical proxies can inspire user creativity. User experiments show that our system can outperform traditional animation methods for rapid prototyping. Furthermore, we provide insights into the benefits and usage patterns of different materials, which lead to design implications for future research.

+

Mul-O: Encouraging Olfactory Innovation in Various Scenarios Through a Task-Oriented Development Platform

+

Authors: Peizhong Gao, Fan Liu, Di Wen, Yuze Gao, Linxin Zhang, Chikelei Wang, Qiwei Zhang, Yu Zhang, Shao-en Ma, Qi Lu, Haipeng Mi, YINGQING XU

+

Link

+

Abstract: Olfactory interfaces are pivotal in HCI, yet their development is hindered by limited application scenarios, stifling the discovery of new research opportunities. This challenge primarily stems from existing design tools focusing predominantly on odor display devices and the creation of standalone olfactory experiences, rather than enabling rapid adaptation to various contexts and tasks. Addressing this, we introduce Mul-O, a novel task-oriented development platform crafted to aid semi-professionals in navigating the diverse requirements of potential application scenarios and effectively prototyping ideas. Mul-O facilitates the swift association and integration of olfactory experiences into functional designs, system integrations, and concept validations. Comprising a web UI for task-oriented development, an API server for seamless third-party integration, and wireless olfactory display hardware, Mul-O significantly enhances the ideation and prototyping process in multisensory tasks. This was verified by a 15-day workshop attended by 30 participants. The workshop produced seven innovative projects, underscoring Mul-O's efficacy in fostering olfactory innovation.

+

Sustainable Interfaces

+

Degrade to Function: Towards Eco-friendly Morphing Devices that Function Through Programmed Sequential Degradation

+

Authors: Qiuyu Lu, Semina Yi, Mengtian Gan, Jihong Huang, Xiao Zhang, Yue Yang, Chenyi Shen, Lining Yao

+

Link

+

Abstract: While it seems counterintuitive to think of degradation within an operating device as beneficial, one may argue that when rationally designed, the controlled breakdown of materials—physical, chemical, or biological—can be harnessed for specific functions. To apply this principle to the design of morphing devices, we introduce the concept of "Degrade to Function" (DtF). This concept aims to create eco-friendly and self-contained morphing devices that operate through a sequence of environmentally-triggered degradations. We explore its design considerations and implementation techniques by identifying environmental conditions and degradation types that can be exploited, evaluating potential materials capable of controlled degradation, suggesting designs for structures that can leverage degradation to achieve various transformations and functions, and developing sequential control approaches that integrate degradation triggers. To demonstrate the viability and versatility of this design strategy, we showcase several application examples across a range of environmental conditions.

+

WasteBanned: Supporting Zero Waste Fashion Design Through Linked Edits

+

Authors: Ruowang Zhang, Stefanie Mueller, Gilbert Bernstein, Adriana Schulz, Mackenzie Leake

+

Link

+

Abstract: The commonly used cut-and-sew garment construction process, in which 2D fabric panels are cut from sheets of fabric and assembled into 3D garments, contributes to widespread textile waste in the fashion industry. There is often a significant divide between the design of the garment and the layout of the panels. One opportunity for bridging this gap is the emerging study and practice of zero waste fashion design, which involves creating clothing designs with maximum layout efficiency. Enforcing the strict constraints of zero waste sewing is challenging, as edits to one region of the garment necessarily affect neighboring panels. Based on our formative work to understand this emerging area within fashion design, we present WasteBanned, a tool that combines CAM and CAD to help users prioritize efficient material usage, work within these zero waste constraints, and edit existing zero waste garment patterns. Our user evaluation indicates that our tool helps fashion designers edit zero waste patterns to fit different bodies and add stylistic variation, while creating highly efficient fabric layouts.

+

HoloChemie - Sustainable Fabrication of Soft Biochemical Holographic Devices for Ubiquitous Sensing

+

Authors: Sutirtha Roy, Moshfiq-Us-Saleheen Chowdhury, Jurjaan Noim, Richa Pandey, Aditya Shekhar Nittala

+

Link

+

Abstract: Sustainable fabrication approaches and biomaterials are increasingly being used in HCI to fabricate interactive devices. However, the majority of the work has focused on integrating electronics. This paper takes a sustainable approach to exploring the fabrication of biochemical sensing devices. Firstly, we contribute a set of biochemical formulations for biological and environmental sensing with bio-sourced and environment-friendly substrate materials. Our formulations are based on a combination of enzymes derived from bacteria and fungi, plant extracts and commercially available chemicals to sense both liquid and gaseous analytes: glucose, lactic acid, pH levels and carbon dioxide. Our novel holographic sensing scheme allows for detecting the presence of analytes and enables quantitative estimation of the analyte levels. We present a set of application scenarios that demonstrate the versatility of our approach and discuss the sustainability aspects, its limitations, and the implications for bio-chemical systems in HCI.

+

Sound & Music

+

SonoHaptics: An Audio-Haptic Cursor for Gaze-Based Object Selection in XR

+

Authors: Hyunsung Cho, Naveen Sendhilnathan, Michael Nebeling, Tianyi Wang, Purnima Padmanabhan, Jonathan Browder, David Lindlbauer, Tanya Jonker, Kashyap Todi

+

Link

+

Abstract: We introduce SonoHaptics, an audio-haptic cursor for gaze-based 3D object selection. SonoHaptics addresses challenges around providing accurate visual feedback during gaze-based selection in Extended Reality (XR), e.g., lack of world-locked displays in no- or limited-display smart glasses and visual inconsistencies. To enable users to distinguish objects without visual feedback, SonoHaptics employs the concept of cross-modal correspondence in human perception to map visual features of objects (color, size, position, material) to audio-haptic properties (pitch, amplitude, direction, timbre). We contribute data-driven models for determining cross-modal mappings of visual features to audio and haptic features, and a computational approach to automatically generate audio-haptic feedback for objects in the user's environment. SonoHaptics provides global feedback that is unique to each object in the scene, and local feedback to amplify differences between nearby objects. Our comparative evaluation shows that SonoHaptics enables accurate object identification and selection in a cluttered scene without visual feedback.
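
The sketch below shows one way a cross-modal mapping of this kind could look in code, mapping object size, hue, and horizontal position to amplitude, pitch, and pan. The ranges are illustrative assumptions, not the data-driven mappings contributed by the paper.

```python
# Sketch of a cross-modal mapping in the spirit of SonoHaptics: visual object
# features are mapped to audio parameters. Specific ranges are illustrative.
def map_object_to_audio(size: float, hue_deg: float, x_norm: float) -> dict:
    """size in [0,1], hue in degrees [0,360), x_norm in [-1,1] (left..right)."""
    pitch_hz = 220 + (hue_deg / 360.0) * (880 - 220)   # hue -> pitch
    amplitude = 0.2 + 0.8 * size                        # bigger objects sound louder
    pan = max(-1.0, min(1.0, x_norm))                   # follow horizontal position
    return {"pitch_hz": round(pitch_hz, 1),
            "amplitude": round(amplitude, 2),
            "pan": pan}

scene = [
    {"name": "red mug",   "size": 0.3, "hue_deg": 0,   "x_norm": -0.5},
    {"name": "blue lamp", "size": 0.8, "hue_deg": 240, "x_norm": 0.6},
]
for obj in scene:
    print(obj["name"], map_object_to_audio(obj["size"], obj["hue_deg"], obj["x_norm"]))
```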

+

SonifyAR: Context-Aware Sound Generation in Augmented Reality

+

Authors: Xia Su, Jon Froehlich, Eunyee Koh, Chang Xiao

+

Link

+

Abstract: Sound plays a crucial role in enhancing user experience and immersiveness in Augmented Reality (AR). However, current platforms lack support for AR sound authoring due to limited interaction types, challenges in collecting and specifying context information, and difficulty in acquiring matching sound assets. We present SonifyAR, an LLM-based AR sound authoring system that generates context-aware sound effects for AR experiences. SonifyAR expands the current design space of AR sound and implements a Programming by Demonstration (PbD) pipeline to automatically collect contextual information of AR events, including virtual-content-semantics and real-world context. This context information is then processed by a large language model to acquire sound effects with Recommendation, Retrieval, Generation, and Transfer methods. To evaluate the usability and performance of our system, we conducted a user study with eight participants and created five example applications, including an AR-based science experiment, and an assistive application for low-vision AR users.

+

Auptimize: Optimal Placement of Spatial Audio Cues for Extended Reality

+

Authors: Hyunsung Cho, Alexander Wang, Divya Kartik, Emily Xie, Yukang Yan, David Lindlbauer

+

Link

+

Abstract: Spatial audio in Extended Reality (XR) provides users with better awareness of where virtual elements are placed, and efficiently guides them to events such as notifications, system alerts from different windows, or approaching avatars. Humans, however, are inaccurate in localizing sound cues, especially with multiple sources due to limitations in human auditory perception such as angular discrimination error and front-back confusion. This decreases the efficiency of XR interfaces because users misidentify from which XR element a sound is coming. To address this, we propose Auptimize, a novel computational approach for placing XR sound sources, which mitigates such localization errors by utilizing the ventriloquist effect. Auptimize disentangles the sound source locations from the visual elements and relocates the sound sources to optimal positions for unambiguous identification of sound cues, avoiding errors due to inter-source proximity and front-back confusion. Our evaluation shows that Auptimize decreases spatial audio-based source identification errors compared to playing sound cues at the paired visual-sound locations. We demonstrate the applicability of Auptimize for diverse spatial audio-based interactive XR scenarios.
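
As a toy illustration of the placement idea, the sketch below snaps sound cues to candidate angles that maximize the minimum angular separation while staying near their visual elements. This brute-force search is only an illustrative stand-in, not the paper's optimization method.

```python
# Sketch: relocate sound cues to candidate angles so nearby sources are less
# likely to be confused, trading off separation against drift from the visuals.
from itertools import permutations

def angular_distance(a: float, b: float) -> float:
    d = abs(a - b) % 360
    return min(d, 360 - d)

def place_sources(visual_angles, candidate_angles):
    """Assign each visual element an audio angle, maximizing worst-case
    separation while penalizing drift from the visual angle."""
    best, best_score = None, float("-inf")
    for perm in permutations(candidate_angles, len(visual_angles)):
        separation = min(angular_distance(a, b)
                         for i, a in enumerate(perm) for b in perm[i + 1:])
        drift = sum(angular_distance(v, p) for v, p in zip(visual_angles, perm))
        score = separation - 0.1 * drift   # illustrative weighting
        if score > best_score:
            best, best_score = perm, score
    return dict(zip(visual_angles, best))

print(place_sources([10, 20, 200], candidate_angles=[0, 45, 90, 180, 225]))
```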

+

EarHover: Mid-Air Gesture Recognition for Hearables Using Sound Leakage Signals

+

BEST_PAPER

+

Authors: Shunta Suzuki, Takashi Amesaka, Hiroki Watanabe, Buntarou Shizuki, Yuta Sugiura

+

Link

+

Abstract: We introduce EarHover, an innovative system that enables mid-air gesture input for hearables. Mid-air gesture input, which eliminates the need to touch the device and thus helps to keep hands and the device clean, has been shown in previous surveys to be in high demand. However, existing mid-air gesture input methods for hearables have been limited to adding cameras or infrared sensors. By focusing on the sound leakage phenomenon unique to hearables, we have realized mid-air gesture recognition using a speaker and an external microphone that are highly compatible with hearables. The signal leaked to the outside of the device due to sound leakage can be measured by an external microphone, which detects the differences in reflection characteristics caused by the hand's speed and shape during mid-air gestures. Among 27 types of gestures, we determined the seven most suitable gestures for EarHover in terms of signal discrimination and user acceptability. We then evaluated the gesture detection and classification performance of two prototype devices (in-ear type/open-ear type) for real-world application scenarios.

+

Towards Music-Aware Virtual Assistants

+

Authors: Alexander Wang, David Lindlbauer, Chris Donahue

+

Link

+

Abstract: We propose a system for modifying spoken notifications in a manner that is sensitive to the music a user is listening to. Spoken notifications provide convenient access to rich information without the need for a screen. Virtual assistants see prevalent use in hands-free settings such as driving or exercising, activities where users also regularly enjoy listening to music. In such settings, virtual assistants will temporarily mute a user's music to improve intelligibility. However, users may perceive these interruptions as intrusive, negatively impacting their music-listening experience. To address this challenge, we propose the concept of music-aware virtual assistants, where speech notifications are modified to resemble a voice singing in harmony with the user's music. We contribute a system that processes user music and notification text to produce a blended mix, replacing original song lyrics with the notification content. In a user study comparing musical assistants to standard virtual assistants, participants expressed that musical assistants fit better with music, reduced intrusiveness, and provided a more delightful listening experience overall.

+

Learning to Learn

+

Patterns of Hypertext-Augmented Sensemaking

+

Authors: Siyi Zhu, Robert Haisfield, Brendan Langen, Joel Chan

+

Link

+

Abstract: The early days of HCI were marked by bold visions of hypertext as a transformative medium for augmented sensemaking, exemplified in systems like Memex, Xanadu, and NoteCards. Today, however, hypertext is often disconnected from discussions of the future of sensemaking. In this paper, we investigate how the recent resurgence in hypertext "tools for thought" might point to new directions for hypertext-augmented sensemaking. Drawing on detailed analyses of guided tours with 23 scholars, we describe hypertext-augmented use patterns for dealing with the core problem of revisiting and reusing existing/past ideas during scholarly sensemaking. We then discuss how these use patterns validate and extend existing knowledge of hypertext design patterns for sensemaking, and point to new design opportunities for augmented sensemaking.

+

Augmented Physics: Creating Interactive and Embedded Physics Simulations from Static Textbook Diagrams

+

BEST_PAPER

+

Authors: Aditya Gunturu, Yi Wen, Nandi Zhang, Jarin Thundathil, Rubaiat Habib Kazi, Ryo Suzuki

+

Link

+

Abstract: We introduce Augmented Physics, a machine learning-integrated authoring tool designed for creating embedded interactive physics simulations from static textbook diagrams. Leveraging recent advancements in computer vision, such as Segment Anything and Multi-modal LLMs, our web-based system enables users to semi-automatically extract diagrams from physics textbooks and generate interactive simulations based on the extracted content. These interactive diagrams are seamlessly integrated into scanned textbook pages, facilitating interactive and personalized learning experiences across various physics concepts, such as optics, circuits, and kinematics. Drawing from an elicitation study with seven physics instructors, we explore four key augmentation strategies: 1) augmented experiments, 2) animated diagrams, 3) bi-directional binding, and 4) parameter visualization. We evaluate our system through technical evaluation, a usability study (N=12), and expert interviews (N=12). Study findings suggest that our system can facilitate more engaging and personalized learning experiences in physics education.

+

Qlarify: Recursively Expandable Abstracts for Dynamic Information Retrieval over Scientific Papers

+

Authors: Raymond Fok, Joseph Chee Chang, Tal August, Amy Zhang, Daniel Weld

+

Link

+

Abstract: Navigating the vast scientific literature often starts with browsing a paper’s abstract. However, when a reader seeks additional information, not present in the abstract, they face a costly cognitive chasm during their dive into the full text. To bridge this gap, we introduce recursively expandable abstracts, a novel interaction paradigm that dynamically expands abstracts by progressively incorporating additional information from the papers’ full text. This lightweight interaction allows scholars to specify their information needs by quickly brushing over the abstract or selecting AI-suggested expandable entities. Relevant information is synthesized using a retrieval-augmented generation approach, presented as a fluid, threaded expansion of the abstract, and made efficiently verifiable via attribution to relevant source-passages in the paper. Through a series of user studies, we demonstrate the utility of recursively expandable abstracts and identify future opportunities to support low-effort and just-in-time exploration of long-form information contexts through LLM-powered interactions.
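
A minimal sketch of the retrieval-augmented expansion loop described above, assuming simple lexical-overlap retrieval and a placeholder `synthesize_expansion` function in place of the actual LLM call; neither is the Qlarify implementation.

```python
# Sketch: given a span the reader selected in the abstract, retrieve the most
# relevant full-text passages and synthesize an attributed expansion.
def score(query: str, passage: str) -> float:
    q, p = set(query.lower().split()), set(passage.lower().split())
    return len(q & p) / (len(q) or 1)        # plain lexical overlap

def retrieve(query: str, passages: list[str], k: int = 2) -> list[str]:
    return sorted(passages, key=lambda p: score(query, p), reverse=True)[:k]

def synthesize_expansion(query: str, evidence: list[str]) -> str:
    """Placeholder for a retrieval-augmented LLM call with source attribution."""
    cited = " ".join(f"[{i + 1}] {p}" for i, p in enumerate(evidence))
    return f"Expansion of '{query}': {cited}"

paper_passages = [
    "We evaluate the system with 24 participants in a within-subjects design.",
    "The model is trained on 10k abstracts with contrastive supervision.",
    "Participants completed literature-review tasks twice as fast as baseline.",
]
evidence = retrieve("evaluation with participants", paper_passages)
print(synthesize_expansion("evaluation with participants", evidence))
```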

+

LessonPlanner: Assisting Novice Teachers to Prepare Pedagogy-Driven Lesson Plans with Large Language Models

+

Authors: Haoxiang Fan, Guanzheng Chen, Xingbo Wang, Zhenhui Peng

+

Link

+

Abstract: Preparing a lesson plan, e.g., a detailed road map with strategies and materials for instructing a 90-minute class, is beneficial yet challenging for novice teachers. Large language models (LLMs) can ease this process by generating adaptive content for lesson plans, which would otherwise require teachers to create from scratch or search existing resources. In this work, we first conduct a formative study with six novice teachers to understand their needs for support of preparing lesson plans with LLMs. Then, we develop LessonPlanner that assists users to interactively construct lesson plans with adaptive LLM-generated content based on Gagne's nine events. Our within-subjects study (N=12) shows that compared to the baseline ChatGPT interface, LessonPlanner can significantly improve the quality of outcome lesson plans and ease users' workload in the preparation process. Our expert interviews (N=6) further demonstrate LessonPlanner's usefulness in suggesting effective teaching strategies and meaningful educational resources. We discuss concerns on and design considerations for supporting teaching activities with LLMs.

+

Hot Interfaces

+

Fiery Hands: Designing Thermal Glove through Thermal and Tactile Integration for Virtual Object Manipulation

+

Authors: Haokun Wang, Yatharth Singhal, Hyunjae Gil, Jin Ryong Kim

+

Link

+

Abstract: We present a novel approach to render thermal and tactile feedback to the palm and fingertips through thermal and tactile integration. Our approach minimizes the obstruction of the palm and inner side of the fingers and enables virtual object manipulation while providing localized and global thermal feedback. By leveraging thermal actuators positioned strategically on the outer palm and back of the fingers in interplay with tactile actuators, our approach exploits thermal referral and tactile masking phenomena. Through a series of user studies, we validate the perception of localized thermal sensations across the palm and fingers, showcasing the ability to generate diverse thermal patterns. Furthermore, we demonstrate the efficacy of our approach in VR applications, replicating diverse thermal interactions with virtual objects. This work represents significant progress in thermal interactions within VR, offering enhanced sensory immersion at an optimal energy cost.

+

DexteriSync: A Hand Thermal I/O Exoskeleton for Morphing Finger Dexterity Experience

+

Authors: Ximing Shen, Youichi Kamiyama, Kouta Minamizawa, Jun Nishida

+

Link

+

Abstract: Skin temperature is an important physiological factor for human hand dexterity. Leveraging this feature, we engineered an exoskeleton, called DexteriSync, that can dynamically adjust the user's finger dexterity and induce different thermal perceptions by modulating finger skin temperature. This exoskeleton comprises flexible silicone-copper tube segments, 3D-printed finger sockets, a 3D-printed palm base, a pump system, and a water temperature control with a storage unit. By realising an embodied experience of compromised dexterity, DexteriSync can help product designers understand the lived experience of compromised hand dexterity, such as that of the elderly and/or neurodivergent users, when designing daily necessities for them. We validated DexteriSync via a technical evaluation and two user studies, demonstrating that it can change skin temperature, dexterity, and thermal perception. An exploratory session with design students and an autistic individual with compromised dexterity demonstrated that the exoskeleton provided a more realistic experience than video education and allowed them to gain higher confidence in their designs. The results advocated for the efficacy of experiencing embodied compromised finger dexterity, which can promote an understanding of the related physical challenges and lead to a more persuasive design for assistive tools.

+

Flip-Pelt: Motor-Driven Peltier Elements for Rapid Thermal Stimulation and Congruent Pressure Feedback in Virtual Reality

+

Authors: Seongjun Kang, Gwangbin Kim, Seokhyun Hwang, Jeongju Park, Ahmed Elsharkawy, SeungJun Kim

+

Link

+

Abstract: This study introduces "Flip-Pelt," a motor-driven Peltier device designed to provide rapid thermal stimulation and congruent pressure feedback in virtual reality (VR) environments. Our system incorporates eight motor-driven Peltier elements, allowing for the flipping of preheated or cooled elements to the opposite side. In evaluating the Flip-Pelt device, we assess user ability to distinguish between heat/cold sources by their patterns and stiffness, and its impact on enhancing haptic experiences in VR content that involves contact with various thermal sources. Our findings demonstrate that rapid thermal stimulation and congruent pressure feedback provided by Flip-Pelt enhance the recognition accuracy of thermal patterns and the stiffness of virtual objects. These features also improve haptic experiences in VR scenarios through their temporal congruency between tactile and thermal stimuli. Additionally, we discuss the scalability of the Flip-Pelt system to other body parts by proposing design prototypes.

+

Hydroptical Thermal Feedback: Spatial Thermal Feedback Using Visible Lights and Water

+

Authors: Sosuke Ichihashi, Masahiko Inami, Hsin-Ni Ho, Noura Howell

+

Link

+

Abstract: We control the temperature of materials in everyday interactions, recognizing temperature's important influence on our bodies, minds, and experiences. However, thermal feedback is an under-explored modality in human-computer interaction partly due to its limited temporal (slow) and spatial (small-area and non-moving) capabilities. We introduce hydroptical thermal feedback, a spatial thermal feedback method that works by applying visible lights on body parts in water. Through physical measurements and psychophysical experiments, our results show: (1) Humans perceive thermal sensations when visible lights are cast on the skin under water, and perceived warmth is greater for lights with shorter wavelengths, (2) temporal capabilities, (3) apparent motion (spatial) of warmth and coolness sensations, and (4) hydroptical thermal feedback can support the perceptual illusion that the water itself is warmer. We propose applications, including virtual reality (VR), shared water experiences, and therapies. Overall, this paper contributes hydroptical thermal feedback as a novel method, empirical results demonstrating its unique capabilities, proposed applications, and design recommendations for using hydroptical thermal feedback. Our method introduces controlled, spatial thermal perceptions to water experiences.

+

FABulous

+

Facilitating the Parametric Definition of Geometric Properties in Programming-Based CAD

+

Authors: J Gonzalez Avila, Thomas Pietrzak, Audrey Girouard, Géry Casiez

+

Link

+

Abstract: Parametric Computer-aided design (CAD) enables the creation of reusable models by integrating variables into geometric properties, facilitating customization without a complete redesign. However, creating parametric designs in programming-based CAD presents significant challenges. Users define models in a code editor using a programming language, with the application generating a visual representation in a viewport. This process involves complex programming and arithmetic expressions to describe geometric properties, linking various object properties to create parametric designs. Unfortunately, these applications lack assistance, making the process unnecessarily demanding. We propose a solution that allows users to retrieve parametric expressions from the visual representation for reuse in the code, streamlining the design process. We demonstrated this concept through a proof-of-concept implemented in the programming-based CAD application, OpenSCAD, and conducted an experiment with 11 users. Our findings suggest that this solution could significantly reduce design errors, improve interactivity and engagement in the design process, and lower the entry barrier for newcomers by reducing the mathematical skills typically required in programming-based CAD applications.

+

Rhapso: Automatically Embedding Fiber Materials into 3D Prints for Enhanced Interactivity

+

Authors: Daniel Ashbrook, Wei-Ju Lin, Nicholas Bentley, Diana Soponar, Zeyu Yan, Valkyrie Savage, Lung-Pan Cheng, Huaishu Peng, Hyunyoung Kim

+

Link

+

Abstract: We introduce Rhapso, a 3D printing system designed to embed a diverse range of continuous fiber materials within 3D objects during the printing process. This approach enables integrating properties like tensile strength, force storage and transmission, or aesthetic and tactile characteristics, directly into low-cost thermoplastic 3D prints. These functional objects can have intricate actuation, self-assembly, and sensing capabilities with little to no manual intervention. To achieve this, we modify a low-cost Fused Filament Fabrication (FFF) 3D printer, adding a stepper motor-controlled fiber spool mechanism on a gear ring above the print bed. In addition to hardware, we provide parsing software for precise fiber placement, which generates Gcode for printer operation. To illustrate the versatility of our system, we present applications that showcase its extensive design potential. Additionally, we offer comprehensive documentation and open designs, empowering others to replicate our system and explore its possibilities.

+

Speed-Modulated Ironing: High-Resolution Shade and Texture Gradients in Single-Material 3D Printing

+

Authors: Mehmet Ozdemir, Marwa AlAlawi, Mustafa Doga Dogan, Jose Martinez Castro, Stefanie Mueller, Zjenja Doubrovski

+

Link

+

Abstract: We present Speed-Modulated Ironing, a new fabrication method for programming visual and tactile properties in single-material 3D printing. We use one nozzle to 3D print and a second nozzle to reheat printed areas at varying speeds, controlling the material's temperature-response. The rapid adjustments of speed allow for fine-grained reheating, enabling high-resolution color and texture variations. We implemented our method in a tool that allows users to assign desired properties to 3D models and creates corresponding 3D printing instructions. We demonstrate our method with three temperature-responsive materials: a foaming filament, a filament with wood fibers, and a filament with cork particles. These filaments respond to temperature by changing color, roughness, transparency, and gloss. Our technical evaluation reveals the capabilities of our method in achieving sufficient resolution and color shade range that allows surface details such as small text, photos, and QR codes on 3D-printed objects. Finally, we provide application examples demonstrating the new design capabilities enabled by Speed-Modulated Ironing.
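
The core mechanism lends itself to a small G-code sketch: a second, non-extruding pass re-traverses a printed region while the feed rate ramps along the path, so slower segments receive more heat. The feed-rate range and coordinates below are illustrative assumptions, not calibrated values from the paper.

```python
# Sketch of speed-modulated reheating expressed as G-code generation: a second
# nozzle pass re-traverses a printed line while the feed rate varies point by
# point, so slower segments receive more heat (and a darker shade).
def reheat_pass(x_start, x_end, y, z, steps=10, f_slow=300, f_fast=3000):
    """Yield G1 moves whose feed rate ramps from f_slow to f_fast along X."""
    lines = [f"G1 X{x_start:.2f} Y{y:.2f} Z{z:.2f} F{f_fast}"]  # travel to start
    for i in range(1, steps + 1):
        t = i / steps
        x = x_start + t * (x_end - x_start)
        feed = f_slow + t * (f_fast - f_slow)      # slow -> dark, fast -> light
        lines.append(f"G1 X{x:.2f} Y{y:.2f} F{feed:.0f}")
    return lines

for line in reheat_pass(x_start=10, x_end=60, y=50, z=0.2, steps=5):
    print(line)
```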

+

TRAvel Slicer: Continuous Extrusion Toolpaths for 3D Printing

+

Authors: Jaime Gould, Camila Friedman-Gerlicz, Leah Buechley

+

Link

+

Abstract: In this paper we present Travel Reduction Algorithm (TRAvel) Slicer, which minimizes travel movements in 3D printing. Conventional slicing software generates toolpaths with many travel movements--movements without material extrusion. Some 3D printers are incapable of starting and stopping extrusion, and it is difficult or impossible to control the extrusion of many materials. This makes toolpaths with travel movements unsuitable for a wide range of printers and materials.

+

We developed the open-source TRAvel Slicer to enable the printing of complex 3D models on a wider range of printers and in a wider range of materials than is currently possible. TRAvel Slicer minimizes two different kinds of travel movements--what we term Inner- and Outer-Model travel. We minimize Inner-Model travel (travel within the 3D model) by generating space-filling Fermat spirals for each contiguous planar region of the model. We minimize Outer-Model travel (travel outside of the 3D model) by ordering the printing of different branches of the model, thus limiting transitions between branches. We present our algorithm and software and then demonstrate how: 1) TRAvel Slicer makes it possible to generate high-quality prints from a metal-clay material, CeraMetal, that is functionally unprintable using an off-the-shelf slicer. 2) TRAvel Slicer dramatically increases the printing efficiency of traditional plastic 3D printing compared to an off-the-shelf slicer.
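
As a toy stand-in for the branch-ordering step, the sketch below orders branch regions greedily by nearest neighbour so that consecutive regions are close together and long travel moves between branches are limited. This is illustrative only and not the actual TRAvel Slicer algorithm.

```python
# Sketch: order branch regions so each printed region is followed by a nearby
# one, reducing Outer-Model travel. Greedy nearest-neighbour ordering only.
import math

def nearest_neighbour_order(regions: dict[str, tuple[float, float]], start: str) -> list[str]:
    order, remaining = [start], set(regions) - {start}
    while remaining:
        last = regions[order[-1]]
        nxt = min(remaining, key=lambda r: math.dist(last, regions[r]))
        order.append(nxt)
        remaining.remove(nxt)
    return order

branches = {"trunk": (0, 0), "arm_left": (-3, 5), "arm_right": (3, 5), "top": (0, 9)}
print(nearest_neighbour_order(branches, start="trunk"))
# one nearest-neighbour ordering, e.g. ['trunk', 'arm_left', 'top', 'arm_right']
```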

+

Understanding and Supporting Debugging Workflows in CAD

+

Authors: Felix Hähnlein, Gilbert Bernstein, Adriana Schulz

+

Link

+

Abstract: One of the core promises of parametric Computer-Aided Design (CAD) is that users can easily edit their model at any point in time. However, due to the ambiguity of changing references to intermediate, updated geometry, parametric edits can lead to reference errors which are difficult to fix in practice. We claim that debugging reference errors remains challenging because CAD systems do not provide users with tools to understand where the error happened and how to fix it. To address these challenges, we prototype a graphical debugging tool, DeCAD, which helps users compare CAD model states both across operations and across edits. In a qualitative lab study, we use DeCAD as a probe to understand specific challenges that users face and what workflows they employ to overcome them. We conclude with design implications for future debugging tool developers.

+

Haptics

+

LoopBot: Representing Continuous Haptics of Grounded Objects in Room-scale VR

+

Authors: Tetsushi Ikeda, Kazuyuki Fujita, Kumpei Ogawa, Kazuki Takashima, Yoshifumi Kitamura

+

Link

+

Abstract: In room-scale virtual reality, providing continuous haptic feedback from touching grounded objects, such as walls and handrails, has been challenging due to the user's walking range and the required force. In this study, we propose LoopBot, a novel technique to provide continuous haptic feedback from grounded objects using only a single user-following robot. Specifically, LoopBot is equipped with a loop-shaped haptic prop attached to an omnidirectional robot that scrolls to cancel out the robot's displacement, giving the user the haptic sensation that the prop is actually fixed in place, or "grounded." We first introduce the interaction design space of LoopBot and, as one of its promising interaction scenarios, implement a prototype for the experience of walking while grasping handrails. A performance evaluation shows that scrolling the prop cancels 77.5% of the robot's running speed on average. A preliminary user test (N=10) also shows that the subjective realism of the experience and the sense of the virtual handrails being grounded were significantly higher than when the prop was not scrolled. Based on these findings, we discuss possible further development of LoopBot.
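
The cancellation principle can be stated in a few lines: scroll the prop at the negative of the robot's velocity component along the prop surface, so the surface under the user's hand appears stationary. The sketch below is illustrative only; the actual controller is more involved (e.g., latency and slip are not modeled here).

```python
# Sketch of LoopBot's cancellation principle: scroll the loop-shaped prop at
# the opposite of the robot's velocity component along the prop surface.
# Values are illustrative; latency and slip compensation are omitted.
def prop_scroll_speed(robot_velocity: tuple[float, float],
                      surface_tangent: tuple[float, float]) -> float:
    """Return the scroll speed (m/s) that cancels motion along the tangent."""
    vx, vy = robot_velocity
    tx, ty = surface_tangent                  # unit vector along the prop surface
    along = vx * tx + vy * ty                 # robot speed along the surface
    return -along                             # scroll opposite to cancel it

print(prop_scroll_speed(robot_velocity=(0.4, 0.0), surface_tangent=(1.0, 0.0)))  # -0.4
```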


JetUnit: Rendering Diverse Force Feedback in Virtual Reality Using Water Jets


Authors: Zining Zhang, Jiasheng Li, Zeyu Yan, Jun Nishida, Huaishu Peng


Link


Abstract: We propose JetUnit, a water-based VR haptic system designed to produce force feedback with a wide spectrum of intensities and frequencies through water jets. The key challenge in designing this system lies in optimizing parameters to enable the haptic device to generate force feedback that closely replicates the most intense force produced by direct water jets while ensuring the user remains dry. In this paper, we present the key design parameters of the JetUnit wearable device determined through a set of quantitative experiments and a perception study. We further conducted a user study to assess the impact of integrating our haptic solutions into virtual reality experiences. The results revealed that, by adhering to the design principles of JetUnit, the water-based haptic system is capable of delivering diverse force feedback sensations, significantly enhancing the immersive experience in virtual reality.


Selfrionette: A Fingertip Force-Input Controller for Continuous Full-Body Avatar Manipulation and Diverse Haptic Interactions


Authors: Takeru Hashimoto, Yutaro Hirao


Link


Abstract: We propose Selfrionette, a controller that uses fingertip force input to drive avatar movements in virtual reality (VR). This system enables users to interact with virtual objects and walk in VR using only fingertip force, overcoming physical and spatial constraints. Additionally, by fixing users' fingers, it provides users with counterforces equivalent to the applied force, allowing for diverse and wide dynamic range haptic feedback by adjusting the relationship between force input and virtual movement. To evaluate the effectiveness of the proposed method, this paper focuses on hand interaction as a first step. In User Study 1, we measured usability and embodiment during reaching tasks under Selfrionette, body tracking, and finger tracking conditions. In User Study 2, we investigated whether users could perceive haptic properties such as weight, friction, and compliance under the same conditions as User Study 1. Selfrionette was found to be comparable to body tracking in realism of haptic interaction, enabling embodied avatar experiences even in limited spatial conditions.


SpinShot: Optimizing Both Physical and Perceived Force Feedback of Flywheel-Based, Directional Impact Handheld Devices


Authors: Chia-An Fan, En-Huei Wu, Chia-Yu Cheng, Yu-Cheng Chang, Alvaro Lopez, Yu Chen, Chia-Chen Chi, Yi-Sheng Chan, Ching-Yi Tsai, Mike Chen


Link


Abstract: Real-world impact, such as hitting a tennis ball and a baseball, generates instantaneous, directional impact forces. However, current ungrounded force feedback technologies, such as air jets and propellers, can only generate directional impulses that are 10x-10,000x weaker. We present SpinShot, a flywheel-based device with a solenoid-actuated stopper capable of generating directional impulse of 22Nm in 1ms, which is more than 10x stronger than prior ungrounded directional technologies. Furthermore, we present a novel force design that reverses the flywheel immediately after the initial impact, to significantly increase the perceived magnitude. We conducted a series of two formative, perceptual studies (n=16, 18), followed by a summative user experience study (n=16) that compared SpinShot vs. moving mass (solenoid) and vs. air jets in a VR baseball hitting game. Results showed that SpinShot significantly improved realism, immersion, magnitude (p < .01) compared to both baselines, but significantly reduced comfort vs. air jets primarily due to the 2.9x device weight. Overall, SpinShot was preferred by 63-75% of the participants.


Vision-based UIs


Vision-Based Hand Gesture Customization from a Single Demonstration


Authors: Soroush Shahi, Vimal Mollyn, Cori Tymoszek Park, Runchang Kang, Asaf Liberman, Oron Levy, Jun Gong, Abdelkareem Bedri, Gierad Laput


Link


Abstract: Hand gesture recognition is becoming a more prevalent mode of human-computer interaction, especially as cameras proliferate across everyday devices. Despite continued progress in this field, gesture customization is often underexplored. Customization is crucial since it enables users to define and demonstrate gestures that are more natural, memorable, and accessible. However, customization requires efficient usage of user-provided data. We introduce a method that enables users to easily design bespoke gestures with a monocular camera from one demonstration. We employ transformers and meta-learning techniques to address few-shot learning challenges. Unlike prior work, our method supports any combination of one-handed, two-handed, static, and dynamic gestures, including different viewpoints, and the ability to handle irrelevant hand movements. We implement three real-world applications using our customization method, conduct a user study, and achieve up to 94\% average recognition accuracy from one demonstration. Our work provides a viable path for vision-based gesture customization, laying the foundation for future advancements in this domain.


VirtualNexus: Enhancing 360-Degree Video AR/VR Collaboration with Environment Cutouts and Virtual Replicas


Authors: Xincheng Huang, Michael Yin, Ziyi Xia, Robert Xiao


Link


Abstract: Asymmetric AR/VR collaboration systems bring a remote VR user to a local AR user’s physical environment, allowing them to communicate and work within a shared virtual/physical space. Such systems often display the remote environment through 3D reconstructions or 360° videos. While 360° cameras stream an environment in higher quality, they lack spatial information, making them less interactable. We present VirtualNexus, an AR/VR collaboration system that enhances 360° video AR/VR collaboration with environment cutouts and virtual replicas. VR users can define cutouts of the remote environment to interact with as a world-in-miniature, and their interactions are synchronized to the local AR perspective. Furthermore, AR users can rapidly scan and share 3D virtual replicas of physical objects using neural rendering. We demonstrated our system’s utility through 3 example applications and evaluated our system in a dyadic usability test. VirtualNexus extends the interaction space of 360° telepresence systems, offering improved physical presence, versatility, and clarity in interactions.


Personal Time-Lapse


Authors: Nhan Tran, Ethan Yang, Angelique Taylor, Abe Davis


Link


Abstract: Our bodies are constantly in motion—from the bending of arms and legs to the less conscious movement of breathing, our precise shape and location change constantly. This can make subtler developments (e.g., the growth of hair, or the healing of a wound) difficult to observe. Our work focuses on helping users record and visualize this type of subtle, longer-term change. We present a mobile tool that combines custom 3D tracking with interactive visual feedback and computational imaging to capture personal time-lapse, which approximates longer-term video of the subject (typically, part of the capturing user’s body) under a fixed viewpoint, body pose, and lighting condition. These personal time-lapses offer a powerful and detailed way to track visual changes of the subject over time. We begin with a formative study that examines what makes personal time-lapse so difficult to capture. Building on our findings, we motivate the design of our capture tool, evaluate this design with users, and demonstrate its effectiveness in a variety of challenging examples.


Chromaticity Gradient Mapping for Interactive Control of Color Contrast in Images and Video


Authors: Ruyu Yan, Jiatian Sun, Abe Davis


Link


Abstract: We present a novel perceptually-motivated interactive tool for using color contrast to enhance details represented in the lightness channel of images and video. Our method lets users adjust the perceived contrast of different details by manipulating local chromaticity while preserving the original lightness of individual pixels. Inspired by the use of similar chromaticity mappings in painting, our tool effectively offers contrast along a user-selected gradient of chromaticities as additional bandwidth for representing and enhancing different details in an image. We provide an interface for our tool that closely resembles the familiar design of tonal contrast curve controls that are available in most professional image editing software. We show that our tool is effective for enhancing the perceived contrast of details without altering lightness in an image and present many examples of effects that can be achieved with our method on both images and video.


Future of Typing


OptiBasePen: Mobile Base+Pen Input on Passive Surfaces by Sensing Relative Base Motion Plus Close-Range Pen Position


Authors: Andreas Fender, Mohamed Kari


Link


Abstract: Digital pen input devices based on absolute pen position sensing, such as Wacom Pens, support high-fidelity pen input. However, they require specialized sensing surfaces like drawing tablets, which can have a large desk footprint, constrain the possible input area, and limit mobility. In contrast, digital pens with integrated relative sensing enable mobile use on passive surfaces, but suffer from motion artifacts or require surface contact at all times, deviating from natural pen affordances. We present OptiBasePen, a device for mobile pen input on ordinary surfaces. Our prototype consists of two parts: the "base" on which the hand rests and the pen for fine-grained input. The base features a high-precision mouse sensor to sense its own relative motion, and two infrared image sensors to track the absolute pen tip position within the base's frame of reference. This enables pen input on ordinary surfaces without external cameras while also avoiding drift from pen micro-movements. In this work, we present our prototype as well as the general base+pen concept, which combines relative and absolute sensing.


Palmrest+: Expanding Laptop Input Space with Shear Force on Palm-Resting Area


Authors: Jisu Yim, Seoyeon Bae, Taejun Kim, Sunbum Kim, Geehyuk Lee


Link


Abstract: The palmrest area of laptops has the potential as an additional input space, considering its consistent palm contact during keyboard interaction. We propose Palmrest+, leveraging shear force exerted on the palmrest area. We suggest two input techniques: Palmrest Shortcut, for instant shortcut execution, and Palmrest Joystick, for continuous value input. These allow seamless and subtle input amidst keyboard typing. Evaluation of Palmrest Shortcut against conventional keyboard shortcuts revealed faster performance for applying shear force in unimanual and bimanual-manner with a significant reduction in gaze shifting. Additionally, the assessment of Palmrest Joystick against the laptop touchpad demonstrated comparable performance in selecting one- and two- dimensional targets with low-precision pointing, i.e., for short distances and large target sizes. The maximal hand displacement significantly decreased for both Palmrest Shortcut and Palmrest Joystick compared to conventional methods. These findings verify the feasibility and effectiveness of leveraging the palmrest area as an additional input space on laptops, offering promising enhanced typing-related user interaction experiences.


TouchInsight: Uncertainty-aware Rapid Touch and Text Input for Mixed Reality from Egocentric Vision


Authors: Paul Streli, Mark Richardson, Fadi Botros, Shugao Ma, Robert Wang, Christian Holz


Link


Abstract: While passive surfaces offer numerous benefits for interaction in mixed reality, reliably detecting touch input solely from head-mounted cameras has been a long-standing challenge. Camera specifics, hand self-occlusion, and rapid movements of both head and fingers introduce considerable uncertainty about the exact location of touch events. Existing methods have thus not been capable of achieving the performance needed for robust interaction. In this paper, we present a real-time pipeline that detects touch input from all ten fingers on any physical surface, purely based on egocentric hand tracking. Our method TouchInsight comprises a neural network to predict the moment of a touch event, the finger making contact, and the touch location. TouchInsight represents locations through a bivariate Gaussian distribution to account for uncertainties due to sensing inaccuracies, which we resolve through contextual priors to accurately infer intended user input. We first evaluated our method offline and found that it locates input events with a mean error of 6.3 mm, and accurately detects touch events (F1=0.99) and identifies the finger used (F1=0.96). In an online evaluation, we then demonstrate the effectiveness of our approach for a core application of dexterous touch input: two-handed text entry. In our study, participants typed 37.0 words per minute with an uncorrected error rate of 2.9% on average.
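
To make the uncertainty-aware decoding idea concrete, the toy Python sketch below combines a bivariate Gaussian likelihood over the sensed touch location with per-key contextual priors. It is only an illustration of the general approach; the key layout, covariance, and priors are made up, and this is not TouchInsight's actual model.

```python
# Toy illustration of uncertainty-aware key decoding: combine a bivariate
# Gaussian likelihood over the sensed touch location with a contextual prior
# per key. Key layout, covariance, and priors below are invented.
import numpy as np

def gaussian_pdf(point, mean, cov):
    """Density of a 2D Gaussian with the given mean and covariance at `point`."""
    diff = point - mean
    inv = np.linalg.inv(cov)
    norm = 1.0 / (2 * np.pi * np.sqrt(np.linalg.det(cov)))
    return norm * np.exp(-0.5 * diff @ inv @ diff)

def decode_key(touch_mean, touch_cov, key_centers, key_priors):
    """Posterior over keys given an uncertain touch location and contextual priors."""
    scores = {}
    for key, center in key_centers.items():
        # Likelihood of the key center under the touch distribution, times a
        # contextual prior (e.g. from a language model).
        scores[key] = gaussian_pdf(np.array(center), touch_mean, touch_cov) * key_priors[key]
    total = sum(scores.values())
    return {k: v / total for k, v in scores.items()}

keys = {"f": (0.0, 0.0), "g": (19.0, 0.0), "v": (5.0, 19.0)}   # key centers (mm)
priors = {"f": 0.5, "g": 0.3, "v": 0.2}                        # contextual priors
touch = np.array([8.0, 2.0])                                   # sensed touch location (mm)
cov = np.array([[40.0, 0.0], [0.0, 40.0]])                     # sensing uncertainty (mm^2)
print(decode_key(touch, cov, keys, priors))
```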


Can Capacitive Touch Images Enhance Mobile Keyboard Decoding?


Authors: Piyawat Lertvittayakumjorn, Shanqing Cai, Billy Dou, Cedric Ho, Shumin Zhai


Link


Abstract: Capacitive touch sensors capture the two-dimensional spatial profile (referred to as a touch heatmap) of a finger's contact with a mobile touchscreen. However, the research and design of touchscreen mobile keyboards -- one of the most speed and accuracy demanding touch interfaces -- has focused on the location of the touch centroid derived from the touch image heatmap as the input, discarding the rest of the raw spatial signals. In this paper, we investigate whether touch heatmaps can be leveraged to further improve the tap decoding accuracy for mobile touchscreen keyboards. Specifically, we developed and evaluated machine-learning models that interpret user taps by using the centroids and/or the heatmaps as their input and studied the contribution of the heatmaps to model performance. The results show that adding the heatmap into the input feature set led to 21.4% relative reduction of character error rates on average, compared to using the centroid alone. Furthermore, we conducted a live user study with the centroid-based and heatmap-based decoders built into Pixel 6 Pro devices and observed lower error rate, faster typing speed, and higher self-reported satisfaction score based on the heatmap-based decoder than the centroid-based decoder. These findings underline the promise of utilizing touch heatmaps for improving typing experience in mobile keyboards.
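
The contrast drawn above is between a centroid-only input and the full heatmap. The short sketch below (illustrative only; the real decoders are learned models) shows how a touch centroid is typically reduced from a capacitive heatmap, and how much information the full heatmap retains by comparison.

```python
# Sketch of the two input representations discussed above: the touch centroid
# computed from a capacitive heatmap vs. the full heatmap itself.
import numpy as np

def touch_centroid(heatmap: np.ndarray):
    """Intensity-weighted centroid (row, col) of a 2D capacitance heatmap."""
    total = heatmap.sum()
    rows, cols = np.indices(heatmap.shape)
    return (float((rows * heatmap).sum() / total),
            float((cols * heatmap).sum() / total))

heatmap = np.array([[0, 1, 0],
                    [2, 8, 3],
                    [0, 2, 1]], dtype=float)
print(touch_centroid(heatmap))    # centroid-only feature: 2 numbers
print(heatmap.flatten().shape)    # full-heatmap feature: 9 numbers here
```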


Bodily Signals


Empower Real-World BCIs with NIRS-X: An Adaptive Learning Framework that Harnesses Unlabeled Brain Signals


Authors: Liang Wang, Jiayan Zhang, Jinyang Liu, Devon McKeon, David Guy Brizan, Giles Blaney, Robert Jacob


Link


Abstract: Brain-Computer Interfaces (BCIs) using functional near-infrared spectroscopy (fNIRS) hold promise for future interactive user interfaces due to their ease of deployment and declining cost. However, they typically require a separate calibration process for each user and task, which can be burdensome. Machine learning helps, but faces a data scarcity problem. Due to inherent inter-user variations in physiological data, it has been typical to create a new annotated training dataset for every new task and user. To reduce dependence on such extensive data collection and labeling, we present an adaptive learning framework, NIRS-X, to harness more easily accessible unlabeled fNIRS data. NIRS-X includes two key components: NIRSiam and NIRSformer. We use the NIRSiam algorithm to extract generalized brain activity representations from unlabeled fNIRS data obtained from previous users and tasks, and then transfer that knowledge to new users and tasks. In conjunction, we design a neural network, NIRSformer, tailored for capturing both local and global, spatial and temporal relationships in multi-channel fNIRS brain input signals. By using unlabeled data from both a previously released fNIRS2MW visual $n$-back dataset and a newly collected fNIRS2MW audio $n$-back dataset, NIRS-X demonstrates its strong adaptation capability to new users and tasks. Results show comparable or superior performance to supervised methods, making NIRS-X promising for real-world fNIRS-based BCIs.


Understanding the Effects of Restraining Finger Coactivation in Mid-Air Typing: from a Neuromechanical Perspective


Authors: Hechuan Zhang, Xuewei Liang, Ying Lei, Yanjun Chen, Zhenxuan He, Yu Zhang, Lihan Chen, Hongnan Lin, Teng Han, Feng Tian


Link


Abstract: Typing in mid-air is often perceived as intuitive yet presents challenges due to finger coactivation, a neuromechanical phenomenon that involves involuntary finger movements stemming from the lack of physical constraints. Previous studies have examined and addressed the impacts of finger coactivation using algorithmic approaches. Alternatively, this paper explores the neuromechanical effects of finger coactivation on mid-air typing, aiming to deepen our understanding and provide valuable insights to improve these interactions. We utilized a wearable device that restrains finger coactivation as a prop to conduct two mid-air studies, including a rapid finger-tapping task and a ten-finger typing task. The results revealed that restraining coactivation not only reduced mispresses, a classic coactivated error commonly considered a harm caused by coactivation, but also, unexpectedly, reduced motor control errors and spelling errors, which are regarded as non-coactivated errors. Additionally, the study evaluated the neural resources involved in motor execution using functional Near Infrared Spectroscopy (fNIRS), which tracked cortical arousal during mid-air typing. The findings demonstrated decreased activation in the primary motor cortex of the left hemisphere when coactivation was restrained, suggesting a diminished motor execution load. This reduction suggests that a portion of neural resources is conserved, which also potentially aligns with perceived lower mental workload and decreased frustration levels.


What is Affective Touch Made Of? A Soft Capacitive Sensor Array Reveals the Interplay between Shear, Normal Stress and Individuality


Authors: Devyani McLaren, Jian Gao, Xiulun Yin, Rúbia Reis Guerra, Preeti Vyas, Chrys Morton, Xi Laura Cang, Yizhong Chen, Yiyuan Sun, Ying Li, John Madden, Karon MacLean


Link


Abstract: Humans physically express emotion by modulating parameters that register on mammalian skin mechanoreceptors, but are unavailable in current touch-sensing technology. Greater sensory richness combined with data on affect-expression composition is a prerequisite to estimating affect from touch, with applications including physical human-robot interaction. To examine shear alongside more easily captured normal stresses, we tailored recent capacitive technology to attain performance suitable for affective touch, creating a flexible, reconfigurable and soft 36-taxel array that detects multitouch normal and 2-dimensional shear at ranges of 1.5kPa-43kPa and ±0.3-3.8kPa respectively, wirelessly at ~43Hz (1548 taxels/s). In a deep-learning classification of 9 gestures (N=16), inclusion of shear data improved accuracy to 88%, compared to 80% with normal stress data alone, confirming shear stress's expressive centrality. Using this rich data, we analyse the interplay of sensed-touch features, gesture attributes and individual differences, propose affective-touch sensing requirements, and share technical considerations for performance and practicality.


Exploring the Effects of Sensory Conflicts on Cognitive Fatigue in VR Remappings


Honorable Mention


Authors: Tianren Luo, Gaozhang Chen, Yijian Wen, Pengxiang Wang, yachun fan, Teng Han, Feng Tian


Link


Abstract: Virtual reality (VR) is found to present significant cognitive challenges due to its immersive nature and frequent sensory conflicts. This study systematically investigates the impact of sensory conflicts induced by VR remapping techniques on cognitive fatigue, and unveils their correlation. We utilized three remapping methods (haptic repositioning, head-turning redirection, and giant resizing) to create different types of sensory conflicts, and measured perceptual thresholds to induce various intensities of the conflicts. Through experiments involving cognitive tasks along with subjective and physiological measures, we found that all three remapping methods influenced the onset and severity of cognitive fatigue, with visual-vestibular conflict having the greatest impact. Interestingly, visual-experiential/memory conflict showed a mitigating effect on cognitive fatigue, emphasizing the role of novel sensory experiences. This study contributes to a deeper understanding of cognitive fatigue under sensory conflicts and provides insights for designing VR experiences that align better with human perceptual and cognitive capabilities.


Shared Spaces


BlendScape: Enabling End-User Customization of Video-Conferencing Environments through Generative AI


Honorable Mention


Authors: Shwetha Rajaram, Nels Numan, Bala Kumaravel, Nicolai Marquardt, Andrew Wilson


Link


Abstract: Today’s video-conferencing tools support a rich range of professional and social activities, but their generic meeting environments cannot be dynamically adapted to align with distributed collaborators’ needs. To enable end-user customization, we developed BlendScape, a rendering and composition system for video-conferencing participants to tailor environments to their meeting context by leveraging AI image generation techniques. BlendScape supports flexible representations of task spaces by blending users’ physical or digital backgrounds into unified environments and implements multimodal interaction techniques to steer the generation. Through an exploratory study with 15 end-users, we investigated whether and how they would find value in using generative AI to customize video-conferencing environments. Participants envisioned using a system like BlendScape to facilitate collaborative activities in the future, but required further controls to mitigate distracting or unrealistic visual elements. We implemented scenarios to demonstrate BlendScape's expressiveness for supporting environment design strategies from prior work and propose composition techniques to improve the quality of environments.


MyWebstrates: Webstrates as Local-first Software


Authors: Clemens Klokmose, James Eagan, Peter van Hardenberg


Link


Abstract: Webstrates are web substrates, a practical realization of shareable dynamic media under which distributability, shareability, and malleability are fundamental software principles. Webstrates blur the distinction between application and document in a way that enables users to share, repurpose, and refit software across a variety of domains, but its reliance on a central server constrains its use; it is at odds with personal and collective control of data; and limits applications to the web. We extend the fundamental principles to include interoperability and sovereignty over data and propose MyWebstrates, an implementation of Webstrates on top of a new, lower-level substrate for synchronization built around local-first software principles. MyWebstrates registers itself in the user’s browser and function as a piece of local software that can selectively synchronise data over sync servers or peer-to-peer connections. We show how MyWebstrates extends Webstrates to enable offline collaborative use, interoperate between Webstrates on non-web technologies such as Unity, and maintain personal and collective sovereignty over data. We demonstrate how this enables new types of applications of Webstrates and discuss limitations of this approach and new challenges that it reveals.


SituationAdapt: Contextual UI Optimization in Mixed Reality with Situation Awareness via LLM Reasoning


Authors: Zhipeng Li, Christoph Gebhardt, Yves Inglin, Nicolas Steck, Paul Streli, Christian Holz


Link


Abstract: Mixed Reality is increasingly used in mobile settings beyond controlled home and office spaces. This mobility introduces the need for user interface layouts that adapt to varying contexts. However, existing adaptive systems are designed only for static environments. In this paper, we introduce SituationAdapt, a system that adjusts Mixed Reality UIs to real-world surroundings by considering environmental and social cues in shared settings. Our system consists of perception, reasoning, and optimization modules for UI adaptation. Our perception module identifies objects and individuals around the user, while our reasoning module leverages a Vision-and-Language Model to assess the placement of interactive UI elements. This ensures that adapted layouts do not obstruct relevant environmental cues or interfere with social norms. Our optimization module then generates Mixed Reality interfaces that account for these considerations as well as temporal constraints. The evaluation of SituationAdapt is two-fold: We first validate our reasoning component’s capability in assessing UI contexts comparable to human expert users. In an online user study, we then established our system’s capability of producing context-aware MR layouts, where it outperformed adaptive methods from previous work. We further demonstrate the versatility and applicability of SituationAdapt with a set of application scenarios.


Desk2Desk: Optimization-based Mixed Reality Workspace Integration for Remote Side-by-side Collaboration


Authors: Ludwig Sidenmark, Tianyu Zhang, Leen Al Lababidi, Jiannan Li, Tovi Grossman


Link


Abstract: Mixed Reality enables hybrid workspaces where physical and virtual monitors are adaptively created and moved to suit the current environment and needs. However, in shared settings, individual users’ workspaces are rarely aligned and can vary significantly in the number of monitors, available physical space, and workspace layout, creating inconsistencies between workspaces which may cause confusion and reduce collaboration. We present Desk2Desk, an optimization-based approach for remote collaboration in which the hybrid workspaces of two collaborators are fully integrated to enable immersive side-by-side collaboration. The optimization adjusts each user’s workspace in layout and number of shared monitors and creates a mapping between workspaces to handle inconsistencies between workspaces due to physical constraints (e.g. physical monitors). We show in a user study how our system adaptively merges dissimilar physical workspaces to enable immersive side-by-side collaboration, and demonstrate how an optimization-based approach can effectively address dissimilar physical layouts.


SpaceBlender: Creating Context-Rich Collaborative Spaces Through Generative 3D Scene Blending


Authors: Nels Numan, Shwetha Rajaram, Bala Kumaravel, Nicolai Marquardt, Andrew Wilson


Link


Abstract: There is increased interest in using generative AI to create 3D spaces for virtual reality (VR) applications. However, today’s models produce artificial environments, falling short of supporting collaborative tasks that benefit from incorporating the user's physical context. To generate environments that support VR telepresence, we introduce SpaceBlender, a novel pipeline that utilizes generative AI techniques to blend users' physical surroundings into unified virtual spaces. This pipeline transforms user-provided 2D images into context-rich 3D environments through an iterative process consisting of depth estimation, mesh alignment, and diffusion-based space completion guided by geometric priors and adaptive text prompts. In a preliminary within-subjects study, where 20 participants performed a collaborative VR affinity diagramming task in pairs, we compared SpaceBlender with a generic virtual environment and a state-of-the-art scene generation framework, evaluating its ability to create virtual spaces suitable for collaboration. Participants appreciated the enhanced familiarity and context provided by SpaceBlender but also noted complexities in the generative environments that could detract from task focus. Drawing on participant feedback, we propose directions for improving the pipeline and discuss the value and design of blended spaces for different scenarios.


AI & Automation


Memolet: Reifying the Reuse of User-AI Conversational Memories


Authors: Ryan Yen, Jian Zhao


Link


Abstract: As users engage more frequently with AI conversational agents, conversations may exceed their memory capacity, leading to failures in correctly leveraging certain memories for tailored responses. However, in finding past memories that can be reused or referenced, users need to retrieve relevant information in various conversations and articulate to the AI their intention to reuse these memories. To support this process, we introduce Memolet, an interactive object that reifies memory reuse. Users can directly manipulate Memolet to specify which memories to reuse and how to use them. We developed a system demonstrating Memolet's interaction across various memory reuse stages, including memory extraction, organization, prompt articulation, and generation refinement. We examine the system's usefulness with an N=12 within-subject study and provide design implications for future systems that support user-AI conversational memory reusing.


VIME: Visual Interactive Model Explorer for Identifying Capabilities and Limitations of Machine Learning Models for Sequential Decision-Making


Authors: Anindya Das Antar, Somayeh Molaei, Yan-Ying Chen, Matthew Lee, Nikola Banovic


Link


Abstract: Ensuring that Machine Learning (ML) models make correct and meaningful inferences is necessary for the broader adoption of such models into high-stakes decision-making scenarios. Thus, ML model engineers increasingly use eXplainable AI (XAI) tools to investigate the capabilities and limitations of their ML models before deployment. However, explaining sequential ML models, which make a series of decisions at each timestep, remains challenging. We present Visual Interactive Model Explorer (VIME), an XAI toolbox that enables ML model engineers to explain decisions of sequential models in different "what-if" scenarios. Our evaluation with 14 ML experts, who investigated two existing sequential ML models using VIME and a baseline XAI toolbox to explore "what-if" scenarios, showed that VIME made it easier to identify and explain instances when the models made wrong decisions compared to the baseline. Our work informs the design of future interactive XAI mechanisms for evaluating sequential ML-based decision support systems.


SERENUS: Alleviating Low-Battery Anxiety Through Real-time, Accurate, and User-Friendly Energy Consumption Prediction of Mobile Applications


Authors: Sera Lee, Dae R. Jeong, Junyoung Choi, Jaeheon Kwak, Seoyun Son, Jean Song, Insik Shin


Link


Abstract: Low-battery anxiety has emerged as a result of growing dependence on mobile devices, where the anxiety arises when the battery level runs low. While battery life can be extended through power-efficient hardware and software optimization techniques, low-battery anxiety will remain a phenomenon as long as mobile devices rely on batteries. In this paper, we investigate how an accurate real-time energy consumption prediction at the application-level can improve the user experience in low-battery situations. We present Serenus, a mobile system framework specifically tailored to predict the energy consumption of each mobile application and present the prediction in a user-friendly manner. We conducted user studies using Serenus to verify that highly accurate energy consumption predictions can effectively alleviate low-battery anxiety by assisting users in planning their application usage based on the remaining battery life. We summarize requirements to mitigate users’ anxiety, guiding the design of future mobile system frameworks.


Poses as Input


SolePoser: Real-Time 3D Human Pose Estimation using Insole Pressure Sensors


Authors: Erwin Wu, Rawal Khirodkar, Hideki Koike, Kris Kitani


Link


Abstract: We propose SolePoser, a real-time 3D pose estimation system that leverages only a single pair of insole sensors. Unlike conventional methods relying on fixed cameras or bulky wearable sensors, our approach offers minimal and natural setup requirements. The proposed system utilizes pressure and IMU sensors embedded in insoles to capture the body weight's pressure distribution at the feet and its 6 DoF acceleration. This information is used to estimate the 3D full-body joint position by a two-stream transformer network. A novel double-cycle consistency loss and a cross-attention module are further introduced to learn the relationship between 3D foot positions and their pressure distributions. We also introduce two different datasets of sports and daily exercises, offering 908k frames across eight different activities. Our experiments show that our method's performance is on par with top-performing approaches that utilize more IMUs, and it even outperforms third-person-view camera-based methods in certain scenarios.


Gait Gestures: Examining Stride and Foot Strike Variation as an Input Method While Walking


Authors: Ching-Yi Tsai, Ryan Yen, Daekun Kim, Daniel Vogel


Link


Abstract: Walking is a cyclic pattern of alternating footstep strikes, with each pair of steps forming a stride, and a series of strides forming a gait. We conduct a systematic examination of different kinds of intentional variations from a normal gait that could be used as input actions without interrupting overall walking progress. A design space of 22 candidate Gait Gestures is generated by adapting previous standing foot input actions and identifying new actions possible in a walking context. A formative study (n=25) examines movement easiness, social acceptability, and walking compatibility with foot movement logging to calculate temporal and spatial characteristics. Using a categorization of these results, 7 gestures are selected for a wizard-of-oz prototype demonstrating an AR interface controlled by Gait Gestures for ordering food and audio playback while walking. As a technical proof-of-concept, a gait gesture recognizer is developed and tested using the formative study data.
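
As a rough illustration of the temporal stride characteristics mentioned above, the sketch below derives stride times from alternating foot-strike timestamps and flags exaggerated strides that a recognizer might treat as gesture candidates. The timestamps and threshold are hypothetical, not taken from the paper.

```python
# Illustrative sketch: simple temporal stride features from foot-strike
# timestamps, in the spirit of the "temporal and spatial characteristics"
# mentioned above. Timestamps and the threshold are hypothetical.

def stride_times(strike_times):
    """Stride time = interval between consecutive strikes of the same foot,
    i.e. every other footstep in an alternating gait."""
    same_foot = strike_times[::2]
    return [t2 - t1 for t1, t2 in zip(same_foot, same_foot[1:])]

def flag_exaggerated_strides(strides, baseline, threshold=1.3):
    """Flag strides noticeably longer than the walker's baseline, which a
    recognizer might treat as intentional gesture candidates."""
    return [s > threshold * baseline for s in strides]

strikes = [0.00, 0.52, 1.05, 1.57, 2.45, 2.98]   # seconds, alternating feet
strides = stride_times(strikes)
print(strides)                                    # [1.05, 1.40]
print(flag_exaggerated_strides(strides, baseline=1.05))  # [False, True]
```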


EgoTouch: On-Body Touch Input Using AR/VR Headset Cameras


Authors: Vimal Mollyn, Chris Harrison


Link


Abstract: In augmented and virtual reality (AR/VR) experiences, a user’s arms and hands can provide a convenient and tactile surface for touch input. Prior work has shown on-body input to have significant speed, accuracy, and ergonomic benefits over in-air interfaces, which are common today. In this work, we demonstrate high accuracy, bare hands (i.e., no special instrumentation of the user) skin input using just an RGB camera, like those already integrated into all modern XR headsets. Our results show this approach can be accurate, and robust across diverse lighting conditions, skin tones, and body motion (e.g., input while walking). Finally, our pipeline also provides rich input metadata including touch force, finger identification, angle of attack, and rotation. We believe these are the requisite technical ingredients to more fully unlock on-skin interfaces that have been well motivated in the HCI literature but have lacked robust and practical methods.


MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices


Authors: Vasco Xu, Chenfeng Gao, Henry Hoffman, Karan Ahuja


Link


Abstract: There has been a continued trend towards minimizing instrumentation for full-body motion capture, going from specialized rooms and equipment, to arrays of worn sensors and recently sparse inertial pose capture methods. However, as these techniques migrate towards lower-fidelity IMUs on ubiquitous commodity devices, like phones, watches, and earbuds, challenges arise including compromised online performance, temporal consistency, and loss of global translation due to sensor noise and drift. Addressing these challenges, we introduce MobilePoser, a real-time system for full-body pose and global translation estimation using any available subset of IMUs already present in these consumer devices. MobilePoser employs a multi-stage deep neural network for kinematic pose estimation followed by a physics-based motion optimizer, achieving state-of-the-art accuracy while remaining lightweight. We conclude with a series of demonstrative applications to illustrate the unique potential of MobilePoser across a variety of fields, such as health and wellness, gaming, and indoor navigation to name a few.


Touchscreen-based Hand Tracking for Remote Whiteboard Interaction


Authors: Xinshuang Liu, Yizhong Zhang, Xin Tong


Link


Abstract: In whiteboard-based remote communication, the seamless integration of drawn content and hand-screen interactions is essential for an immersive user experience. Previous methods either require bulky device setups for capturing hand gestures or fail to accurately track the hand poses from capacitive images. In this paper, we present a real-time method for precise tracking 3D poses of both hands from capacitive video frames. To this end, we develop a deep neural network to identify hands and infer hand joint positions from capacitive frames, and then recover 3D hand poses from the hand-joint positions via a constrained inverse kinematic solver. Additionally, we design a device setup for capturing high-quality hand-screen interaction data and obtained a more accurate synchronized capacitive video and hand pose dataset. Our method improves the accuracy and stability of 3D hand tracking for capacitive frames while maintaining a compact device setup for remote communication. We validate our scheme design and its superior performance on 3D hand pose tracking and demonstrate the effectiveness of our method in whiteboard-based remote communication.


SeamPose: Repurposing Seams as Capacitive Sensors in a Shirt for Upper-Body Pose Tracking


Authors: Tianhong Yu, Mary Zhang, Peter He, Chi-Jung Lee, Cassidy Cheesman, Saif Mahmud, Ruidong Zhang, Francois Guimbretiere, Cheng Zhang


Link


Abstract: Seams are areas of overlapping fabric formed by stitching two or more pieces of fabric together in the cut-and-sew apparel manufacturing process. In SeamPose, we repurposed seams as capacitive sensors in a shirt for continuous upper-body pose estimation. Compared to previous all-textile motion-capturing garments that place the electrodes on the clothing surface, our solution leverages existing seams inside of a shirt by machine-sewing insulated conductive threads over the seams. The unique invisibilities and placements of the seams afford the sensing shirt to look and wear similarly as a conventional shirt while providing exciting pose-tracking capabilities. To validate this approach, we implemented a proof-of-concept untethered shirt with 8 capacitive sensing seams. With a 12-participant user study, our customized deep-learning pipeline accurately estimates the relative (to the pelvis) upper-body 3D joint positions with a mean per joint position error (MPJPE) of 6.0 cm. SeamPose represents a step towards unobtrusive integration of smart clothing for everyday pose estimation.
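
The 6.0 cm figure above is a mean per joint position error (MPJPE); the snippet below shows how that standard metric is typically computed, applied here to made-up arrays rather than SeamPose data.

```python
# Sketch of the standard mean per joint position error (MPJPE) metric,
# computed on invented predicted vs. ground-truth joint positions.
import numpy as np

def mpjpe(pred: np.ndarray, gt: np.ndarray) -> float:
    """pred, gt: arrays of shape (num_frames, num_joints, 3), same units."""
    return float(np.linalg.norm(pred - gt, axis=-1).mean())

pred = np.zeros((2, 14, 3))
gt = np.full((2, 14, 3), 0.03)        # every joint off by about 5.2 cm in 3D
print(f"MPJPE: {mpjpe(pred, gt) * 100:.1f} cm")
```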


AI as Copilot


DiscipLink: Unfolding Interdisciplinary Information Seeking Process via Human-AI Co-Exploration


Authors: Chengbo Zheng, Yuanhao Zhang, Zeyu Huang, Chuhan Shi, Minrui Xu, Xiaojuan Ma


Link


Abstract: Interdisciplinary studies often require researchers to explore literature in diverse branches of knowledge. Yet, navigating through the highly scattered knowledge from unfamiliar disciplines poses a significant challenge. In this paper, we introduce DiscipLink, a novel interactive system that facilitates collaboration between researchers and large language models (LLMs) in interdisciplinary information seeking (IIS). Based on users' topic of interest, DiscipLink initiates exploratory questions from the perspectives of possible relevant fields of study, and users can further tailor these questions. DiscipLink then supports users in searching and screening papers under selected questions by automatically expanding queries with disciplinary-specific terminologies, extracting themes from retrieved papers, and highlighting the connections between papers and questions. Our evaluation, comprising a within-subject comparative experiment and an open-ended exploratory study, reveals that DiscipLink can effectively support researchers in breaking down disciplinary boundaries and integrating scattered knowledge in diverse fields. The findings underscore the potential of LLM-powered tools in fostering information-seeking practices and bolstering interdisciplinary research.


Improving Steering and Verification in AI-Assisted Data Analysis with Interactive Task Decomposition


Authors: Majeed Kazemitabaar, Jack Williams, Ian Drosos, Tovi Grossman, Austin Henley, Carina Negreanu, Advait Sarkar


Link


Abstract: LLM-powered tools like ChatGPT Data Analysis, have the potential to help users tackle the challenging task of data analysis programming, which requires expertise in data processing, programming, and statistics. However, our formative study (n=15) uncovered serious challenges in verifying AI-generated results and steering the AI (i.e., guiding the AI system to produce the desired output). We developed two contrasting approaches to address these challenges. The first (Stepwise) decomposes the problem into step-by-step subgoals with pairs of editable assumptions and code until task completion, while the second (Phasewise) decomposes the entire problem into three editable, logical phases: structured input/output assumptions, execution plan, and code. A controlled, within-subjects experiment (n=18) compared these systems against a conversational baseline. Users reported significantly greater control with the Stepwise and Phasewise systems, and found intervention, correction, and verification easier, compared to the baseline. The results suggest design guidelines and trade-offs for AI-assisted data analysis tools.


VizGroup: An AI-assisted Event-driven System for Collaborative Programming Learning Analytics


Authors: Xiaohang Tang, Sam Wong, Kevin Pu, Xi Chen, Yalong Yang, Yan Chen


Link


Abstract: Programming instructors often conduct collaborative learning activities, like Peer Instruction, to foster a deeper understanding in students and enhance their engagement with learning. These activities, however, may not always yield productive outcomes due to the diversity of student mental models and their ineffective collaboration. In this work, we introduce VizGroup, an AI-assisted system that enables programming instructors to easily oversee students' real-time collaborative learning behaviors during large programming courses. VizGroup leverages Large Language Models (LLMs) to recommend event specifications for instructors so that they can simultaneously track and receive alerts about key correlation patterns between various collaboration metrics and ongoing coding tasks. We evaluated VizGroup with 12 instructors in a comparison study using a dataset collected from a Peer Instruction activity that was conducted in a large programming lecture. The results showed that VizGroup helped instructors effectively overview, narrow down, and track nuances throughout students' behaviors.


Who did it? How User Agency is influenced by Visual Properties of Generated Images


Authors: Johanna Didion, Krzysztof Wolski, Dennis Wittchen, David Coyle, Thomas Leimkühler, Paul Strohmeier


Link


Abstract: The increasing proliferation of AI and GenAI requires new interfaces tailored to how their specific affordances and human requirements meet. As GenAI is capable of taking over tasks from users on an unprecedented scale, designing the experience of agency -- if and how users experience control over the process and responsibility over the outcome -- is crucial. As an initial step towards design guidelines for shaping agency, we present a study that explores how features of AI-generated images influence users' experience of agency. We use two measures; temporal binding to implicitly estimate pre-reflective agency and magnitude estimation to assess user judgments of agency. We observe that abstract images lead to more temporal binding than images with semantic meaning. In contrast, the closer an image aligns with what a user might expect, the higher the agency judgment. When comparing the experiment results with objective metrics of image differences, we find that temporal binding results correlate with semantic differences, while agency judgments are better explained by local differences between images. This work contributes towards a future where agency is considered an important design dimension for GenAI interfaces.


FathomGPT: A Natural Language Interface for Interactively Exploring Ocean Science Data


Authors: Nabin Khanal, Chun Meng Yu, Jui-Cheng Chiu, Anav Chaudhary, Ziyue Zhang, Kakani Katija, Angus Forbes


Link


Abstract: We introduce FathomGPT, an open source system for the interactive investigation of ocean science data via a natural language interface. FathomGPT was developed in close collaboration with marine scientists to enable researchers and ocean enthusiasts to explore and analyze the FathomNet image database. FathomGPT provides a custom information retrieval pipeline that leverages OpenAI’s large language models to enable: the creation of complex queries to retrieve images, taxonomic information, and scientific measurements; mapping common names and morphological features to scientific names; generating interactive charts on demand; and searching by image or specified patterns within an image. In designing FathomGPT, particular emphasis was placed on enhancing the user's experience by facilitating free-form exploration and optimizing response times. We present an architectural overview and implementation details of FathomGPT, along with a series of ablation studies that demonstrate the effectiveness of our approach to name resolution, fine tuning, and prompt modification. Additionally, we present usage scenarios of interactive data exploration sessions and document feedback from ocean scientists and machine learning experts.


VRCopilot: Authoring 3D Layouts with Generative AI Models in VR


Authors: Lei Zhang, Jin Pan, Jacob Gettig, Steve Oney, Anhong Guo


Link


Abstract: Immersive authoring provides an intuitive medium for users to create 3D scenes via direct manipulation in Virtual Reality (VR). Recent advances in generative AI have enabled the automatic creation of realistic 3D layouts. However, it is unclear how capabilities of generative AI can be used in immersive authoring to support fluid interactions, user agency, and creativity. We introduce VRCopilot, a mixed-initiative system that integrates pre-trained generative AI models into immersive authoring to facilitate human-AI co-creation in VR. VRCopilot presents multimodal interactions to support rapid prototyping and iterations with AI, and intermediate representations such as wireframes to augment user controllability over the created content. Through a series of user studies, we evaluated the potential and challenges in manual, scaffolded, and automatic creation in immersive authoring. We found that scaffolded creation using wireframes enhanced the user agency compared to automatic creation. We also found that manual creation via multimodal specification offers the highest sense of creativity and agency.


Body as the Interface


MouthIO: Fabricating Customizable Oral User Interfaces with Integrated Sensing and Actuation


Authors: Yijing Jiang, Julia Kleinau, Till Max Eckroth, Eve Hoggan, Stefanie Mueller, Michael Wessely


Link


Abstract: This paper introduces MouthIO, the first customizable intraoral user interface that can be equipped with various sensors and output components. MouthIO consists of an SLA-printed brace that houses a flexible PCB within a bite-proof enclosure positioned between the molar teeth and inner cheeks. Our MouthIO design and fabrication technique enables makers to customize the oral user interfaces in both form and function at low cost. All parts in contact with the oral cavity are made of bio-compatible materials to ensure safety, while the design takes into account both comfort and portability. We demonstrate MouthIO through three application examples ranging from beverage consumption monitoring, health monitoring, to assistive technology. Results from our full-day user study indicate high wearability and social acceptance levels, while our technical evaluation demonstrates the device's ability to withstand adult bite forces.


Can a Smartwatch Move Your Fingers? Compact and Practical Electrical Muscle Stimulation in a Smartwatch


Honorable Mention


Authors: Akifumi Takahashi, Yudai Tanaka, Archit Tamhane, Alan Shen, Shan-Yuan Teng, Pedro Lopes


Link


Abstract: Smartwatches gained popularity in the mainstream, making them into today’s de-facto wearables. Despite advancements in sensing, haptics on smartwatches is still restricted to tactile feedback (e.g., vibration). Most smartwatch-sized actuators cannot render strong force-feedback. Simultaneously, electrical muscle stimulation (EMS) promises compact force-feedback but, to actuate fingers requires users to wear many electrodes on their forearms. While forearm electrodes provide good accuracy, they detract EMS from being a practical force-feedback interface. To address this, we propose moving the electrodes to the wrist—conveniently packing them in the backside of a smartwatch. In our first study, we found that by cross-sectionally stimulating the wrist in 1,728 trials, we can actuate thumb extension, index extension & flexion, middle flexion, pinky flexion, and wrist flexion. Following, we engineered a compact EMS that integrates directly into a smartwatch’s wristband (with a custom stimulator, electrodes, demultiplexers, and communication). In our second study, we found that participants could calibrate our device by themselves ~50% faster than with conventional EMS. Furthermore, all participants preferred the experience of this device, especially for its social acceptability & practicality. We believe that our approach opens new applications for smartwatch-based interactions, such as haptic assistance during everyday tasks.


Power-over-Skin: Full-Body Wearables Powered By Intra-Body RF Energy


Authors: Andy Kong, Daehwa Kim, Chris Harrison


Link


Abstract: Powerful computing devices are now small enough to be easily worn on the body. However, batteries pose a major design and user experience obstacle, adding weight and volume, and generally requiring periodic device removal and recharging. In response, we developed Power-over-Skin, an approach using the human body itself to deliver power to many distributed, battery-free, worn devices. We demonstrate power delivery from on-body distances as far as from head-to-toe, with sufficient energy to power microcontrollers capable of sensing and wireless communication. We share results from a study campaign that informed our implementation, as well as experiments that validate our final system. We conclude with several demonstration devices, ranging from input controllers to longitudinal bio-sensors, which highlight the efficacy and potential of our approach.


HandPad: Make Your Hand an On-the-go Writing Pad via Human Capacitance


Authors: Yu Lu, Dian Ding, Hao Pan, Yijie Li, Juntao Zhou, Yongjian Fu, Yongzhao Zhang, Yi-Chao Chen, Guangtao Xue


Link


Abstract: The convenient text input system is a pain point for devices such as AR glasses, and it is difficult for existing solutions to balance portability and efficiency. This paper introduces HandPad, the system that turns the hand into an on-the-go touchscreen, which realizes interaction on the hand via human capacitance. HandPad achieves keystroke and handwriting inputs for letters, numbers, and Chinese characters, reducing the dependency on capacitive or pressure sensor arrays. Specifically, the system verifies the feasibility of touch point localization on the hand using the human capacitance model and proposes a handwriting recognition system based on Bi-LSTM and ResNet. The transfer learning-based system only needs a small amount of training data to build a handwriting recognition model for the target user. Experiments in real environments verify the feasibility of HandPad for keystroke (accuracy of 100%) and handwriting recognition for letters (accuracy of 99.1%), numbers (accuracy of 97.6%) and Chinese characters (accuracy of 97.9%).
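
As a stand-in for the Bi-LSTM recognizer mentioned above, the sketch below defines a small bidirectional LSTM classifier over a capacitance time-series using PyTorch. This is an assumption-laden illustration: the paper's actual architecture, channel count, and hyperparameters are not given here, and the values below are invented.

```python
# Sketch only: a small bidirectional LSTM classifier over a capacitance
# time-series, as a stand-in for the Bi-LSTM recognizer described above.
# Requires PyTorch; all shapes and hyperparameters are made up.
import torch
import torch.nn as nn

class TinyBiLSTMClassifier(nn.Module):
    def __init__(self, in_features=8, hidden=64, num_classes=26):
        super().__init__()
        self.lstm = nn.LSTM(in_features, hidden, batch_first=True, bidirectional=True)
        self.head = nn.Linear(2 * hidden, num_classes)

    def forward(self, x):                 # x: (batch, time, in_features)
        out, _ = self.lstm(x)
        return self.head(out[:, -1, :])   # classify from the last timestep

model = TinyBiLSTMClassifier()
dummy = torch.randn(4, 120, 8)            # 4 gestures, 120 samples, 8 channels
print(model(dummy).shape)                 # torch.Size([4, 26])
```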


New Visualizations


VisCourt: In-Situ Guidance for Interactive Tactic Training in Mixed Reality


Authors: Liqi Cheng, Hanze Jia, Lingyun Yu, Yihong Wu, Shuainan Ye, Dazhen Deng, Hui Zhang, Xiao Xie, Yingcai Wu


Link


Abstract: In team sports like basketball, understanding and executing tactics---coordinated plans of movements among players---are crucial yet complex, requiring extensive practice. These tactics require players to develop a keen sense of spatial and situational awareness. Traditional coaching methods, which mainly rely on basketball tactic boards and video instruction, often fail to bridge the gap between theoretical learning and the real-world application of tactics, due to shifts in view perspectives and a lack of direct experience with tactical scenarios. To address this challenge, we introduce VisCourt, a Mixed Reality (MR) tactic training system, in collaboration with a professional basketball team. To set up the MR training environment, we employed semi-automatic methods to simulate realistic 3D tactical scenarios and iteratively designed visual in-situ guidance. This approach enables full-body engagement in interactive training sessions on an actual basketball court and provides immediate feedback, significantly enhancing the learning experience. A user study with athletes and enthusiasts shows the effectiveness and satisfaction with VisCourt in basketball training and offers insights for the design of future SportsXR training systems.


Block and Detail: Scaffolding Sketch-to-Image Generation


Authors: Vishnu Sarukkai, Lu Yuan, Mia Tang, Maneesh Agrawala, Kayvon Fatahalian


Link


Abstract: We introduce a novel sketch-to-image tool that aligns with the iterative refinement process of artists. Our tool lets users sketch blocking strokes to coarsely represent the placement and form of objects and detail strokes to refine their shape and silhouettes. We develop a two-pass algorithm for generating high-fidelity images from such sketches at any point in the iterative process. In the first pass we use a ControlNet to generate an image that strictly follows all the strokes (blocking and detail) and in the second pass we add variation by renoising regions surrounding blocking strokes. We also present a dataset generation scheme that, when used to train a ControlNet architecture, allows regions that do not contain strokes to be interpreted as not-yet-specified regions rather than empty space. We show that this partial-sketch-aware ControlNet can generate coherent elements from partial sketches that only contain a small number of strokes. The high-fidelity images produced by our approach serve as scaffolds that can help the user adjust the shape and proportions of objects or add additional elements to the composition. We demonstrate the effectiveness of our approach with a variety of examples and evaluative comparisons. Quantitatively, novice viewers prefer the quality of images from our algorithm over a baseline Scribble ControlNet for 82% of the pairs and found our images had less distortion in 80% of the pairs.


EVE: Enabling Anyone to Train Robots using Augmented Reality


Authors: Jun Wang, Chun-Cheng Chang, Jiafei Duan, Dieter Fox, Ranjay Krishna


Link


Abstract: The increasing affordability of robot hardware is accelerating the integration of robots into everyday activities. However, training a robot to automate a task requires expensive trajectory data where a trained human annotator moves a physical robot to train it. Consequently, only those with access to robots produce demonstrations to train robots. In this work, we remove this restriction with EVE, an iOS app that enables everyday users to train robots using intuitive augmented reality visualizations, without needing a physical robot. With EVE, users can collect demonstrations by specifying waypoints with their hands, visually inspecting the environment for obstacles, modifying existing waypoints, and verifying collected trajectories. In a user study (N=14, D=30) consisting of three common tabletop tasks, EVE outperformed three state-of-the-art interfaces in success rate and was comparable to kinesthetic teaching—physically moving a physical robot—in completion time, usability, motion intent communication, enjoyment, and preference (mean of p=0.30). EVE allows users to train robots for personalized tasks, such as sorting desk supplies, organizing ingredients, or setting up board games. We conclude by enumerating limitations and design considerations for future AR-based demonstration collection systems for robotics.

+

avaTTAR: Table Tennis Stroke Training with On-body and Detached Visualization in Augmented Reality

+

Authors: Dizhi Ma, Xiyun Hu, Jingyu Shi, Mayank Patel, Rahul Jain, Ziyi Liu, Zhengzhe Zhu, Karthik Ramani

+

Link

+

Abstract: Table tennis stroke training is a critical aspect of player development. We designed a new augmented reality (AR) system, avaTTAR, for table tennis stroke training. The system provides both “on-body” (first-person view) and “detached” (third-person view) visual cues, enabling users to visualize target strokes and correct their attempts effectively with this dual-perspective setup. By employing a combination of pose estimation algorithms and IMU sensors, avaTTAR captures and reconstructs the 3D body pose and paddle orientation of users during practice, allowing real-time comparison with expert strokes. Through a user study, we affirm avaTTAR's capacity to amplify player experience and training results.

+

Big to Small Fab

+

Don't Mesh Around: Streamlining Manual-Digital Fabrication Workflows with Domain-Specific 3D Scanning

+

Authors: Ilan Moyer, Sam Bourgault, Devon Frost, Jennifer Jacobs

+

Link

+

Abstract: Software-first digital fabrication workflows are often at odds with material-driven approaches to design. Material-driven design is especially critical in manual ceramics, where the craftsperson shapes the form through hands-on engagement. We present the Craft-Aligned Scanner (CAS), a 3D scanning and clay-3D printing system that enables practitioners to design for digital fabrication through traditional pottery techniques. The CAS augments a pottery wheel that has 3D printing capabilities with a precision distance sensor on a vertically oriented linear axis. By increasing the height of the sensor as the wheel turns, we directly synthesize a 3D spiralized toolpath from the geometry of the object on the wheel, enabling the craftsperson to immediately transition from manual fabrication to 3D printing without leaving the tool. We develop new digital fabrication workflows with CAS to augment scanned forms with functional features and add both procedurally and real-time-generated surface textures. CAS demonstrates how 3D printers can support material-first digital fabrication design without foregoing the expressive possibilities of software-based design.
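
As an illustration of the scanning idea above, the sketch below synthesizes a spiralized toolpath from per-angle distance readings: as the wheel turns, each sensed radius is paired with a steadily rising z to form one continuous spiral of (x, y, z) points. The sampling rate, layer height, and radii are illustrative assumptions, not parameters from the paper.

```python
import math

def spiral_toolpath(radii_mm, samples_per_rev=90, layer_height_mm=1.0):
    """radii_mm: sensed radius per angular sample, in scan order."""
    path = []
    for i, r in enumerate(radii_mm):
        theta = 2 * math.pi * (i % samples_per_rev) / samples_per_rev
        z = layer_height_mm * i / samples_per_rev  # height rises continuously with the scan
        path.append((r * math.cos(theta), r * math.sin(theta), z))
    return path

# Two revolutions around a roughly cylindrical 40 mm form.
print(spiral_toolpath([40.0] * 180)[:2])
```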

+

E-Joint: Fabrication of Large-Scale Interactive Objects Assembled by 3D Printed Conductive Parts with Copper Plated Joints

+

Authors: Xiaolong Li, Cheng Yao, Shang Shi, Shuyue Feng, Yujie Zhou, Haoye Dong, Shichao Huang, Xueyan Cai, Kecheng Jin, Fangtian Ying, Guanyun Wang

+

Link

+

Abstract: The advent of conductive thermoplastic filaments and multi-material 3D printing has made it feasible to create interactive 3D printed objects. Yet, challenges arise due to the volume constraints of desktop 3D printers and the high resistive characteristics of current conductive materials, making the fabrication of large-scale or highly conductive interactive objects daunting. We propose E-Joint, a novel fabrication pipeline for 3D printed objects utilizing mortise and tenon joint structures combined with a copper plating process. The segmented pieces and joint structures are customized in software along with integrated circuits, and then electroplated for enhanced conductivity. We designed four distinct electrified joint structures in experiments and evaluated the practical feasibility and effectiveness of the fabrication pipeline. By constructing three applications with those structures, we verified the usability of E-Joint in making large-scale interactive objects and show a path toward a more integrated future for manufacturing.

+

MobiPrint: A Mobile 3D Printer for Environment-Scale Design and Fabrication

+

Authors: Daniel Campos Zamora, Liang He, Jon Froehlich

+

Link

+

Abstract: 3D printing is transforming how we customize and create physical objects in engineering, accessibility, and art. However, this technology is still primarily limited to confined working areas and dedicated print beds thereby detaching design and fabrication from real-world environments and making measuring and scaling objects tedious and labor-intensive. In this paper, we present MobiPrint, a prototype mobile fabrication system that combines elements from robotics, architecture, and Human-Computer Interaction (HCI) to enable environment-scale design and fabrication in ad-hoc indoor environments. MobiPrint provides a multi-stage fabrication pipeline: first, the robotic 3D printer automatically scans and maps an indoor space; second, a custom design tool converts the map into an interactive CAD canvas for editing and placing models in the physical world; finally, the MobiPrint robot prints the object directly on the ground at the defined location. Through a "proof-by-demonstration" validation, we highlight our system's potential across different applications, including accessibility, home furnishing, floor signage, and art. We also conduct a technical evaluation to assess MobiPrint’s localization accuracy, ground surface adhesion, payload capacity, and mapping speed. We close with a discussion of open challenges and opportunities for the future of contextualized mobile fabrication.

+

StructCurves: Interlocking Block-Based Line Structures

+

Authors: Zezhou Sun, Devin Balkcom, Emily Whiting

+

Link

+

Abstract: We present a new class of curved block-based line structures whose component chains are flexible when separated, and provably rigid when assembled together into an interlocking double chain. The joints are inspired by traditional zippers, where a binding fabric or mesh connects individual teeth. Unlike traditional zippers, the joint design produces a rigid interlock with programmable curvature. This allows fairly strong curved structures to be built out of easily stored flexible chains. In this paper, we introduce a pipeline for generating these curved structures using a novel block design template based on revolute joints. Mesh embedded in these structures maintains block spacing and assembly order. We evaluate the rigidity of the curved structures through mechanical performance testing and demonstrate several applications.

+

Machine Learning for User Interfaces

+

UIClip: A Data-driven Model for Assessing User Interface Design

+

Authors: Jason Wu, Yi-Hao Peng, Xin Yue Li, Amanda Swearngin, Jeffrey Bigham, Jeffrey Nichols

+

Link

+

Abstract: User interface (UI) design is a difficult yet important task for ensuring the usability, accessibility, and aesthetic qualities of applications. In our paper, we develop a machine-learned model, UIClip, for assessing the design quality and visual relevance of a UI given its screenshot and natural language description. To train UIClip, we used a combination of automated crawling, synthetic augmentation, and human ratings to construct a large-scale dataset of UIs, collated by description and ranked by design quality. Through training on the dataset, UIClip implicitly learns properties of good and bad designs by (i) assigning a numerical score that represents a UI design's relevance and quality and (ii) providing design suggestions. In an evaluation that compared the outputs of UIClip and other baselines to UIs rated by 12 human designers, we found that UIClip achieved the highest agreement with ground-truth rankings. Finally, we present three example applications that demonstrate how UIClip can facilitate downstream applications that rely on instantaneous assessment of UI design quality: (i) UI code generation, (ii) UI design tips generation, and (iii) quality-aware UI example search.
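
For readers unfamiliar with CLIP-style scoring, the sketch below computes image-text similarity for a screenshot and two candidate descriptions using the stock OpenAI CLIP checkpoint from Hugging Face. It is only a stand-in for the general scoring setup; UIClip itself is a separate, UI-specific model, and the screenshot path and prompts are placeholders.

```python
# Sketch of CLIP-style screenshot/description scoring (not the UIClip model itself).
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

image = Image.open("screenshot.png")  # placeholder path to a UI screenshot
texts = ["a well-designed login screen", "a cluttered login screen"]

inputs = processor(text=texts, images=image, return_tensors="pt", padding=True)
probs = model(**inputs).logits_per_image.softmax(dim=-1)
print(dict(zip(texts, probs[0].tolist())))
```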

+

UICrit: Enhancing Automated Design Evaluation with a UI Critique Dataset

+

Authors: Peitong Duan, Chin-Yi Cheng, Gang Li, Bjoern Hartmann, Yang Li

+

Link

+

Abstract: Automated UI evaluation can be beneficial for the design process; for example, to compare different UI designs, or conduct automated heuristic evaluation. LLM-based UI evaluation, in particular, holds the promise of generalizability to a wide variety of UI types and evaluation tasks. However, current LLM-based techniques do not yet match the performance of human evaluators. We hypothesize that automatic evaluation can be improved by collecting a targeted UI feedback dataset and then using this dataset to enhance the performance of general-purpose LLMs. We present a targeted dataset of 3,059 design critiques and quality ratings for 983 mobile UIs, collected from seven designers, each with at least a year of professional design experience. We carried out an in-depth analysis to characterize the dataset's features. We then applied this dataset to achieve a 55% performance gain in LLM-generated UI feedback via various few-shot and visual prompting techniques. We also discuss future applications of this dataset, including training a reward model for generative UI techniques, and fine-tuning a tool-agnostic multi-modal LLM that automates UI evaluation.
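
The few-shot prompting idea is straightforward to picture: prepend a handful of critique examples to the query for a new UI. The sketch below builds such a prompt; the example critiques are invented placeholders, not items from the UICrit dataset.

```python
# Few-shot prompt construction for LLM-generated UI feedback (illustrative only).
few_shot_examples = [
    {"ui": "Login screen with 8pt body text on a patterned background",
     "critique": "Body text is too small and contrast is insufficient; increase the size and use a plain background."},
    {"ui": "Checkout form whose primary button is gray and visually recessive",
     "critique": "The primary action should be prominent; use an accent color for the button."},
]

def build_prompt(target_ui_description):
    parts = ["You are a UI design expert. Critique the final UI."]
    for ex in few_shot_examples:
        parts.append(f"UI: {ex['ui']}\nCritique: {ex['critique']}")
    parts.append(f"UI: {target_ui_description}\nCritique:")
    return "\n\n".join(parts)

print(build_prompt("Settings page with 14 toggles in a single unlabeled list"))
```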

+

EyeFormer: Predicting Personalized Scanpaths with Transformer-Guided Reinforcement Learning

+

Authors: Yue Jiang, Zixin Guo, Hamed Rezazadegan Tavakoli, Luis Leiva, Antti Oulasvirta

+

Link

+

Abstract: From a visual-perception perspective, modern graphical user interfaces (GUIs) comprise a complex graphics-rich two-dimensional visuospatial arrangement of text, images, and interactive objects such as buttons and menus. While existing models can accurately predict regions and objects that are likely to attract attention "on average", no scanpath model has been capable of predicting scanpaths for an individual. To close this gap, we introduce EyeFormer, which utilizes a Transformer architecture as a policy network to guide a deep reinforcement learning algorithm that predicts gaze locations. Our model offers the unique capability of producing personalized predictions when given a few user scanpath samples. It can predict full scanpath information, including fixation positions and durations, across individuals and various stimulus types. Additionally, we demonstrate applications in GUI layout optimization driven by our model.
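
A minimal sketch of a Transformer policy that emits a sequence of fixation parameters (x, y, duration) from GUI features is shown below, in the general spirit of the approach described; the dimensions, query-based decoding, and output parameterization are assumptions rather than the paper's architecture.

```python
# Toy Transformer policy head for scanpath prediction (illustrative dimensions).
import torch
import torch.nn as nn

class ScanpathPolicy(nn.Module):
    def __init__(self, d_model=128, n_fixations=8):
        super().__init__()
        self.queries = nn.Parameter(torch.randn(n_fixations, d_model))  # one query per fixation
        layer = nn.TransformerDecoderLayer(d_model, nhead=4, batch_first=True)
        self.decoder = nn.TransformerDecoder(layer, num_layers=2)
        self.head = nn.Linear(d_model, 3)  # (x, y, duration) per fixation

    def forward(self, gui_features):  # gui_features: (batch, patches, d_model)
        q = self.queries.expand(gui_features.size(0), -1, -1)
        return self.head(self.decoder(q, gui_features))

policy = ScanpathPolicy()
print(policy(torch.randn(2, 49, 128)).shape)  # torch.Size([2, 8, 3])
```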

+

GPTVoiceTasker: Advancing Multi-step Mobile Task Efficiency Through Dynamic Interface Exploration and Learning

+

Authors: Minh Duc Vu, Han Wang, Jieshan Chen, Zhuang Li, Shengdong Zhao, Zhenchang Xing, Chunyang Chen

+

Link

+

Abstract: Virtual assistants have the potential to play an important role in helping users achieve different tasks. However, these systems face challenges in their real-world usability, characterized by inefficiency and struggles in grasping user intentions. Leveraging recent advances in Large Language Models (LLMs), we introduce GPTVoiceTasker, a virtual assistant poised to enhance user experiences and task efficiency on mobile devices. GPTVoiceTasker excels at intelligently deciphering user commands and executing relevant device interactions to streamline task completion. For unprecedented tasks, GPTVoiceTasker utilises the contextual information and on-screen content to continuously explore and execute the tasks. In addition, the system continually learns from historical user commands to automate subsequent task invocations, further enhancing execution efficiency. From our experiments, GPTVoiceTasker achieved 84.5% accuracy in parsing human commands into executable actions and 85.7% accuracy in automating multi-step tasks. In our user study, GPTVoiceTasker boosted task efficiency in real-world scenarios by 34.85%, accompanied by positive participant feedback. We made GPTVoiceTasker open-source, inviting further research into LLMs utilization for diverse tasks through prompt engineering and leveraging user usage data to improve efficiency.

+

VisionTasker: Mobile Task Automation Using Vision Based UI Understanding and LLM Task Planning

+

Authors: Yunpeng Song, Yiheng Bian, Yongtao Tang, Guiyu Ma, Zhongmin Cai

+

Link

+

Abstract: Mobile task automation is an emerging field that leverages AI to streamline and optimize the execution of routine tasks on mobile devices, thereby enhancing efficiency and productivity. Traditional methods, such as Programming By Demonstration (PBD), are limited due to their dependence on predefined tasks and susceptibility to app updates. Recent advancements have utilized the view hierarchy to collect UI information and employed Large Language Models (LLM) to enhance task automation. However, view hierarchies have accessibility issues and face potential problems like missing object descriptions or misaligned structures. This paper introduces VisionTasker, a two-stage framework combining vision-based UI understanding and LLM task planning, for mobile task automation in a step-by-step manner. VisionTasker firstly converts a UI screenshot into natural language interpretations using a vision-based UI understanding approach, eliminating the need for view hierarchies. Secondly, it adopts a step-by-step task planning method, presenting one interface at a time to the LLM. The LLM then identifies relevant elements within the interface and determines the next action, enhancing accuracy and practicality. Extensive experiments show that VisionTasker outperforms previous methods, providing effective UI representations across four datasets. Additionally, in automating 147 real-world tasks on an Android smartphone, VisionTasker demonstrates advantages over humans in tasks where humans show unfamiliarity and shows significant improvements when integrated with the PBD mechanism. VisionTasker is open-source and available at https://github.com/AkimotoAyako/VisionTasker.

+

Programming UI

+

NotePlayer: Engaging Jupyter Notebooks for Dynamic Presentation of Analytical Processes

+

Authors: Yang Ouyang, Leixian Shen, Yun Wang, Quan Li

+

Link

+

Abstract: Diverse presentation formats play a pivotal role in effectively conveying code and analytical processes during data analysis. One increasingly popular format is tutorial videos, particularly those based on Jupyter notebooks, which offer an intuitive interpretation of code and vivid explanations of analytical procedures. However, creating such videos requires a diverse skill set and significant manual effort, posing a barrier for many analysts. To bridge this gap, we introduce an innovative tool called NotePlayer, which connects notebook cells to video segments and incorporates a computational engine with language models to streamline video creation and editing. Our aim is to make the process more accessible and efficient for analysts. To inform the design of NotePlayer, we conducted a formative study and performed content analysis on a corpus of 38 Jupyter tutorial videos. This helped us identify key patterns and challenges encountered in existing tutorial videos, guiding the development of NotePlayer. Through a combination of a usage scenario and a user study, we validated the effectiveness of NotePlayer. The results show that the tool streamlines the video creation and facilitates the communication process for data analysts.

+

Tyche: Making Sense of Property-Based Testing Effectiveness

+

Authors: Harrison Goldstein, Jeffrey Tao, Zac Hatfield-Dodds, Benjamin Pierce, Andrew Head

+

Link

+

Abstract: Software developers increasingly rely on automated methods to assess the correctness of their code. One such method is property-based testing (PBT), wherein a test harness generates hundreds or thousands of inputs and checks the outputs of the program on those inputs using parametric properties. Though powerful, PBT induces a sizable gulf of evaluation: developers need to put in nontrivial effort to understand how well the different test inputs exercise the software under test. To bridge this gulf, we propose Tyche, a user interface that supports sensemaking around the effectiveness of property-based tests. Guided by a formative design exploration, our design of Tyche supports developers with interactive, configurable views of test behavior with tight integrations into modern developer testing workflow. These views help developers explore global testing behavior and individual test inputs alike. To accelerate the development of powerful, interactive PBT tools, we define a standard for PBT test reporting and integrate it with a widely used PBT library. A self-guided online usability study revealed that Tyche's visualizations help developers to more accurately assess software testing effectiveness.
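
For context, the sketch below shows a small property-based test written with the Hypothesis library (a widely used Python PBT library, not necessarily the one the abstract refers to), with an event() label per generated input of the kind a Tyche-style report could aggregate; the function under test and the bucketing scheme are illustrative.

```python
# A minimal property-based test; run it with pytest.
from hypothesis import given, event, strategies as st

def run_length_encode(xs):
    """Toy function under test: collapse runs of equal items into (item, count)."""
    out = []
    for x in xs:
        if out and out[-1][0] == x:
            out[-1] = (x, out[-1][1] + 1)
        else:
            out.append((x, 1))
    return out

@given(st.lists(st.integers(min_value=0, max_value=3)))
def test_decode_inverts_encode(xs):
    # Label each generated input so a reporting UI can show the input distribution.
    event(f"length bucket: {min(len(xs) // 5, 3)}")
    decoded = [x for x, n in run_length_encode(xs) for _ in range(n)]
    assert decoded == xs
```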

+

CoLadder: Manipulating Code Generation via Multi-Level Blocks

+

Authors: Ryan Yen, Jiawen Zhu, Sangho Suh, Haijun Xia, Jian Zhao

+

Link

+

Abstract: This paper adopted an iterative design process to gain insights into programmers' strategies when using LLMs for programming. We proposed CoLadder, a novel system that supports programmers by facilitating hierarchical task decomposition, direct code segment manipulation, and result evaluation during prompt authoring. A user study with 12 experienced programmers showed that CoLadder is effective in helping programmers externalize their problem-solving intentions flexibly, improving their ability to evaluate and modify code across various abstraction levels, from their task's goal to final code implementation.

+

SQLucid: Grounding Natural Language Database Queries with Interactive Explanations

+

Authors: Yuan Tian, Jonathan Kummerfeld, Toby Li, Tianyi Zhang

+

Link

+

Abstract: Though recent advances in machine learning have led to significant improvements in natural language interfaces for databases, the accuracy and reliability of these systems remain limited, especially in high-stakes domains. This paper introduces SQLucid, a novel user interface that bridges the gap between non-expert users and complex database querying processes. SQLucid addresses existing limitations by integrating visual correspondence, intermediate query results, and editable step-by-step SQL explanations in natural language to facilitate user understanding and engagement. This unique blend of features empowers users to understand and refine SQL queries easily and precisely. Two user studies and one quantitative experiment were conducted to validate SQLucid’s effectiveness, showing significant improvement in task completion accuracy and user confidence compared to existing interfaces. Our code is available at https://github.com/magic-YuanTian/SQLucid.

+

Next Gen Input

+

PointerVol: A Laser Pointer for Swept Volumetric Displays

+

Authors: Unai Javier Fernández, Iosune Sarasate Azcona, Iñigo Ezcurdia, Manuel Lopez-Amo, Ivan Fernández, Asier Marzo

+

Link

+

Abstract: A laser pointer is a commonly used device that does not require communication with the display system or modifications to the applications; the presenter can simply take a pointer and start using it. When a laser pointer is used on a volumetric display, a line rather than a point appears, making it unsuitable for pointing at 3D locations. PointerVol is a modified laser pointer that allows users to point to 3D positions inside a swept volumetric display. We propose two PointerVol implementations based on timing and distance measurements, and we evaluate the pointing performance with both. Finally, we present other features such as multi-user pointing, line patterns and a multi-finger wearable. PointerVol is a simple device that can help to popularize volumetric displays, or at least to make them more usable for presentations with true-3D content.
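
To make the timing-based variant concrete, the back-of-the-envelope sketch below maps the instant at which the laser spot is detected within a sweep cycle to a depth inside the display volume; the sinusoidal sweep model, refresh rate, and travel are assumptions for illustration, not measurements from the paper.

```python
# Timing-to-depth sketch for a swept volumetric display (assumed sweep model).
import math

SWEEP_HZ = 15.0   # assumed volume refresh rate
DEPTH_MM = 80.0   # assumed sweep travel of the diffuser

def depth_from_detection(t_detect_s, t_cycle_start_s):
    """Map detection time within a sinusoidal sweep cycle to diffuser depth."""
    phase = 2 * math.pi * SWEEP_HZ * (t_detect_s - t_cycle_start_s)
    # Sinusoidal sweep: the diffuser oscillates between 0 and DEPTH_MM.
    return 0.5 * DEPTH_MM * (1 - math.cos(phase))

print(round(depth_from_detection(0.020, 0.0), 1), "mm")
```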

+

RFTIRTouch: Touch Sensing Device for Dual-sided Transparent Plane Based on Repropagated Frustrated Total Internal Reflection

+

Authors: Ratchanon Wattanaparinton, Kotaro Kitada, Kentaro Takemura

+

Link

+

Abstract: Frustrated total internal reflection (FTIR) imaging is widely applied in various touch-sensing systems. However, vision-based touch sensing has structural constraints, and the system size tends to increase. Although a sensing system with reduced thickness has been developed recently using repropagated FTIR (RFTIR), it lacks the property of instant installation anywhere because observation from the side of a transparent medium is required. Therefore, this study proposes an "RFTIRTouch" sensing device to capture RFTIR images from the contact surface. RFTIRTouch detects the touch position on a dual-sided plane using a physics-based estimation and can be retrofitted to existing transparent media with simple calibration. Our evaluation experiments confirm that the touch position can be estimated within an error of approximately 2.1 mm under optimal conditions. Furthermore, several application examples are implemented to demonstrate the advantages of RFTIRTouch, such as its ability to measure dual sides with a single sensor and waterproof the contact surface.

+

IRIS: Wireless Ring for Vision-based Smart Home Interaction

+

Authors: Maruchi Kim, Antonio Glenn, Bandhav Veluri, Yunseo Lee, Eyoel Gebre, Aditya Bagaria, Shwetak Patel, Shyamnath Gollakota

+

Link

+

Abstract: Integrating cameras into wireless smart rings has been challenging due to size and power constraints. We introduce IRIS, the first wireless vision-enabled smart ring system for smart home interactions. Equipped with a camera, Bluetooth radio, inertial measurement unit (IMU), and an onboard battery, IRIS meets the small size, weight, and power (SWaP) requirements for ring devices. IRIS is context-aware, adapting its gesture set to the detected device, and can last for 16-24 hours on a single charge. IRIS leverages the scene semantics to achieve instance-level device recognition. In a study involving 23 participants, IRIS consistently outpaced voice commands, with a higher proportion of participants expressing a preference for IRIS over voice commands regarding toggling a device's state, granular control, and social acceptability. Our work pushes the boundary of what is possible with ring form-factor devices, addressing system challenges and opening up novel interaction capabilities.

+

Silent Impact: Tracking Tennis Shots from the Passive Arm

+

Authors: Junyong Park, Saelyne Yang, Sungho Jo

+

Link

+

Abstract: Wearable technology has transformed sports analytics, offering new dimensions in enhancing player experience. Yet, many solutions involve cumbersome setups that inhibit natural motion. In tennis, existing products require sensors on the racket or dominant arm, causing distractions and discomfort. We propose Silent Impact, a novel and user-friendly system that analyzes tennis shots using a sensor placed on the passive arm. Collecting Inertial Measurement Unit sensor data from 20 recreational tennis players, we developed neural networks that exclusively utilize passive arm data to detect and classify six shots, achieving a classification accuracy of 88.2% and a detection F1 score of 86.0%, comparable to the dominant arm. These models were then incorporated into an end-to-end prototype, which records passive arm motion through a smartwatch and displays a summary of shots on a mobile app. A user study (N=10) showed that participants felt less burdened physically and mentally using Silent Impact on the passive arm. Overall, our research establishes the passive arm as an effective, comfortable alternative for tennis shot analysis, advancing user-friendly sports analytics.

+

LLM: New applications

+

VoicePilot: Harnessing LLMs as Speech Interfaces for Assistive Robotics

+

Authors: Akhil Padmanabha, Jessie Yuan, Janavi Gupta, Zulekha Karachiwalla, Carmel Majidi, Henny Admoni, Zackory Erickson

+

Link

+

Abstract: Physically assistive robots present an opportunity to significantly increase the well-being and independence of individuals with motor impairments or other forms of disability who are unable to complete activities of daily living. Speech interfaces, especially ones that utilize Large Language Models (LLMs), can enable individuals to effectively and naturally communicate high-level commands and nuanced preferences to robots. Frameworks for integrating LLMs as interfaces to robots for high level task planning and code generation have been proposed, but fail to incorporate human-centric considerations which are essential while developing assistive interfaces. In this work, we present a framework for incorporating LLMs as speech interfaces for physically assistive robots, constructed iteratively with 3 stages of testing involving a feeding robot, culminating in an evaluation with 11 older adults at an independent living facility. We use both quantitative and qualitative data from the final study to validate our framework and additionally provide design guidelines for using LLMs as speech interfaces for assistive robots. Videos, code, and supporting files are located on our project website: https://sites.google.com/andrew.cmu.edu/voicepilot/.

+

ComPeer: A Generative Conversational Agent for Proactive Peer Support

+

Authors: Tianjian Liu, Hongzheng Zhao, Yuheng Liu, Xingbo Wang, Zhenhui Peng

+

Link

+

Abstract: Conversational Agents (CAs) acting as peer supporters have been widely studied and demonstrated beneficial for people's mental health. However, previous peer support CAs either are user-initiated or follow predefined rules to initiate the conversations, which may discourage users from engaging and building relationships with the CAs for long-term benefits. In this paper, we develop ComPeer, a generative CA that can proactively offer adaptive peer support to users. ComPeer leverages large language models to detect and reflect significant events in the dialogue, enabling it to strategically plan the timing and content of proactive care. In addition, ComPeer incorporates peer support strategies, conversation history, and its persona into the generative messages. Our one-week between-subjects study (N=24) demonstrates ComPeer's strength in providing peer support over time and boosting users' engagement compared to a baseline user-initiated CA. We report users' interaction patterns with ComPeer and discuss implications for designing proactive generative agents to promote people's well-being.

+

SHAPE-IT: Exploring Text-to-Shape-Display for Generative Shape-Changing Behaviors with LLMs

+

Authors: Wanli Qian, Chenfeng Gao, Anup Sathya, Ryo Suzuki, Ken Nakagaki

+

Link

+

Abstract: This paper introduces text-to-shape-display, a novel approach to generating dynamic shape changes in pin-based shape displays through natural language commands. By leveraging large language models (LLMs) and AI-chaining, our approach allows users to author shape-changing behaviors on demand through text prompts without programming. We describe the foundational aspects necessary for such a system, including the identification of key generative elements (primitive, animation, and interaction) and design requirements to enhance user interaction, based on formative exploration and iterative design processes. Based on these insights, we develop SHAPE-IT, an LLM-based authoring tool for a 24 x 24 shape display, which translates the user's textual command into executable code and allows for quick exploration through a web-based control interface. We evaluate the effectiveness of SHAPE-IT in two ways: 1) performance evaluation and 2) user evaluation (N= 10). The study conclusions highlight the ability to facilitate rapid ideation of a wide range of shape-changing behaviors with AI. However, the findings also expose accuracy-related challenges and limitations, prompting further exploration into refining the framework for leveraging AI to better suit the unique requirements of shape-changing systems.

+

WaitGPT: Monitoring and Steering Conversational LLM Agent in Data Analysis with On-the-Fly Code Visualization

+

Authors: Liwenhan Xie, Chengbo Zheng, Haijun Xia, Huamin Qu, Chen Zhu-Tian

+

Link

+

Abstract: Large language models (LLMs) support data analysis through conversational user interfaces, as exemplified in OpenAI's ChatGPT (formerly known as Advanced Data Analysis or Code Interpreter). Essentially, LLMs produce code for accomplishing diverse analysis tasks. However, presenting raw code can obscure the logic and hinder user verification. To empower users with enhanced comprehension and augmented control over analysis conducted by LLMs, we propose a novel approach to transform LLM-generated code into an interactive visual representation. In the approach, users are provided with a clear, step-by-step visualization of the LLM-generated code in real time, allowing them to understand, verify, and modify individual data operations in the analysis. Our design decisions are informed by a formative study (N=8) probing into user practice and challenges. We further developed a prototype named WaitGPT and conducted a user study (N=12) to evaluate its usability and effectiveness. The findings from the user study reveal that WaitGPT facilitates monitoring and steering of data analysis performed by LLMs, enabling participants to enhance error detection and increase their overall confidence in the results.
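
As a rough illustration of turning LLM-generated analysis code into visualizable steps, the sketch below walks the Python AST of a small pandas-style script and lists its operations in order; this is a simplification for illustration, not the paper's transformation pipeline.

```python
# Extract a step list from generated analysis code via the standard ast module.
import ast

llm_code = """
df = pd.read_csv("sales.csv")
df = df.dropna(subset=["price"])
summary = df.groupby("region")["price"].mean()
"""

steps = []
for node in ast.walk(ast.parse(llm_code)):
    if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):
        steps.append((node.lineno, node.func.attr))  # record line and operation name

for lineno, op in sorted(steps):
    print(f"step @ line {lineno}: {op}")
```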

+

Break Q&A: Haptics

+

LoopBot: Representing Continuous Haptics of Grounded Objects in Room-scale VR

+

Authors: Tetsushi Ikeda, Kazuyuki Fujita, Kumpei Ogawa, Kazuki Takashima, Yoshifumi Kitamura

+

Link

+

Abstract: In room-scale virtual reality, providing continuous haptic feedback from touching grounded objects, such as walls and handrails, has been challenging due to the user's walking range and the required force. In this study, we propose LoopBot, a novel technique to provide continuous haptic feedback from grounded objects using only a single user-following robot. Specifically, LoopBot is equipped with a loop-shaped haptic prop attached to an omnidirectional robot that scrolls to cancel out the robot's displacement, giving the user the haptic sensation that the prop is actually fixed in place, or "grounded." We first introduce the interaction design space of LoopBot and, as one of its promising interaction scenarios, implement a prototype for the experience of walking while grasping handrails. A performance evaluation shows that scrolling the prop cancels 77.5% of the robot's running speed on average. A preliminary user test (N=10) also shows that the subjective realism of the experience and the sense of the virtual handrails being grounded were significantly higher than when the prop was not scrolled. Based on these findings, we discuss possible further development of LoopBot.

+

JetUnit: Rendering Diverse Force Feedback in Virtual Reality Using Water Jets

+

Authors: Zining Zhang, Jiasheng Li, Zeyu Yan, Jun Nishida, Huaishu Peng

+

Link

+

Abstract: We propose JetUnit, a water-based VR haptic system designed to produce force feedback with a wide spectrum of intensities and frequencies through water jets. The key challenge in designing this system lies in optimizing parameters to enable the haptic device to generate force feedback that closely replicates the most intense force produced by direct water jets while ensuring the user remains dry. In this paper, we present the key design parameters of the JetUnit wearable device determined through a set of quantitative experiments and a perception study. We further conducted a user study to assess the impact of integrating our haptic solutions into virtual reality experiences. The results revealed that, by adhering to the design principles of JetUnit, the water-based haptic system is capable of delivering diverse force feedback sensations, significantly enhancing the immersive experience in virtual reality.

+

Selfrionette: A Fingertip Force-Input Controller for Continuous Full-Body Avatar Manipulation and Diverse Haptic Interactions

+

Authors: Takeru Hashimoto, Yutaro Hirao

+

Link

+

Abstract: We propose Selfrionette, a controller that uses fingertip force input to drive avatar movements in virtual reality (VR). This system enables users to interact with virtual objects and walk in VR using only fingertip force, overcoming physical and spatial constraints. Additionally, by fixing users' fingers, it provides users with counterforces equivalent to the applied force, allowing for diverse and wide dynamic range haptic feedback by adjusting the relationship between force input and virtual movement. To evaluate the effectiveness of the proposed method, this paper focuses on hand interaction as a first step. In User Study 1, we measured usability and embodiment during reaching tasks under Selfrionette, body tracking, and finger tracking conditions. In User Study 2, we investigated whether users could perceive haptic properties such as weight, friction, and compliance under the same conditions as User Study 1. Selfrionette was found to be comparable to body tracking in realism of haptic interaction, enabling embodied avatar experiences even in limited spatial conditions.

+

SpinShot: Optimizing Both Physical and Perceived Force Feedback of Flywheel-Based, Directional Impact Handheld Devices

+

Authors: Chia-An Fan, En-Huei Wu, Chia-Yu Cheng, Yu-Cheng Chang, Alvaro Lopez, Yu Chen, Chia-Chen Chi, Yi-Sheng Chan, Ching-Yi Tsai, Mike Chen

+

Link

+

Abstract: Real-world impact, such as hitting a tennis ball and a baseball, generates instantaneous, directional impact forces. However, current ungrounded force feedback technologies, such as air jets and propellers, can only generate directional impulses that are 10x-10,000x weaker. We present SpinShot, a flywheel-based device with a solenoid-actuated stopper capable of generating directional impulse of 22Nm in 1ms, which is more than 10x stronger than prior ungrounded directional technologies. Furthermore, we present a novel force design that reverses the flywheel immediately after the initial impact, to significantly increase the perceived magnitude. We conducted a series of two formative, perceptual studies (n=16, 18), followed by a summative user experience study (n=16) that compared SpinShot vs. moving mass (solenoid) and vs. air jets in a VR baseball hitting game. Results showed that SpinShot significantly improved realism, immersion, magnitude (p < .01) compared to both baselines, but significantly reduced comfort vs. air jets primarily due to the 2.9x device weight. Overall, SpinShot was preferred by 63-75% of the participants.

+

Break Q&A: Body as the interface

+

MouthIO: Fabricating Customizable Oral User Interfaces with Integrated Sensing and Actuation

+

Authors: Yijing Jiang, Julia Kleinau, Till Max Eckroth, Eve Hoggan, Stefanie Mueller, Michael Wessely

+

Link

+

Abstract: This paper introduces MouthIO, the first customizable intraoral user interface that can be equipped with various sensors and output components. MouthIO consists of an SLA-printed brace that houses a flexible PCB within a bite-proof enclosure positioned between the molar teeth and inner cheeks. Our MouthIO design and fabrication technique enables makers to customize the oral user interfaces in both form and function at low cost. All parts in contact with the oral cavity are made of bio-compatible materials to ensure safety, while the design takes into account both comfort and portability. We demonstrate MouthIO through three application examples ranging from beverage consumption monitoring and health monitoring to assistive technology. Results from our full-day user study indicate high wearability and social acceptance levels, while our technical evaluation demonstrates the device's ability to withstand adult bite forces.

+

Can a Smartwatch Move Your Fingers? Compact and Practical Electrical Muscle Stimulation in a Smartwatch

+

Honorable Mention

+

Authors: Akifumi Takahashi, Yudai Tanaka, Archit Tamhane, Alan Shen, Shan-Yuan Teng, Pedro Lopes

+

Link

+

Abstract: Smartwatches have gained mainstream popularity, making them today's de facto wearables. Despite advancements in sensing, haptics on smartwatches is still restricted to tactile feedback (e.g., vibration). Most smartwatch-sized actuators cannot render strong force-feedback. Simultaneously, electrical muscle stimulation (EMS) promises compact force-feedback but, to actuate fingers, requires users to wear many electrodes on their forearms. While forearm electrodes provide good accuracy, they keep EMS from being a practical force-feedback interface. To address this, we propose moving the electrodes to the wrist—conveniently packing them in the backside of a smartwatch. In our first study, we found that by cross-sectionally stimulating the wrist in 1,728 trials, we can actuate thumb extension, index extension & flexion, middle flexion, pinky flexion, and wrist flexion. Following this, we engineered a compact EMS that integrates directly into a smartwatch's wristband (with a custom stimulator, electrodes, demultiplexers, and communication). In our second study, we found that participants could calibrate our device by themselves ~50% faster than with conventional EMS. Furthermore, all participants preferred the experience of this device, especially for its social acceptability & practicality. We believe that our approach opens new applications for smartwatch-based interactions, such as haptic assistance during everyday tasks.

+

Power-over-Skin: Full-Body Wearables Powered By Intra-Body RF Energy

+

Authors: Andy Kong, Daehwa Kim, Chris Harrison

+

Link

+

Abstract: Powerful computing devices are now small enough to be easily worn on the body. However, batteries pose a major design and user experience obstacle, adding weight and volume, and generally requiring periodic device removal and recharging. In response, we developed Power-over-Skin, an approach using the human body itself to deliver power to many distributed, battery-free, worn devices. We demonstrate power delivery from on-body distances as far as from head-to-toe, with sufficient energy to power microcontrollers capable of sensing and wireless communication. We share results from a study campaign that informed our implementation, as well as experiments that validate our final system. We conclude with several demonstration devices, ranging from input controllers to longitudinal bio-sensors, which highlight the efficacy and potential of our approach.

+

HandPad: Make Your Hand an On-the-go Writing Pad via Human Capacitance

+

Authors: Yu Lu, Dian Ding, Hao Pan, Yijie Li, Juntao Zhou, Yongjian Fu, Yongzhao Zhang, Yi-Chao Chen, Guangtao Xue

+

Link

+

Abstract: Convenient text input is a pain point for devices such as AR glasses, and it is difficult for existing solutions to balance portability and efficiency. This paper introduces HandPad, a system that turns the hand into an on-the-go touchscreen, realizing interaction on the hand via human capacitance. HandPad achieves keystroke and handwriting inputs for letters, numbers, and Chinese characters, reducing the dependency on capacitive or pressure sensor arrays. Specifically, the system verifies the feasibility of touch point localization on the hand using the human capacitance model and proposes a handwriting recognition system based on Bi-LSTM and ResNet. The transfer learning-based system only needs a small amount of training data to build a handwriting recognition model for the target user. Experiments in real environments verify the feasibility of HandPad for keystroke (accuracy of 100%) and handwriting recognition for letters (accuracy of 99.1%), numbers (accuracy of 97.6%) and Chinese characters (accuracy of 97.9%).
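
The sketch below shows a bidirectional LSTM sequence classifier of the general kind the abstract mentions for handwriting recognition; the input dimensionality, hidden size, and class count are assumptions, not the authors' model.

```python
# Toy Bi-LSTM classifier over capacitance traces (illustrative architecture).
import torch
import torch.nn as nn

class BiLSTMClassifier(nn.Module):
    def __init__(self, in_dim=8, hidden=64, n_classes=26):
        super().__init__()
        self.lstm = nn.LSTM(in_dim, hidden, batch_first=True, bidirectional=True)
        self.head = nn.Linear(2 * hidden, n_classes)

    def forward(self, x):  # x: (batch, time, features)
        out, _ = self.lstm(x)
        return self.head(out[:, -1])  # classify from the last timestep

model = BiLSTMClassifier()
scores = model(torch.randn(4, 100, 8))  # 4 traces, 100 samples each
print(scores.shape)                     # torch.Size([4, 26])
```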

+

Break Q&A: Vision-based UIs

+

Vision-Based Hand Gesture Customization from a Single Demonstration

+

Authors: Soroush Shahi, Vimal Mollyn, Cori Tymoszek Park, Runchang Kang, Asaf Liberman, Oron Levy, Jun Gong, Abdelkareem Bedri, Gierad Laput

+

Link

+

Abstract: Hand gesture recognition is becoming a more prevalent mode of human-computer interaction, especially as cameras proliferate across everyday devices. Despite continued progress in this field, gesture customization is often underexplored. Customization is crucial since it enables users to define and demonstrate gestures that are more natural, memorable, and accessible. However, customization requires efficient usage of user-provided data. We introduce a method that enables users to easily design bespoke gestures with a monocular camera from one demonstration. We employ transformers and meta-learning techniques to address few-shot learning challenges. Unlike prior work, our method supports any combination of one-handed, two-handed, static, and dynamic gestures, including different viewpoints, and the ability to handle irrelevant hand movements. We implement three real-world applications using our customization method, conduct a user study, and achieve up to 94% average recognition accuracy from one demonstration. Our work provides a viable path for vision-based gesture customization, laying the foundation for future advancements in this domain.

+

VirtualNexus: Enhancing 360-Degree Video AR/VR Collaboration with Environment Cutouts and Virtual Replicas

+

Authors: Xincheng Huang, Michael Yin, Ziyi Xia, Robert Xiao

+

Link

+

Abstract: Asymmetric AR/VR collaboration systems bring a remote VR user to a local AR user’s physical environment, allowing them to communicate and work within a shared virtual/physical space. Such systems often display the remote environment through 3D reconstructions or 360° videos. While 360° cameras stream an environment in higher quality, they lack spatial information, making them less interactable. We present VirtualNexus, an AR/VR collaboration system that enhances 360° video AR/VR collaboration with environment cutouts and virtual replicas. VR users can define cutouts of the remote environment to interact with as a world-in-miniature, and their interactions are synchronized to the local AR perspective. Furthermore, AR users can rapidly scan and share 3D virtual replicas of physical objects using neural rendering. We demonstrated our system’s utility through 3 example applications and evaluated our system in a dyadic usability test. VirtualNexus extends the interaction space of 360° telepresence systems, offering improved physical presence, versatility, and clarity in interactions.

+

Personal Time-Lapse

+

Authors: Nhan Tran, Ethan Yang, Angelique Taylor, Abe Davis

+

Link

+

Abstract: Our bodies are constantly in motion—from the bending of arms and legs to the less conscious movement of breathing, our precise shape and location change constantly. This can make subtler developments (e.g., the growth of hair, or the healing of a wound) difficult to observe. Our work focuses on helping users record and visualize this type of subtle, longer-term change. We present a mobile tool that combines custom 3D tracking with interactive visual feedback and computational imaging to capture personal time-lapse, which approximates longer-term video of the subject (typically, part of the capturing user’s body) under a fixed viewpoint, body pose, and lighting condition. These personal time-lapses offer a powerful and detailed way to track visual changes of the subject over time. We begin with a formative study that examines what makes personal time-lapse so difficult to capture. Building on our findings, we motivate the design of our capture tool, evaluate this design with users, and demonstrate its effectiveness in a variety of challenging examples.

+

Chromaticity Gradient Mapping for Interactive Control of Color Contrast in Images and Video

+

Authors: Ruyu Yan, Jiatian Sun, Abe Davis

+

Link

+

Abstract: We present a novel perceptually-motivated interactive tool for using color contrast to enhance details represented in the lightness channel of images and video. Our method lets users adjust the perceived contrast of different details by manipulating local chromaticity while preserving the original lightness of individual pixels. Inspired by the use of similar chromaticity mappings in painting, our tool effectively offers contrast along a user-selected gradient of chromaticities as additional bandwidth for representing and enhancing different details in an image. We provide an interface for our tool that closely resembles the familiar design of tonal contrast curve controls that are available in most professional image editing software. We show that our tool is effective for enhancing the perceived contrast of details without altering lightness in an image and present many examples of effects that can be achieved with our method on both images and video.

+

Break Q&A: Next Gen Input

+

PointerVol: A Laser Pointer for Swept Volumetric Displays

+

Authors: Unai Javier Fernández, Iosune Sarasate Azcona, Iñigo Ezcurdia, Manuel Lopez-Amo, Ivan Fernández, Asier Marzo

+

Link

+

Abstract: A laser pointer is a commonly used device that does not require communication with the display system or modifications to the applications; the presenter can simply take a pointer and start using it. When a laser pointer is used on a volumetric display, a line rather than a point appears, making it unsuitable for pointing at 3D locations. PointerVol is a modified laser pointer that allows users to point to 3D positions inside a swept volumetric display. We propose two PointerVol implementations based on timing and distance measurements, and we evaluate the pointing performance with both. Finally, we present other features such as multi-user pointing, line patterns and a multi-finger wearable. PointerVol is a simple device that can help to popularize volumetric displays, or at least to make them more usable for presentations with true-3D content.

+

RFTIRTouch: Touch Sensing Device for Dual-sided Transparent Plane Based on Repropagated Frustrated Total Internal Reflection

+

Authors: Ratchanon Wattanaparinton, Kotaro Kitada, Kentaro Takemura

+

Link

+

Abstract: Frustrated total internal reflection (FTIR) imaging is widely applied in various touch-sensing systems. However, vision-based touch sensing has structural constraints, and the system size tends to increase. Although a sensing system with reduced thickness has been developed recently using repropagated FTIR (RFTIR), it lacks the property of instant installation anywhere because observation from the side of a transparent medium is required. Therefore, this study proposes an "RFTIRTouch" sensing device to capture RFTIR images from the contact surface. RFTIRTouch detects the touch position on a dual-sided plane using a physics-based estimation and can be retrofitted to existing transparent media with simple calibration. Our evaluation experiments confirm that the touch position can be estimated within an error of approximately 2.1 mm under optimal conditions. Furthermore, several application examples are implemented to demonstrate the advantages of RFTIRTouch, such as its ability to measure dual sides with a single sensor and waterproof the contact surface.

+

IRIS: Wireless Ring for Vision-based Smart Home Interaction

+

Authors: Maruchi Kim, Antonio Glenn, Bandhav Veluri, Yunseo Lee, Eyoel Gebre, Aditya Bagaria, Shwetak Patel, Shyamnath Gollakota

+

Link

+

Abstract: Integrating cameras into wireless smart rings has been challenging due to size and power constraints. We introduce IRIS, the first wireless vision-enabled smart ring system for smart home interactions. Equipped with a camera, Bluetooth radio, inertial measurement unit (IMU), and an onboard battery, IRIS meets the small size, weight, and power (SWaP) requirements for ring devices. IRIS is context-aware, adapting its gesture set to the detected device, and can last for 16-24 hours on a single charge. IRIS leverages the scene semantics to achieve instance-level device recognition. In a study involving 23 participants, IRIS consistently outpaced voice commands, with a higher proportion of participants expressing a preference for IRIS over voice commands regarding toggling a device's state, granular control, and social acceptability. Our work pushes the boundary of what is possible with ring form-factor devices, addressing system challenges and opening up novel interaction capabilities.

+

Silent Impact: Tracking Tennis Shots from the Passive Arm

+

Authors: Junyong Park, Saelyne Yang, Sungho Jo

+

Link

+

Abstract: Wearable technology has transformed sports analytics, offering new dimensions in enhancing player experience. Yet, many solutions involve cumbersome setups that inhibit natural motion. In tennis, existing products require sensors on the racket or dominant arm, causing distractions and discomfort. We propose Silent Impact, a novel and user-friendly system that analyzes tennis shots using a sensor placed on the passive arm. Collecting Inertial Measurement Unit sensor data from 20 recreational tennis players, we developed neural networks that exclusively utilize passive arm data to detect and classify six shots, achieving a classification accuracy of 88.2% and a detection F1 score of 86.0%, comparable to the dominant arm. These models were then incorporated into an end-to-end prototype, which records passive arm motion through a smartwatch and displays a summary of shots on a mobile app. A user study (N=10) showed that participants felt less burdened physically and mentally using Silent Impact on the passive arm. Overall, our research establishes the passive arm as an effective, comfortable alternative for tennis shot analysis, advancing user-friendly sports analytics.

+

Break Q&A: Future of Typing

+

OptiBasePen: Mobile Base+Pen Input on Passive Surfaces by Sensing Relative Base Motion Plus Close-Range Pen Position

+

Authors: Andreas Fender, Mohamed Kari

+

Link

+

Abstract: Digital pen input devices based on absolute pen position sensing, such as Wacom Pens, support high-fidelity pen input. However, they require specialized sensing surfaces like drawing tablets, which can have a large desk footprint, constrain the possible input area, and limit mobility. In contrast, digital pens with integrated relative sensing enable mobile use on passive surfaces, but suffer from motion artifacts or require surface contact at all times, deviating from natural pen affordances. We present OptiBasePen, a device for mobile pen input on ordinary surfaces. Our prototype consists of two parts: the "base" on which the hand rests and the pen for fine-grained input. The base features a high-precision mouse sensor to sense its own relative motion, and two infrared image sensors to track the absolute pen tip position within the base's frame of reference. This enables pen input on ordinary surfaces without external cameras while also avoiding drift from pen micro-movements. In this work, we present our prototype as well as the general base+pen concept, which combines relative and absolute sensing.

+

Palmrest+: Expanding Laptop Input Space with Shear Force on Palm-Resting Area

+

Authors: Jisu Yim, Seoyeon Bae, Taejun Kim, Sunbum Kim, Geehyuk Lee

+

Link

+

Abstract: The palmrest area of laptops has potential as an additional input space, considering its consistent palm contact during keyboard interaction. We propose Palmrest+, leveraging shear force exerted on the palmrest area. We suggest two input techniques: Palmrest Shortcut, for instant shortcut execution, and Palmrest Joystick, for continuous value input. These allow seamless and subtle input amidst keyboard typing. Evaluation of Palmrest Shortcut against conventional keyboard shortcuts revealed faster performance when applying shear force in a unimanual and bimanual manner, with a significant reduction in gaze shifting. Additionally, the assessment of Palmrest Joystick against the laptop touchpad demonstrated comparable performance in selecting one- and two-dimensional targets with low-precision pointing, i.e., for short distances and large target sizes. The maximal hand displacement significantly decreased for both Palmrest Shortcut and Palmrest Joystick compared to conventional methods. These findings verify the feasibility and effectiveness of leveraging the palmrest area as an additional input space on laptops, offering promising enhancements to typing-related user interaction experiences.

+

TouchInsight: Uncertainty-aware Rapid Touch and Text Input for Mixed Reality from Egocentric Vision

+

Authors: Paul Streli, Mark Richardson, Fadi Botros, Shugao Ma, Robert Wang, Christian Holz

+

Link

+

Abstract: While passive surfaces offer numerous benefits for interaction in mixed reality, reliably detecting touch input solely from head-mounted cameras has been a long-standing challenge. Camera specifics, hand self-occlusion, and rapid movements of both head and fingers introduce considerable uncertainty about the exact location of touch events. Existing methods have thus not been capable of achieving the performance needed for robust interaction. In this paper, we present a real-time pipeline that detects touch input from all ten fingers on any physical surface, purely based on egocentric hand tracking. Our method TouchInsight comprises a neural network to predict the moment of a touch event, the finger making contact, and the touch location. TouchInsight represents locations through a bivariate Gaussian distribution to account for uncertainties due to sensing inaccuracies, which we resolve through contextual priors to accurately infer intended user input. We first evaluated our method offline and found that it locates input events with a mean error of 6.3 mm, and accurately detects touch events (F1=0.99) and identifies the finger used (F1=0.96). In an online evaluation, we then demonstrate the effectiveness of our approach for a core application of dexterous touch input: two-handed text entry. In our study, participants typed 37.0 words per minute with an uncorrected error rate of 2.9% on average.
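
To illustrate the decoding idea, the sketch below models a touch as a bivariate Gaussian and combines that spatial likelihood with a contextual prior over keys to infer the intended key; the key layout, covariance, and prior values are assumptions for illustration, not values from the paper.

```python
# Combining a Gaussian touch likelihood with a contextual prior (illustrative values).
import numpy as np
from scipy.stats import multivariate_normal

keys = {"q": (0.0, 0.0), "w": (19.0, 0.0), "e": (38.0, 0.0)}  # key centers in mm (assumed)
touch_mean = np.array([17.0, 2.0])                            # estimated touch location (mm)
touch_cov = np.array([[6.3**2, 0.0], [0.0, 6.3**2]])          # sensing uncertainty (assumed isotropic)
prior = {"q": 0.2, "w": 0.5, "e": 0.3}                        # contextual prior over keys (assumed)

likelihood = multivariate_normal(mean=touch_mean, cov=touch_cov)
posterior = {k: likelihood.pdf(center) * prior[k] for k, center in keys.items()}
total = sum(posterior.values())
posterior = {k: v / total for k, v in posterior.items()}
print(max(posterior, key=posterior.get), posterior)
```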

+

Can Capacitive Touch Images Enhance Mobile Keyboard Decoding?

+

Authors: Piyawat Lertvittayakumjorn, Shanqing Cai, Billy Dou, Cedric Ho, Shumin Zhai

+

Link

+

Abstract: Capacitive touch sensors capture the two-dimensional spatial profile (referred to as a touch heatmap) of a finger's contact with a mobile touchscreen. However, the research and design of touchscreen mobile keyboards -- one of the most speed- and accuracy-demanding touch interfaces -- has focused on the location of the touch centroid derived from the touch image heatmap as the input, discarding the rest of the raw spatial signals. In this paper, we investigate whether touch heatmaps can be leveraged to further improve the tap decoding accuracy for mobile touchscreen keyboards. Specifically, we developed and evaluated machine-learning models that interpret user taps by using the centroids and/or the heatmaps as their input and studied the contribution of the heatmaps to model performance. The results show that adding the heatmap into the input feature set led to a 21.4% relative reduction of character error rates on average, compared to using the centroid alone. Furthermore, we conducted a live user study with the centroid-based and heatmap-based decoders built into Pixel 6 Pro devices and observed a lower error rate, faster typing speed, and a higher self-reported satisfaction score with the heatmap-based decoder than the centroid-based decoder. These findings underline the promise of utilizing touch heatmaps for improving the typing experience in mobile keyboards.
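
The two input representations compared above can be sketched in a few lines: the centroid collapses the capacitive heatmap to a single point, while the heatmap feature keeps the full spatial profile for the decoder; the array below is an illustrative stand-in for sensor data.

```python
# Centroid vs. full-heatmap features from a capacitive touch image (toy data).
import numpy as np

heatmap = np.array([
    [0, 1, 2, 1, 0],
    [1, 4, 9, 3, 0],
    [0, 2, 5, 2, 0],
], dtype=float)

total = heatmap.sum()
rows, cols = np.indices(heatmap.shape)
# Centroid-only feature: intensity-weighted mean of row/column indices.
centroid = (float((rows * heatmap).sum() / total),
            float((cols * heatmap).sum() / total))
# Heatmap feature: flattened, normalized spatial profile a decoder could consume.
heatmap_feature = (heatmap / total).ravel()
print(centroid, heatmap_feature.shape)
```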

+

Break Q&A: Storytime

+

Story-Driven: Exploring the Impact of Providing Real-time Context Information on Automated Storytelling

+

Authors: Jan Henry Belz, Lina Weilke, Anton Winter, Philipp Hallgarten, Enrico Rukzio, Tobias Grosse-Puppendahl

+

Link

+

Abstract: Stories have long captivated the human imagination with narratives that enrich our lives. Traditional storytelling methods are often static and not designed to adapt to the listener’s environment, which is full of dynamic changes. For instance, people often listen to stories in the form of podcasts or audiobooks while traveling in a car. Yet, conventional in-car storytelling systems do not embrace the adaptive potential of this space. The advent of generative AI is the key to creating content that is not just personalized but also responsive to the changing parameters of the environment. We introduce a novel system for interactive, real-time story narration that leverages environment and user context in correspondence with estimated arrival times to adjust the generated story continuously. Through two comprehensive real-world studies with a total of 30 participants in a vehicle, we assess the user experience, level of immersion, and perception of the environment provided by the prototype. Participants' feedback shows a significant improvement over traditional storytelling and highlights the importance of context information for generative storytelling systems.

+

Lumina: A Software Tool for Fostering Creativity in Designing Chinese Shadow Puppets

+

Authors: Zhihao Yao, Yao Lu, Qirui Sun, Shiqing Lyu, Hanxuan Li, Xing-Dong Yang, Xuezhu Wang, Guanhong Liu, Haipeng Mi

+

Link

+

Abstract: Shadow puppetry, a culturally rich storytelling art, faces challenges transitioning to the digital realm. Creators in the early design phase struggle with crafting intricate patterns, textures, and basic animations while adhering to stylistic conventions - hindering creativity, especially for novices. This paper presents Lumina, a tool to facilitate the early Chinese shadow puppet design stage. Lumina provides contour templates, animations, scene editing tools, and machine-generated traditional puppet patterns. These features liberate creators from tedious tasks, allowing focus on the creative process. Developed based on a formative study with puppet creators, the web-based Lumina enables wide dissemination. An evaluation with 18 participants demonstrated Lumina's effectiveness and ease of use, with participants successfully creating designs spanning traditional themes to contemporary and science-fiction concepts.

+

PortalInk: 2.5D Visual Storytelling with SVG Parallax and Waypoint Transitions

+

Authors: Tongyu Zhou, Joshua Yang, Vivian Chan, Ji Won Chung, Jeff Huang

+

Link

+

Abstract: Efforts to expand the authoring of visual stories beyond the 2D canvas have commonly mapped flat imagery to 3D scenes or objects. This translation requires spatial reasoning, as artists must think in two spaces. We propose PortalInk, a tool for artists to craft and export 2.5D graphical stories while remaining in 2D space by using SVG transitions. This is achieved via a parallax effect that generates a sense of depth that can be further explored using pan and zoom interactions. Any canvas position can be saved and linked to in a closed drawn stroke, or "portal," allowing the artist to create spatially discontinuous, or even infinitely looping visual trajectories. We provide three case studies and a gallery to demonstrate how artists can naturally incorporate these interactions to craft immersive comics, as well as re-purpose them to support use cases beyond drawing such as animation, slide-based presentations, web design, and digital journalism.

+

DrawTalking: Building Interactive Worlds by Sketching and Speaking

+

Authors: Karl Rosenberg, Rubaiat Habib Kazi, Li-Yi Wei, Haijun Xia, Ken Perlin

+

Link

+

Abstract: We introduce DrawTalking, an approach to building and controlling interactive worlds by sketching and speaking while telling stories. It emphasizes user control and flexibility, and gives programming-like capability without requiring code. An early open-ended study with our prototype shows that the mechanics resonate and are applicable to many creative-exploratory use cases, with the potential to inspire and inform research in future natural interfaces for creative exploration and authoring.

+

Patchview: LLM-powered Worldbuilding with Generative Dust and Magnet Visualization

+

Authors: John Chung, Max Kreminski

+

Link

+

Abstract: Large language models (LLMs) can help writers build story worlds by generating world elements, such as factions, characters, and locations. However, making sense of many generated elements can be overwhelming. Moreover, if the user wants to precisely control aspects of generated elements that are difficult to specify verbally, prompting alone may be insufficient. We introduce Patchview, a customizable LLM-powered system that visually aids worldbuilding by allowing users to interact with story concepts and elements through the physical metaphor of magnets and dust. Elements in Patchview are visually dragged closer to concepts with high relevance, facilitating sensemaking. The user can also steer the generation with verbally elusive concepts by indicating the desired position of the element between concepts. When the user disagrees with the LLM's visualization and generation, they can correct those by repositioning the element. These corrections can be used to align the LLM's future behaviors to the user's perception. With a user study, we show that Patchview supports the sensemaking of world elements and steering of element generation, facilitating exploration during the worldbuilding process. Patchview provides insights on how customizable visual representation can help sensemake, steer, and align generative AI model behaviors with the user's intentions.

+

An Interactive System for Supporting Creative Exploration of Cinematic Composition Designs

+

Authors: Rui He, Huaxin Wei, Ying Cao

+

Link

+

Abstract: Designing cinematic compositions, which involves moving cameras through a scene, is essential yet challenging in filmmaking. Machinima filmmaking provides real-time virtual environments for exploring different compositions flexibly and efficiently. However, producing high-quality cinematic compositions in such environments still requires significant cinematography skills and creativity. This paper presents Cinemassist, a tool designed to support and enhance this creative process by generating a variety of cinematic composition proposals at both keyframe and scene levels, which users can incorporate into their workflows and achieve more creative results. At the crux of our system is a deep generative model trained on real movie data, which can generate plausible, diverse camera poses conditioned on 3D animations and additional input semantics. Our model enables an interactive cinematic composition design workflow where users can co-design with the model by being inspired by model-generated suggestions while having control over the generation process. Our user study and expert rating find Cinemassist can facilitate the design process for users of different backgrounds and enhance the design quality especially for users with animation expertise, demonstrating its potential as an invaluable tool in the context of digital filmmaking.

+

Break Q&A: Manipulating Text

+

Beyond the Chat: Executable and Verifiable Text-Editing with LLMs

+

Authors: Philippe Laban, Jesse Vig, Marti Hearst, Caiming Xiong, Chien-Sheng Wu

+

Link

+

Abstract: Conversational interfaces powered by Large Language Models (LLMs) have recently become a popular way to obtain feedback during document editing. However, standard chat-based conversational interfaces cannot explicitly surface the editing changes that they suggest. To give the author more control when editing with an LLM, we present InkSync, an editing interface that suggests executable edits directly within the document being edited. Because LLMs are known to introduce factual errors, InkSync also supports a 3-stage approach to mitigate this risk: Warn authors when a suggested edit introduces new information, help authors Verify the new information's accuracy through external search, and allow a third party to Audit with a posteriori verification via a trace of all auto-generated content. Two usability studies confirm the effectiveness of InkSync's components when compared to standard LLM-based chat interfaces, leading to more accurate and more efficient editing, and improved user experience.

+

ScriptViz: A Visualization Tool to Aid Scriptwriting based on a Large Movie Database

+

Authors: Anyi Rao, Jean-Peïc Chou, Maneesh Agrawala

+

Link

+

Abstract: Scriptwriters usually rely on their mental visualization to create a vivid story by using their imagination to see, feel, and experience the scenes they are writing. Besides mental visualization, they often refer to existing images or scenes in movies and analyze the visual elements to create a certain mood or atmosphere. In this paper, we develop a new tool, ScriptViz, to provide external visualization based on a large movie database for the screenwriting process. It retrieves reference visuals on the fly based on scripts’ text and dialogue from a large movie database. The tool provides two types of control on visual elements that enable writers to 1) see exactly what they want with fixed visual elements and 2) see variances in uncertain elements. User evaluation among 15 scriptwriters shows that ScriptViz is able to present scriptwriters with consistent yet diverse visual possibilities, aligning closely with their scripts and helping their creation.

+

SkipWriter: LLM-Powered Abbreviated Writing on Tablets

+

Authors: Zheer Xu, Shanqing Cai, Mukund Varma T, Subhashini Venugopalan, Shumin Zhai

+

Link

+

Abstract: Large Language Models (LLMs) may offer transformative opportunities for text input, especially for physically demanding modalities like handwriting. We studied a form of abbreviated handwriting by designing, developing, and evaluating a prototype, named SkipWriter, that converts handwritten strokes of a variable-length prefix-based abbreviation (e.g., "ho a y" as handwritten strokes) into the intended full phrase (e.g., "how are you" in the digital format) based on the preceding context. SkipWriter consists of an in-production handwriting recognizer and an LLM fine-tuned on this task. With flexible pen input, SkipWriter allows the user to add and revise prefix strokes when predictions do not match the user's intent. A user evaluation demonstrated a 60% reduction in motor movements with an average speed of 25.78 WPM. We also showed that this reduction is close to the ceiling of our model in an offline simulation.
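
A rough sketch of the expansion step described above: recognized prefix tokens plus the preceding context are turned into a prompt, and a language model proposes the full phrase. The prompt wording and the `call_llm` stub are hypothetical, not SkipWriter's fine-tuned model or actual prompt.

```python
# Illustrative only: expanding a prefix-based abbreviation ("ho a y")
# into a full phrase ("how are you") given the preceding context.
# `call_llm` stands in for whatever model is available; the prompt
# wording is an assumption, not SkipWriter's actual prompt.

def expand_abbreviation(context: str, prefixes: list[str], call_llm) -> str:
    prompt = (
        "Each token below gives the first letters of a word.\n"
        f"Preceding text: {context!r}\n"
        f"Abbreviated input: {' '.join(prefixes)}\n"
        "Write the most likely full phrase:"
    )
    return call_llm(prompt).strip()

# Example with a stubbed model:
fake_llm = lambda prompt: "how are you"
print(expand_abbreviation("Hi Sam!", ["ho", "a", "y"], fake_llm))  # how are you
```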

+

Bluefish: Composing Diagrams with Declarative Relations

+

Authors: Josh Pollock, Catherine Mei, Grace Huang, Elliot Evans, Daniel Jackson, Arvind Satyanarayan

+

Link

+

Abstract: Diagrams are essential tools for problem-solving and communication as they externalize conceptual structures using spatial relationships. But when picking a diagramming framework, users are faced with a dilemma. They can either use a highly expressive but low-level toolkit, whose API does not match their domain-specific concepts, or select a high-level typology, which offers a recognizable vocabulary but supports a limited range of diagrams. To address this gap, we introduce Bluefish: a diagramming framework inspired by component-based user interface (UI) libraries. Bluefish lets users create diagrams using relations: declarative, composable, and extensible diagram fragments that relax the concept of a UI component. Unlike a component, a relation does not have sole ownership over its children nor does it need to fully specify their layout. To render diagrams, Bluefish extends a traditional tree-based scenegraph to a compound graph that captures both hierarchical and adjacent relationships between nodes. To evaluate our system, we construct a diverse example gallery covering many domains including mathematics, physics, computer science, and even cooking. We show that Bluefish's relations are effective declarative primitives for diagrams. Bluefish is open source, and we aim to shape it into both a usable tool and a research platform.

+

Break Q&A: Hot Interfaces

+

Fiery Hands: Designing Thermal Glove through Thermal and Tactile Integration for Virtual Object Manipulation

+

Authors: Haokun Wang, Yatharth Singhal, Hyunjae Gil, Jin Ryong Kim

+

Link

+

Abstract: We present a novel approach to render thermal and tactile feedback to the palm and fingertips through thermal and tactile integration. Our approach minimizes the obstruction of the palm and inner side of the fingers and enables virtual object manipulation while providing localized and global thermal feedback. By leveraging thermal actuators positioned strategically on the outer palm and back of the fingers in interplay with tactile actuators, our approach exploits thermal referral and tactile masking phenomena. Through a series of user studies, we validate the perception of localized thermal sensations across the palm and fingers, showcasing the ability to generate diverse thermal patterns. Furthermore, we demonstrate the efficacy of our approach in VR applications, replicating diverse thermal interactions with virtual objects. This work represents significant progress in thermal interactions within VR, offering enhanced sensory immersion at an optimal energy cost.

+

DexteriSync: A Hand Thermal I/O Exoskeleton for Morphing Finger Dexterity Experience

+

Authors: Ximing Shen, Youichi Kamiyama, Kouta Minamizawa, Jun Nishida

+

Link

+

Abstract: Skin temperature is an important physiological factor for human hand dexterity. Leveraging this feature, we engineered an exoskeleton, called DexteriSync, that can dynamically adjust the user's finger dexterity and induce different thermal perceptions by modulating finger skin temperature. This exoskeleton comprises flexible silicone-copper tube segments, 3D-printed finger sockets, a 3D-printed palm base, a pump system, and a water temperature control with a storage unit. By realising an embodied experience of compromised dexterity, DexteriSync can help product designers understand the lived experience of compromised hand dexterity, such as that of the elderly and/or neurodivergent users, when designing daily necessities for them. We validated DexteriSync via a technical evaluation and two user studies, demonstrating that it can change skin temperature, dexterity, and thermal perception. An exploratory session with design students and an autistic individual with compromised dexterity demonstrated that the exoskeleton provided a more realistic experience than video education and allowed them to gain higher confidence in their designs. The results advocated for the efficacy of experiencing embodied compromised finger dexterity, which can promote an understanding of the related physical challenges and lead to a more persuasive design for assistive tools.

+

Flip-Pelt: Motor-Driven Peltier Elements for Rapid Thermal Stimulation and Congruent Pressure Feedback in Virtual Reality

+

Authors: Seongjun Kang, Gwangbin Kim, Seokhyun Hwang, Jeongju Park, Ahmed Elsharkawy, SeungJun Kim

+

Link

+

Abstract: This study introduces "Flip-Pelt," a motor-driven Peltier device designed to provide rapid thermal stimulation and congruent pressure feedback in virtual reality (VR) environments. Our system incorporates eight motor-driven Peltier elements, allowing for the flipping of preheated or cooled elements to the opposite side. In evaluating the Flip-Pelt device, we assess users' ability to distinguish between heat/cold sources by their patterns and stiffness, and its impact on enhancing haptic experiences in VR content that involves contact with various thermal sources. Our findings demonstrate that rapid thermal stimulation and congruent pressure feedback provided by Flip-Pelt enhance the recognition accuracy of thermal patterns and the stiffness of virtual objects. These features also improve haptic experiences in VR scenarios through their temporal congruency between tactile and thermal stimuli. Additionally, we discuss the scalability of the Flip-Pelt system to other body parts by proposing design prototypes.

+

Hydroptical Thermal Feedback: Spatial Thermal Feedback Using Visible Lights and Water

+

Authors: Sosuke Ichihashi, Masahiko Inami, Hsin-Ni Ho, Noura Howell

+

Link

+

Abstract: We control the temperature of materials in everyday interactions, recognizing temperature's important influence on our bodies, minds, and experiences. However, thermal feedback is an under-explored modality in human-computer interaction partly due to its limited temporal (slow) and spatial (small-area and non-moving) capabilities. We introduce hydroptical thermal feedback, a spatial thermal feedback method that works by applying visible lights on body parts in water. Through physical measurements and psychophysical experiments, our results show: (1) Humans perceive thermal sensations when visible lights are cast on the skin under water, and perceived warmth is greater for lights with shorter wavelengths, (2) temporal capabilities, (3) apparent motion (spatial) of warmth and coolness sensations, and (4) hydroptical thermal feedback can support the perceptual illusion that the water itself is warmer. We propose applications, including virtual reality (VR), shared water experiences, and therapies. Overall, this paper contributes hydroptical thermal feedback as a novel method, empirical results demonstrating its unique capabilities, proposed applications, and design recommendations for using hydroptical thermal feedback. Our method introduces controlled, spatial thermal perceptions to water experiences.

+

Break Q&A: LLM: New applications

+

VoicePilot: Harnessing LLMs as Speech Interfaces for Assistive Robotics

+

Authors: Akhil Padmanabha, Jessie Yuan, Janavi Gupta, Zulekha Karachiwalla, Carmel Majidi, Henny Admoni, Zackory Erickson

+

Link

+

Abstract: Physically assistive robots present an opportunity to significantly increase the well-being and independence of individuals with motor impairments or other forms of disability who are unable to complete activities of daily living. Speech interfaces, especially ones that utilize Large Language Models (LLMs), can enable individuals to effectively and naturally communicate high-level commands and nuanced preferences to robots. Frameworks for integrating LLMs as interfaces to robots for high-level task planning and code generation have been proposed, but fail to incorporate human-centric considerations that are essential when developing assistive interfaces. In this work, we present a framework for incorporating LLMs as speech interfaces for physically assistive robots, constructed iteratively with 3 stages of testing involving a feeding robot, culminating in an evaluation with 11 older adults at an independent living facility. We use both quantitative and qualitative data from the final study to validate our framework and additionally provide design guidelines for using LLMs as speech interfaces for assistive robots. Videos, code, and supporting files are located on our project website: https://sites.google.com/andrew.cmu.edu/voicepilot/

+

ComPeer: A Generative Conversational Agent for Proactive Peer Support

+

Authors: Tianjian Liu, Hongzheng Zhao, Yuheng Liu, Xingbo Wang, Zhenhui Peng

+

Link

+

Abstract: Conversational Agents (CAs) acting as peer supporters have been widely studied and demonstrated beneficial for people's mental health. However, previous peer support CAs are either user-initiated or follow predefined rules to initiate the conversations, which may discourage users from engaging and building relationships with the CAs for long-term benefits. In this paper, we develop ComPeer, a generative CA that can proactively offer adaptive peer support to users. ComPeer leverages large language models to detect and reflect significant events in the dialogue, enabling it to strategically plan the timing and content of proactive care. In addition, ComPeer incorporates peer support strategies, conversation history, and its persona into the generative messages. Our one-week between-subjects study (N=24) demonstrates ComPeer's strength in providing peer support over time and boosting users' engagement compared to a baseline user-initiated CA. We report users' interaction patterns with ComPeer and discuss implications for designing proactive generative agents to promote people's well-being.

+

SHAPE-IT: Exploring Text-to-Shape-Display for Generative Shape-Changing Behaviors with LLMs

+

Authors: Wanli Qian, Chenfeng Gao, Anup Sathya, Ryo Suzuki, Ken Nakagaki

+

Link

+

Abstract: This paper introduces text-to-shape-display, a novel approach to generating dynamic shape changes in pin-based shape displays through natural language commands. By leveraging large language models (LLMs) and AI-chaining, our approach allows users to author shape-changing behaviors on demand through text prompts without programming. We describe the foundational aspects necessary for such a system, including the identification of key generative elements (primitive, animation, and interaction) and design requirements to enhance user interaction, based on formative exploration and iterative design processes. Based on these insights, we develop SHAPE-IT, an LLM-based authoring tool for a 24 x 24 shape display, which translates the user's textual command into executable code and allows for quick exploration through a web-based control interface. We evaluate the effectiveness of SHAPE-IT in two ways: 1) performance evaluation and 2) user evaluation (N=10). The study conclusions highlight the ability to facilitate rapid ideation of a wide range of shape-changing behaviors with AI. However, the findings also expose accuracy-related challenges and limitations, prompting further exploration into refining the framework for leveraging AI to better suit the unique requirements of shape-changing systems.
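
As a rough illustration of the kind of executable artifact a text-to-shape-display pipeline might emit, the sketch below turns an idea like "a ripple" into per-frame pin heights for a 24 x 24 display. The "ripple" primitive, value ranges, and frame count are invented for the example and are not SHAPE-IT's generated code.

```python
import numpy as np

# Illustrative sketch of a shape-display frame generator. The "ripple"
# primitive and the value ranges are made up; a real system would drive
# actual pin actuators rather than print array shapes.

GRID = 24

def ripple_frame(t: float, height: float = 1.0, wavelength: float = 6.0):
    """Return a 24x24 array of pin heights in [0, height] at time t."""
    ys, xs = np.indices((GRID, GRID))
    r = np.hypot(xs - GRID / 2, ys - GRID / 2)
    return height * 0.5 * (1 + np.sin(2 * np.pi * r / wavelength - t))

frames = [ripple_frame(t) for t in np.linspace(0, 2 * np.pi, 30)]
print(frames[0].shape)  # (24, 24) -- one frame of pin heights
```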

+

WaitGPT: Monitoring and Steering Conversational LLM Agent in Data Analysis with On-the-Fly Code Visualization

+

Authors: Liwenhan Xie, Chengbo Zheng, Haijun Xia, Huamin Qu, Chen Zhu-Tian

+

Link

+

Abstract: Large language models (LLMs) support data analysis through conversational user interfaces, as exemplified in OpenAI's ChatGPT (formerly known as Advanced Data Analysis or Code Interpreter). Essentially, LLMs produce code for accomplishing diverse analysis tasks. However, presenting raw code can obscure the logic and hinder user verification. To empower users with enhanced comprehension and augmented control over analysis conducted by LLMs, we propose a novel approach to transform LLM-generated code into an interactive visual representation. In the approach, users are provided with a clear, step-by-step visualization of the LLM-generated code in real time, allowing them to understand, verify, and modify individual data operations in the analysis. Our design decisions are informed by a formative study (N=8) probing into user practice and challenges. We further developed a prototype named WaitGPT and conducted a user study (N=12) to evaluate its usability and effectiveness. The findings from the user study reveal that WaitGPT facilitates monitoring and steering of data analysis performed by LLMs, enabling participants to enhance error detection and increase their overall confidence in the results.
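
One simple way to surface individual data operations from LLM-generated analysis code, in the spirit described above, is to walk the code's syntax tree and collect the method calls it contains. This is a minimal sketch, not WaitGPT's implementation; the example snippet is invented, and chained calls surface outer-first in this traversal.

```python
import ast

# Minimal sketch (not WaitGPT's implementation): list the method calls
# in a snippet of LLM-generated analysis code so each data operation can
# be shown and checked individually. Chained calls appear outer-first.

generated = """
df = pd.read_csv("sales.csv")
monthly = df.groupby("month").sum()
monthly.plot(kind="bar")
"""

class CallCollector(ast.NodeVisitor):
    def __init__(self):
        self.calls = []

    def visit_Call(self, node):
        if isinstance(node.func, ast.Attribute):
            self.calls.append(node.func.attr)   # e.g. "groupby", "plot"
        elif isinstance(node.func, ast.Name):
            self.calls.append(node.func.id)
        self.generic_visit(node)

collector = CallCollector()
collector.visit(ast.parse(generated))
print(collector.calls)  # ['read_csv', 'sum', 'groupby', 'plot']
```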

+

Break Q&A: Big to Small Fab

+

Don't Mesh Around: Streamlining Manual-Digital Fabrication Workflows with Domain-Specific 3D Scanning

+

Authors: Ilan Moyer, Sam Bourgault, Devon Frost, Jennifer Jacobs

+

Link

+

Abstract: Software-first digital fabrication workflows are often at odds with material-driven approaches to design. Material-driven design is especially critical in manual ceramics, where the craftsperson shapes the form through hands-on engagement. We present the Craft-Aligned Scanner (CAS), a 3D scanning and clay-3D printing system that enables practitioners to design for digital fabrication through traditional pottery techniques. The CAS augments a pottery wheel that has 3D printing capabilities with a precision distance sensor on a vertically oriented linear axis. By increasing the height of the sensor as the wheel turns, we directly synthesize a 3D spiralized toolpath from the geometry of the object on the wheel, enabling the craftsperson to immediately transition from manual fabrication to 3D printing without leaving the tool. We develop new digital fabrication workflows with CAS to augment scanned forms with functional features and add both procedurally and real-time-generated surface textures. CAS demonstrates how 3D printers can support material-first digital fabrication design without foregoing the expressive possibilities of software-based design.
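
The toolpath synthesis described above can be pictured with a short sketch: as the wheel rotates and the sensor rises, each (angle, height) sample pairs with a measured radius to give one (x, y, z) point on a continuous spiral. The sampling rates, layer height, and the `radius_at` stub below are assumptions, not the CAS implementation.

```python
import math

# Illustrative sketch (not the CAS implementation): turn distance-sensor
# readings taken while the wheel spins and the sensor rises into a
# spiral toolpath of (x, y, z) points for clay 3D printing.
# `radius_at` and the layer height are stand-ins for real sensor data.

def spiral_toolpath(radius_at, revolutions=50, samples_per_rev=90,
                    layer_height=2.0):
    path = []
    for i in range(revolutions * samples_per_rev):
        theta = 2 * math.pi * i / samples_per_rev   # wheel angle (rad)
        z = layer_height * i / samples_per_rev      # sensor height (mm)
        r = radius_at(theta, z)                     # measured radius (mm)
        path.append((r * math.cos(theta), r * math.sin(theta), z))
    return path

# A fake "scanned" pot whose radius narrows slightly with height:
fake_radius = lambda theta, z: 60.0 - 0.15 * z
print(len(spiral_toolpath(fake_radius)))  # 4500 toolpath points
```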

+

E-Joint: Fabrication of Large-Scale Interactive Objects Assembled by 3D Printed Conductive Parts with Copper Plated Joints

+

Authors: Xiaolong Li, Cheng Yao, Shang Shi, Shuyue Feng, Yujie Zhou, Haoye Dong, Shichao Huang, Xueyan Cai, Kecheng Jin, Fangtian Ying, Guanyun Wang

+

Link

+

Abstract: The advent of conductive thermoplastic filaments and multi-material 3D printing has made it feasible to create interactive 3D printed objects. Yet, challenges arise due to volume constraints of desktop 3D printers and the high resistive characteristics of current conductive materials, making the fabrication of large-scale or highly conductive interactive objects daunting. We propose E-Joint, a novel fabrication pipeline for 3D printed objects utilizing mortise and tenon joint structures combined with a copper plating process. The segmented pieces and joint structures, along with integrated circuits, are customized in software and then electroplated for enhanced conductivity. We designed four distinct electrified joint structures in experiments and evaluated the practical feasibility and effectiveness of fabricating pipes. By constructing three applications with those structures, we verified the usability of E-Joint in making large-scale interactive objects and show a path toward a more integrated future for manufacturing.

+

MobiPrint: A Mobile 3D Printer for Environment-Scale Design and Fabrication

+

Authors: Daniel Campos Zamora, Liang He, Jon Froehlich

+

Link

+

Abstract: 3D printing is transforming how we customize and create physical objects in engineering, accessibility, and art. However, this technology is still primarily limited to confined working areas and dedicated print beds thereby detaching design and fabrication from real-world environments and making measuring and scaling objects tedious and labor-intensive. In this paper, we present MobiPrint, a prototype mobile fabrication system that combines elements from robotics, architecture, and Human-Computer Interaction (HCI) to enable environment-scale design and fabrication in ad-hoc indoor environments. MobiPrint provides a multi-stage fabrication pipeline: first, the robotic 3D printer automatically scans and maps an indoor space; second, a custom design tool converts the map into an interactive CAD canvas for editing and placing models in the physical world; finally, the MobiPrint robot prints the object directly on the ground at the defined location. Through a "proof-by-demonstration" validation, we highlight our system's potential across different applications, including accessibility, home furnishing, floor signage, and art. We also conduct a technical evaluation to assess MobiPrint’s localization accuracy, ground surface adhesion, payload capacity, and mapping speed. We close with a discussion of open challenges and opportunities for the future of contextualized mobile fabrication.

+

StructCurves: Interlocking Block-Based Line Structures

+

Authors: Zezhou Sun, Devin Balkcom, Emily Whiting

+

Link

+

Abstract: We present a new class of curved block-based line structures whose component chains are flexible when separated, and provably rigid when assembled together into an interlocking double chain. The joints are inspired by traditional zippers, where a binding fabric or mesh connects individual teeth. Unlike traditional zippers, the joint design produces a rigid interlock with programmable curvature. This allows fairly strong curved structures to be built out of easily stored flexible chains. In this paper, we introduce a pipeline for generating these curved structures using a novel block design template based on revolute joints. Mesh embedded in these structures maintains block spacing and assembly order. We evaluate the rigidity of the curved structures through mechanical performance testing and demonstrate several applications.

+

Break Q&A: Shared Spaces

+

BlendScape: Enabling End-User Customization of Video-Conferencing Environments through Generative AI

+

HONORABLE_MENTION

+

Authors: Shwetha Rajaram, Nels Numan, Bala Kumaravel, Nicolai Marquardt, Andrew Wilson

+

Link

+

Abstract: Today’s video-conferencing tools support a rich range of professional and social activities, but their generic meeting environments cannot be dynamically adapted to align with distributed collaborators’ needs. To enable end-user customization, we developed BlendScape, a rendering and composition system for video-conferencing participants to tailor environments to their meeting context by leveraging AI image generation techniques. BlendScape supports flexible representations of task spaces by blending users’ physical or digital backgrounds into unified environments and implements multimodal interaction techniques to steer the generation. Through an exploratory study with 15 end-users, we investigated whether and how they would find value in using generative AI to customize video-conferencing environments. Participants envisioned using a system like BlendScape to facilitate collaborative activities in the future, but required further controls to mitigate distracting or unrealistic visual elements. We implemented scenarios to demonstrate BlendScape's expressiveness for supporting environment design strategies from prior work and propose composition techniques to improve the quality of environments.

+

SpaceBlender: Creating Context-Rich Collaborative Spaces Through Generative 3D Scene Blending

+

Authors: Nels Numan, Shwetha Rajaram, Bala Kumaravel, Nicolai Marquardt, Andrew Wilson

+

Link

+

Abstract: There is increased interest in using generative AI to create 3D spaces for virtual reality (VR) applications. However, today’s models produce artificial environments, falling short of supporting collaborative tasks that benefit from incorporating the user's physical context. To generate environments that support VR telepresence, we introduce SpaceBlender, a novel pipeline that utilizes generative AI techniques to blend users' physical surroundings into unified virtual spaces. This pipeline transforms user-provided 2D images into context-rich 3D environments through an iterative process consisting of depth estimation, mesh alignment, and diffusion-based space completion guided by geometric priors and adaptive text prompts. In a preliminary within-subjects study, where 20 participants performed a collaborative VR affinity diagramming task in pairs, we compared SpaceBlender with a generic virtual environment and a state-of-the-art scene generation framework, evaluating its ability to create virtual spaces suitable for collaboration. Participants appreciated the enhanced familiarity and context provided by SpaceBlender but also noted complexities in the generative environments that could detract from task focus. Drawing on participant feedback, we propose directions for improving the pipeline and discuss the value and design of blended spaces for different scenarios.

+

MyWebstrates: Webstrates as Local-first Software

+

Authors: Clemens Klokmose, James Eagan, Peter van Hardenberg

+

Link

+

Abstract: Webstrates are web substrates, a practical realization of shareable dynamic media under which distributability, shareability, and malleability are fundamental software principles. Webstrates blur the distinction between application and document in a way that enables users to share, repurpose, and refit software across a variety of domains, but its reliance on a central server constrains its use, is at odds with personal and collective control of data, and limits applications to the web. We extend the fundamental principles to include interoperability and sovereignty over data and propose MyWebstrates, an implementation of Webstrates on top of a new, lower-level substrate for synchronization built around local-first software principles. MyWebstrates registers itself in the user’s browser and functions as a piece of local software that can selectively synchronise data over sync servers or peer-to-peer connections. We show how MyWebstrates extends Webstrates to enable offline collaborative use, interoperate between Webstrates on non-web technologies such as Unity, and maintain personal and collective sovereignty over data. We demonstrate how this enables new types of applications of Webstrates and discuss limitations of this approach and new challenges that it reveals.

+

SituationAdapt: Contextual UI Optimization in Mixed Reality with Situation Awareness via LLM Reasoning

+

Authors: Zhipeng Li, Christoph Gebhardt, Yves Inglin, Nicolas Steck, Paul Streli, Christian Holz

+

Link

+

Abstract: Mixed Reality is increasingly used in mobile settings beyond controlled home and office spaces. This mobility introduces the need for user interface layouts that adapt to varying contexts. However, existing adaptive systems are designed only for static environments. In this paper, we introduce SituationAdapt, a system that adjusts Mixed Reality UIs to real-world surroundings by considering environmental and social cues in shared settings. Our system consists of perception, reasoning, and optimization modules for UI adaptation. Our perception module identifies objects and individuals around the user, while our reasoning module leverages a Vision-and-Language Model to assess the placement of interactive UI elements. This ensures that adapted layouts do not obstruct relevant environmental cues or interfere with social norms. Our optimization module then generates Mixed Reality interfaces that account for these considerations as well as temporal constraints. The evaluation of SituationAdapt is two-fold: We first validate our reasoning component’s capability in assessing UI contexts comparable to human expert users. In an online user study, we then establish our system’s capability of producing context-aware MR layouts, where it outperformed adaptive methods from previous work. We further demonstrate the versatility and applicability of SituationAdapt with a set of application scenarios.

+

Desk2Desk: Optimization-based Mixed Reality Workspace Integration for Remote Side-by-side Collaboration

+

Authors: Ludwig Sidenmark, Tianyu Zhang, Leen Al Lababidi, Jiannan Li, Tovi Grossman

+

Link

+

Abstract: Mixed Reality enables hybrid workspaces where physical and virtual monitors are adaptively created and moved to suit the current environment and needs. However, in shared settings, individual users’ workspaces are rarely aligned and can vary significantly in the number of monitors, available physical space, and workspace layout, creating inconsistencies between workspaces which may cause confusion and reduce collaboration. We present Desk2Desk, an optimization-based approach for remote collaboration in which the hybrid workspaces of two collaborators are fully integrated to enable immersive side-by-side collaboration. The optimization adjusts each user’s workspace in layout and number of shared monitors and creates a mapping between workspaces to handle inconsistencies between workspaces due to physical constraints (e.g. physical monitors). We show in a user study how our system adaptively merges dissimilar physical workspaces to enable immersive side-by-side collaboration, and demonstrate how an optimization-based approach can effectively address dissimilar physical layouts.
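
One ingredient of such a workspace optimization is a one-to-one mapping between the two users' monitors. Below is a hedged sketch of that sub-problem only, assuming SciPy is available and using made-up monitor directions rather than the paper's actual cost model.

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

# Illustrative only: pair each of user A's monitors with one of user B's
# so that the overall layout difference is minimal. The angular positions
# are made up; the real system optimizes richer constraints.

angles_a = np.array([-30.0, 0.0, 30.0])   # monitor directions, user A (deg)
angles_b = np.array([5.0, 40.0, -25.0])   # monitor directions, user B (deg)

cost = np.abs(angles_a[:, None] - angles_b[None, :])  # pairwise mismatch
rows, cols = linear_sum_assignment(cost)              # Hungarian algorithm

for a, b in zip(rows, cols):
    print(f"A monitor {a} <-> B monitor {b} (mismatch {cost[a, b]:.0f} deg)")
```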

+

Break Q&A: Learning to Learn

+

Patterns of Hypertext-Augmented Sensemaking

+

Authors: Siyi Zhu, Robert Haisfield, Brendan Langen, Joel Chan

+

Link

+

Abstract: The early days of HCI were marked by bold visions of hypertext as a transformative medium for augmented sensemaking, exemplified in systems like Memex, Xanadu, and NoteCards. Today, however, hypertext is often disconnected from discussions of the future of sensemaking. In this paper, we investigate how the recent resurgence in hypertext "tools for thought" might point to new directions for hypertext-augmented sensemaking. Drawing on detailed analyses of guided tours with 23 scholars, we describe hypertext-augmented use patterns for dealing with the core problem of revisiting and reusing existing/past ideas during scholarly sensemaking. We then discuss how these use patterns validate and extend existing knowledge of hypertext design patterns for sensemaking, and point to new design opportunities for augmented sensemaking.

+

Augmented Physics: Creating Interactive and Embedded Physics Simulations from Static Textbook Diagrams

+

BEST_PAPER

+

Authors: Aditya Gunturu, Yi Wen, Nandi Zhang, Jarin Thundathil, Rubaiat Habib Kazi, Ryo Suzuki

+

Link

+

Abstract: We introduce Augmented Physics, a machine learning-integrated authoring tool designed for creating embedded interactive physics simulations from static textbook diagrams. Leveraging recent advancements in computer vision, such as Segment Anything and Multi-modal LLMs, our web-based system enables users to semi-automatically extract diagrams from physics textbooks and generate interactive simulations based on the extracted content. These interactive diagrams are seamlessly integrated into scanned textbook pages, facilitating interactive and personalized learning experiences across various physics concepts, such as optics, circuits, and kinematics. Drawing from an elicitation study with seven physics instructors, we explore four key augmentation strategies: 1) augmented experiments, 2) animated diagrams, 3) bi-directional binding, and 4) parameter visualization. We evaluate our system through technical evaluation, a usability study (N=12), and expert interviews (N=12). Study findings suggest that our system can facilitate more engaging and personalized learning experiences in physics education.

+

Qlarify: Recursively Expandable Abstracts for Dynamic Information Retrieval over Scientific Papers

+

Authors: Raymond Fok, Joseph Chee Chang, Tal August, Amy Zhang, Daniel Weld

+

Link

+

Abstract: Navigating the vast scientific literature often starts with browsing a paper’s abstract. However, when a reader seeks additional information, not present in the abstract, they face a costly cognitive chasm during their dive into the full text. To bridge this gap, we introduce recursively expandable abstracts, a novel interaction paradigm that dynamically expands abstracts by progressively incorporating additional information from the papers’ full text. This lightweight interaction allows scholars to specify their information needs by quickly brushing over the abstract or selecting AI-suggested expandable entities. Relevant information is synthesized using a retrieval-augmented generation approach, presented as a fluid, threaded expansion of the abstract, and made efficiently verifiable via attribution to relevant source-passages in the paper. Through a series of user studies, we demonstrate the utility of recursively expandable abstracts and identify future opportunities to support low-effort and just-in-time exploration of long-form information contexts through LLM-powered interactions.

+

LessonPlanner: Assisting Novice Teachers to Prepare Pedagogy-Driven Lesson Plans with Large Language Models

+

Authors: Haoxiang Fan, Guanzheng Chen, Xingbo Wang, Zhenhui Peng

+

Link

+

Abstract: Preparing a lesson plan, e.g., a detailed road map with strategies and materials for instructing a 90-minute class, is beneficial yet challenging for novice teachers. Large language models (LLMs) can ease this process by generating adaptive content for lesson plans, which would otherwise require teachers to create from scratch or search existing resources. In this work, we first conduct a formative study with six novice teachers to understand their needs for support of preparing lesson plans with LLMs. Then, we develop LessonPlanner that assists users to interactively construct lesson plans with adaptive LLM-generated content based on Gagne's nine events. Our within-subjects study (N=12) shows that compared to the baseline ChatGPT interface, LessonPlanner can significantly improve the quality of outcome lesson plans and ease users' workload in the preparation process. Our expert interviews (N=6) further demonstrate LessonPlanner's usefulness in suggesting effective teaching strategies and meaningful educational resources. We discuss concerns on and design considerations for supporting teaching activities with LLMs.

+

Break Q&A: Generating Visuals

+

ShadowMagic: Designing Human-AI Collaborative Support for Comic Professionals’ Shadowing

+

Authors: Amrita Ganguly, Chuan Yan, John Chung, Tong Sun, YOON KIHEON, Yotam Gingold, Sungsoo Ray Hong

+

Link

+

Abstract: Shadowing allows artists to convey realistic volume and emotion of characters in comic colorization. While AI technologies have the potential to improve professionals’ shadowing experience, current practice is manual and time-consuming. To understand how we can improve their shadowing experience, we conducted interviews with 5 professionals. We found that professionals’ level of engagement can vary depending on semantics, such as characters’ faces or hair. We also found they spent time on shadow “landscaping”—deciding where to place large shadow regions to create a realistic volumetric presentation while the final results can vary dramatically depending on their “staging” and “attention guiding” needs. We discovered they would accept AI suggestions for less engaging semantic parts or landscaping, while needing the capability to adjust details. Based on our observations, we developed ShadowMagic, which (1) generates AI-driven shadows based on commonly used light directions, (2) enables users to selectively choose results depending on semantics, and (3) allows users to complete shadow areas themselves for further perfection. Through a summative evaluation with 5 professionals, we found that they were significantly more satisfied with our AI-driven results compared to a baseline. We also found that ShadowMagic’s “step by step” workflow helps participants more easily adopt AI-driven results. We conclude by providing implications.

+

What's the Game, then? Opportunities and Challenges for Runtime Behavior Generation

+

BEST_PAPER

+

Authors: Nicholas Jennings, Han Wang, Isabel Li, James Smith, Bjoern Hartmann

+

Link

+

Abstract: Procedural content generation (PCG), the process of algorithmically creating game components instead of manually, has been a common tool of game development for decades. Recent advances in large language models (LLMs) enable the generation of game behaviors based on player input at runtime. Such code generation brings with it the possibility of entirely new gameplay interactions that may be difficult to integrate with typical game development workflows. We explore these implications through GROMIT, a novel LLM-based runtime behavior generation system for Unity. When triggered by a player action, GROMIT generates a relevant behavior which is compiled without developer intervention and incorporated into the game. We create three demonstration scenarios with GROMIT to investigate how such a technology might be used in game development. In a system evaluation we find that our implementation is able to produce behaviors that result in significant downstream impacts to gameplay. We then conduct an interview study with n=13 game developers using GROMIT as a probe to elicit their current opinion on runtime behavior generation tools, and enumerate the specific themes curtailing the wider use of such tools. We find that the main themes of concern are quality considerations, community expectations, and fit with developer workflows, and that several of the subthemes are unique to runtime behavior generation specifically. We outline a future work agenda to address these concerns, including the need for additional guardrail systems for behavior generation.

+

StyleFactory: Towards Better Style Alignment in Image Creation through Style-Strength-Based Control and Evaluation

+

Authors: Mingxu Zhou, Dengming Zhang, Weitao You, Ziqi Yu, Yifei Wu, Chenghao Pan, Huiting Liu, Tianyu Lao, Pei Chen

+

Link

+

Abstract: Generative AI models have been widely used for image creation. However, generating images that are well-aligned with users' personal styles on aesthetic features (e.g., color and texture) can be challenging due to the poor style expression and interpretation between humans and models. Through a formative study, we observed that participants showed a clear subjective perception of the desired style and variations in its strength, which directly inspired us to develop style-strength-based control and evaluation. Building on this, we present StyleFactory, an interactive system that helps users achieve style alignment. Our interface enables users to rank images based on their strengths in the desired style and visualizes the strength distribution of other images in that style from the model's perspective. In this way, users can evaluate the understanding gap between themselves and the model, and define well-aligned personal styles for image creation through targeted iterations. Our technical evaluation and user study demonstrate that StyleFactory accurately generates images in specific styles, effectively facilitates style alignment in image creation workflow, stimulates creativity, and enhances the user experience in human-AI interactions.

+

AutoSpark: Supporting Automobile Appearance Design Ideation with Kansei Engineering and Generative AI

+

Authors: Liuqing Chen, Qianzhi Jing, Yixin Tsang, Qianyi Wang, Ruocong Liu, Duowei Xia, Yunzhan Zhou, Lingyun Sun

+

Link

+

Abstract: Rapid creation of novel product appearance designs that align with consumer emotional requirements poses a significant challenge. Text-to-image models, with their excellent image generation capabilities, have demonstrated potential in providing inspiration to designers. However, designers still encounter issues including aligning emotional needs, expressing design intentions, and comprehending generated outcomes in practical applications. To address these challenges, we introduce AutoSpark, an interactive system that integrates Kansei Engineering and generative AI to provide creativity support for designers in creating automobile appearance designs that meet emotional needs. AutoSpark employs a Kansei Engineering engine powered by generative AI and a semantic network to assist designers in emotional need alignment, design intention expression, and prompt crafting. It also facilitates designers' understanding and iteration of generated results through fine-grained image-image similarity comparisons and text-image relevance assessments. The design-thinking map within its interface aids in managing the design process. Our user study indicates that AutoSpark effectively aids designers in producing designs that are more aligned with emotional needs and of higher quality compared to a baseline system, while also enhancing the designers' experience in the human-AI co-creation process.

+

Break Q&A: Hacking Perception

+

Predicting the Limits: Tailoring Unnoticeable Hand Redirection Offsets in Virtual Reality to Individuals’ Perceptual Boundaries

+

Authors: Martin Feick, Kora Regitz, Lukas Gehrke, André Zenner, Anthony Tang, Tobias Jungbluth, Maurice Rekrut, Antonio Krüger

+

Link

+

Abstract: Many illusion and interaction techniques in Virtual Reality (VR) rely on Hand Redirection (HR), which has proved to be effective as long as the introduced offsets between the position of the real and virtual hand do not noticeably disturb the user experience. Yet calibrating HR offsets is a tedious and time-consuming process involving psychophysical experimentation, and the resulting thresholds are known to be affected by many variables---limiting HR's practical utility. As a result, there is a clear need for alternative methods that allow tailoring HR to the perceptual boundaries of individual users. We conducted an experiment with 18 participants combining movement, eye gaze and EEG data to detect HR offsets Below, At, and Above individuals' detection thresholds. Our results suggest that we can distinguish HR At and Above from no HR. Our exploration provides a promising new direction with potentially strong implications for the broad field of VR illusions.

+

Modulating Heart Activity and Task Performance using Haptic Heartbeat Feedback: A Study Across Four Body Placements

+

Authors: Andreia Valente, Dajin Lee, Seungmoon Choi, Mark Billinghurst, Augusto Esteves

+

Link

+

Abstract: This paper explores the impact of vibrotactile haptic feedback on heart activity when the feedback is provided at four different body locations (chest, wrist, neck, and ankle) and with two feedback rates (50 bpm and 110 bpm). A user study found that the neck placement resulted in higher heart rates and lower heart rate variability, and higher frequencies correlated with increased heart rates and decreased heart rate variability. The chest was preferred in self-reported metrics, and neck placement was perceived as less satisfying, harmonious, and immersive. This research contributes to understanding the interplay between psychological experiences and physiological responses when using haptic biofeedback resembling real body signals.

+

Augmented Breathing via Thermal Feedback in the Nose

+

Authors: Jas Brooks, Alex Mazursky, Janice Hixon, Pedro Lopes

+

Link

+

Abstract: We propose, engineer, and study a novel method to augment the feeling of breathing—enabling interactive applications to let users feel like they are inhaling more/less air (perceived nasal airflow). We achieve this effect by cooling or heating the nose in sync with the user’s inhalation. Our illusion builds on the physiology of breathing: we perceive our breath predominantly through the cooling of our nasal cavities during inhalation. This is why breathing in a “fresh” cold environment feels easier than in a “stuffy” hot environment, even when the inhaled volume is the same. Our psychophysical study confirmed that our in-nose temperature stimulation significantly influenced breathing perception in both directions: making it feel harder & easier to breathe. Further, we found that ~90% of the trials were described as a change in perceived airflow/breathing, while only ~8% as temperature. Following this, we engineered a compact device worn across the septum that uses Peltier elements. We illustrate the potential of this augmented breathing in interactive contexts, such as for virtual reality (e.g., rendering ease of breathing crisp air or difficulty breathing with a deteriorated gas mask) and everyday interactions (e.g., in combination with a relaxation application or to alleviate the perceived breathing resistance when wearing a mask).

+

Thermal In Motion: Designing Thermal Flow Illusions with Tactile and Thermal Interaction

+

Authors: Yatharth Singhal, Daniel Honrales, Haokun Wang, Jin Ryong Kim

+

Link

+

Abstract: This study presents a novel method for creating moving thermal sensations by integrating the thermal referral illusion with tactile motion. Conducted through three experiments on human forearms, the first experiment examined the impact of temperature and thermal actuator placement on perceived thermal motion, finding the clearest perception with a centrally positioned actuator under both hot and cold conditions. The second experiment identified the speed thresholds of perceived thermal motion, revealing a wider detectable range in hot conditions (1.8 cm/s to 9.5 cm/s) compared to cold conditions (2.4 cm/s to 5.0 cm/s). Finally, we integrated our approach into virtual reality (VR) to assess its feasibility through two interaction scenarios. Our results shed light on the comprehension of thermal perception and its integration with tactile cues, promising significant advancements in incorporating thermal motion into diverse thermal interfaces for immersive VR experiences.

+

Break Q&A: Beyond mobile

+

picoRing: battery-free rings for subtle thumb-to-index input

+

Authors: Ryo Takahashi, Eric Whitmire, Roger Boldu, Shiu Ng, Wolf Kienzle, Hrvoje Benko

+

Link

+

Abstract: Smart rings for subtle, reliable finger input offer an attractive path for ubiquitous interaction with wearable computing platforms. However, compared to ordinary rings worn for cultural or fashion reasons, smart rings are much bulkier and less comfortable, largely due to the space required for a battery, which also limits the space available for sensors. This paper presents picoRing, a flexible sensing architecture that enables a variety of battery-free smart rings paired with a wristband. By inductively connecting a wristband-based sensitive reader coil with a ring-based fully-passive sensor coil, picoRing enables the wristband to stably detect the passive response from the ring via a weak inductive coupling. We demonstrate four different rings that support thumb-to-finger interactions like pressing, sliding, or scrolling. When users perform these interactions, the corresponding ring converts each input into a unique passive response through a network of passive switches. Combining the coil-based sensitive readout with the fully-passive ring design enables a tiny ring that weighs as little as 1.5 g and achieves a 13 cm stable readout despite finger bending and proximity to metal.
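
The sensing principle behind such passive, inductively read tags can be summarized with the standard LC-resonance relation below. This is the generic textbook picture, not necessarily picoRing's exact circuit; L, C, and ΔC are illustrative symbols.

```latex
% Generic principle of reading a passive LC tag through inductive
% coupling (an illustration, not picoRing's exact circuit): the tag
% responds most strongly at its resonant frequency
\[
  f_0 = \frac{1}{2\pi\sqrt{LC}},
\]
% and a passive switch that adds capacitance \(\Delta C\) shifts this to
\[
  f_0' = \frac{1}{2\pi\sqrt{L\,(C+\Delta C)}} < f_0 ,
\]
% so the reader coil can distinguish inputs by which resonance it observes.
```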

+

WatchLink: Enhancing Smartwatches with Sensor Add-Ons via ECG Interface

+

Authors: Anandghan Waghmare, Ishan Chatterjee, Vikram Iyer, Shwetak Patel

+

Link

+

Abstract: We introduce a low-power communication method that lets smartwatches leverage existing electrocardiogram (ECG) hardware as a data communication interface. Our unique approach enables the connection of external, inexpensive, and low-power "add-on" sensors to the smartwatch, expanding its functionalities. These sensors cater to specialized user needs beyond those offered by pre-built sensor suites, at a fraction of the cost and power of traditional communication protocols, including Bluetooth Low Energy. To demonstrate the feasibility of our approach, we conduct a series of exploratory and evaluative tests to characterize the ECG interface as a communication channel on commercial smartwatches. We design a simple transmission scheme using commodity components, demonstrating cost and power benefits. Further, we build and test a suite of add-on sensors, including UV light, body temperature, buttons, and breath alcohol, all of which achieved testing objectives at low material cost and power usage. This research paves the way for personalized and user-centric wearables by offering a cost-effective solution to expand their functionalities.
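
To picture what a "simple transmission scheme" over such a channel could look like, here is a generic on-off-keying sketch: each bit becomes a burst of samples that the receiving side thresholds back into bits. The bit framing, sample count, and noise level are assumptions, not WatchLink's actual scheme.

```python
import numpy as np

# Generic on-off-keying sketch (an assumption, not WatchLink's actual
# scheme): an add-on sensor encodes bits as bursts that the watch's ECG
# channel samples, then the watch thresholds the samples back into bits.

SAMPLES_PER_BIT = 8

def encode(bits):
    """One burst of 'high' samples per 1-bit, silence per 0-bit."""
    return np.repeat(np.asarray(bits, dtype=float), SAMPLES_PER_BIT)

def decode(signal, threshold=0.5):
    chunks = signal.reshape(-1, SAMPLES_PER_BIT)
    return (chunks.mean(axis=1) > threshold).astype(int).tolist()

payload = [1, 0, 1, 1, 0, 0, 1, 0]   # e.g. one reading from an add-on sensor
noisy = encode(payload) + np.random.normal(0, 0.1, len(payload) * SAMPLES_PER_BIT)
print(decode(noisy) == payload)      # True (with high probability at this noise level)
```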

+

PrISM-Observer: Intervention Agent to Help Users Perform Everyday Procedures Sensed using a Smartwatch

+

Authors: Riku Arakawa, Hiromu Yakura, Mayank Goel

+

Link

+

Abstract: We routinely perform procedures (such as cooking) that include a set of atomic steps. Often, inadvertent omission or misordering of a single step can lead to serious consequences, especially for those experiencing cognitive challenges such as dementia. This paper introduces PrISM-Observer, a smartwatch-based, context-aware, real-time intervention system designed to support daily tasks by preventing errors. Unlike traditional systems that require users to seek out information, the agent observes user actions and intervenes proactively. This capability is enabled by the agent's ability to continuously update its belief in the user's behavior in real-time through multimodal sensing and forecast optimal intervention moments and methods. We first validated the steps-tracking performance of our framework through evaluations across three datasets with different complexities. Then, we implemented a real-time agent system using a smartwatch and conducted a user study in a cooking task scenario. The system generated helpful interventions, and we gained positive feedback from the participants. The general applicability of PrISM-Observer to daily tasks promises broad applications, for instance, including support for users requiring more involved interventions, such as people with dementia or post-surgical patients.
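
The belief updating described above can be illustrated with a minimal Bayesian filter over procedure steps: a transition model predicts where the user likely moved, and a sensor likelihood corrects that prediction. The steps, matrices, and numbers below are invented for illustration and are not the paper's model.

```python
import numpy as np

# Minimal Bayesian step-tracking sketch (made-up numbers, not the
# paper's model): keep a belief over which step of a procedure the user
# is in, and update it with a transition model and a sensor likelihood.

STEPS = ["fill kettle", "boil water", "pour water", "steep tea"]

transition = np.array([   # P(next step | current step)
    [0.6, 0.4, 0.0, 0.0],
    [0.0, 0.7, 0.3, 0.0],
    [0.0, 0.0, 0.6, 0.4],
    [0.0, 0.0, 0.0, 1.0],
])

def update(belief, likelihood):
    """One predict-then-correct step of the belief over procedure steps."""
    predicted = belief @ transition      # where the user likely moved
    posterior = predicted * likelihood   # weight by sensed evidence
    return posterior / posterior.sum()

belief = np.array([1.0, 0.0, 0.0, 0.0])  # start: certainly at step 0
sensed = np.array([0.1, 0.7, 0.1, 0.1])  # watch senses "boil water"-like motion
belief = update(belief, sensed)
print(STEPS[int(belief.argmax())])       # boil water
```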

+

Break Q&A: New realities

+

SIM2VR: Towards Automated Biomechanical Testing in VR

+

Authors: Florian Fischer, Aleksi Ikkala, Markus Klar, Arthur Fleig, Miroslav Bachinski, Roderick Murray-Smith, Perttu Hämäläinen, Antti Oulasvirta, Jörg Müller

+

Link

+

Abstract: Automated biomechanical testing has great potential for the development of VR applications, as initial insights into user behaviour can be gained in silico early in the design process. In particular, it allows prediction of user movements and ergonomic variables, such as fatigue, prior to conducting user studies. However, there is a fundamental disconnect between simulators hosting state-of-the-art biomechanical user models and simulators used to develop and run VR applications. Existing user simulators often struggle to capture the intricacies of real-world VR applications, reducing ecological validity of user predictions. In this paper, we introduce SIM2VR, a system that aligns user simulation with a given VR application by establishing a continuous closed loop between the two processes. This, for the first time, enables training simulated users directly in the same VR application that real users interact with. We demonstrate that SIM2VR can predict differences in user performance, ergonomics and strategies in a fast-paced, dynamic arcade game. In order to expand the scope of automated biomechanical testing beyond simple visuomotor tasks, advances in cognitive models and reward function design will be needed.

+

Hands-on, Hands-off: Gaze-Assisted Bimanual 3D Interaction

+

Authors: Mathias Lystbæk, Thorbjørn Mikkelsen, Roland Krisztandl, Eric Gonzalez, Mar Gonzalez-Franco, Hans Gellersen, Ken Pfeuffer

+

Link

+

Abstract: Extended Reality (XR) systems with hand-tracking support direct manipulation of objects with both hands. A common interaction in this context is for the non-dominant hand (NDH) to orient an object for input by the dominant hand (DH). We explore bimanual interaction with gaze through three new modes of interaction where the input of the NDH, DH, or both hands is indirect based on Gaze+Pinch. These modes enable a new dynamic interplay between our hands, allowing flexible alternation between and pairing of complementary operations. Through applications, we demonstrate several use cases in the context of 3D modelling, where users exploit occlusion-free, low-effort, and fluid two-handed manipulation. To gain a deeper understanding of each mode, we present a user study on an asymmetric rotate-translate task. Most participants preferred indirect input with both hands for lower physical effort, without a penalty on user performance. Otherwise, they preferred modes where the NDH oriented the object directly, supporting preshaping of the hand, which is more challenging with indirect gestures. The insights gained are of relevance for the design of XR interfaces that aim to leverage eye and hand input in tandem.

+

Pro-Tact: Hierarchical Synthesis of Proprioception and Tactile Exploration for Eyes-Free Ray Pointing on Out-of-View VR Menus

+

Authors: Yeonsu Kim, Jisu Yim, Kyunghwan Kim, Yohan Yun, Geehyuk Lee

+

Link

+

Abstract: We introduce Pro-Tact, a novel eyes-free pointing technique for interacting with out-of-view (OoV) VR menus. This technique combines rapid rough pointing using proprioception with fine-grain adjustments through tactile exploration, enabling menu interaction without visual attention. Our user study demonstrated that Pro-Tact allows users to select menu items accurately (95% accuracy for 54 items) in an eyes-free manner, with reduced fatigue and sickness compared to eyes-engaged interaction. Additionally, we observed that participants voluntarily interacted with OoV menus eyes-free when Pro-Tact's tactile feedback was provided in practical VR application usage contexts. This research contributes by introducing the novel interaction technique, Pro-Tact, and quantitatively evaluating its benefits in terms of performance, user experience, and user preference in OoV menu interactions.

+

GradualReality: Enhancing Physical Object Interaction in Virtual Reality via Interaction State-Aware Blending

+

Authors: HyunA Seo, Juheon Yi, Rajesh Balan, Youngki Lee

+

Link

+

Abstract: We present GradualReality, a novel interface enabling a Cross Reality experience that includes gradual interaction with physical objects in a virtual environment and supports both presence and usability. Daily Cross Reality interaction is challenging as the user's physical object interaction state is continuously changing over time, causing their attention to frequently shift between the virtual and physical worlds. As such, presence in the virtual environment and seamless usability for interacting with physical objects should be maintained at a high level. To address this issue, we present an Interaction State-Aware Blending approach that (i) balances immersion and interaction capability and (ii) provides a fine-grained, gradual transition between virtual and physical worlds. The key idea includes categorizing the flow of physical object interaction into multiple states and designing novel blending methods that offer optimal presence and sufficient physical awareness at each state. We performed extensive user studies and interviews with a working prototype and demonstrated that GradualReality provides better Cross Reality experiences compared to baselines.

+

StegoType: Surface Typing from Egocentric Cameras

+

Authors: Mark Richardson, Fadi Botros, Yangyang Shi, Pinhao Guo, Bradford Snow, Linguang Zhang, Jingming Dong, Keith Vertanen, Shugao Ma, Robert Wang

+

Link

+

Abstract: Text input is a critical component of any general purpose computing system, yet efficient and natural text input remains a challenge in AR and VR. Headset based hand-tracking has recently become pervasive among consumer VR devices and affords the opportunity to enable touch typing on virtual keyboards. We present an approach for decoding touch typing on uninstrumented flat surfaces using only egocentric camera-based hand-tracking as input. While egocentric hand-tracking accuracy is limited by issues like self occlusion and image fidelity, we show that a sufficiently diverse training set of hand motions paired with typed text can enable a deep learning model to extract signal from this noisy input. Furthermore, by carefully designing a closed-loop data collection process, we can train an end-to-end text decoder that accounts for natural sloppy typing on virtual keyboards. We evaluate our work with a user study (n=18) showing a mean online throughput of 42.4 WPM with an uncorrected error rate (UER) of 7% with our method compared to a physical keyboard baseline of 74.5 WPM at 0.8% UER, showing progress towards unlocking productivity and high throughput use cases in AR/VR.
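
For readers unfamiliar with the reported numbers, the sketch below computes the two text-entry metrics cited in the abstract under one common formulation: WPM = ((|T| - 1) / seconds) x 60 / 5, and an uncorrected error rate based on edit distance over the presented text length. It is a generic illustration, not the paper's evaluation code, and the exact error-rate definition used in the study may differ.

```python
# Generic text-entry metrics: words per minute (WPM) and an uncorrected
# error rate (UER) based on edit distance. Illustrative only.
def edit_distance(a: str, b: str) -> int:
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (ca != cb)))
        prev = cur
    return prev[-1]

def wpm(transcribed: str, seconds: float) -> float:
    return ((len(transcribed) - 1) / seconds) * 60.0 / 5.0

def uncorrected_error_rate(presented: str, transcribed: str) -> float:
    return edit_distance(presented, transcribed) / max(len(presented), 1)

print(round(wpm("the quick brown fox", 20.0), 1))                     # 10.8 WPM
print(round(uncorrected_error_rate("hello world", "hello wrld"), 3))  # 0.091
```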

+

Eye-Hand Movement of Objects in Near Space Extended Reality

+

Authors: Uta Wagner, Andreas Asferg Jacobsen, Tiare Feuchtner, Hans Gellersen, Ken Pfeuffer

+

Link

+

Abstract: Hand-tracking in Extended Reality (XR) enables moving objects in near space with direct hand gestures, to pick, drag and drop objects in 3D. In this work, we investigate the use of eye-tracking to reduce the effort involved in this interaction. As the eyes naturally look ahead to the target for a drag operation, the principal idea is to map the translation of the object in the image plane to gaze, such that the hand only needs to control the depth component of the operation. We have implemented four techniques that explore two factors: the use of gaze only to move objects in X-Y vs. extra refinement by hand, and the use of hand input in the Z axis to directly move objects vs. indirectly via a transfer function. We compared all four techniques in a user study (N=24) against baselines of direct and indirect hand input. We detail user performance, effort and experience trade-offs and show that all eye-hand techniques significantly reduce physical effort over direct gestures, pointing toward effortless drag-and-drop for XR environments.

+

Break Q&A: Contextual Augmentations

+

StreetNav: Leveraging Street Cameras to Support Precise Outdoor Navigation for Blind Pedestrians

+

Authors: Gaurav Jain, Basel Hindi, Zihao Zhang, Koushik Srinivasula, Mingyu Xie, Mahshid Ghasemi, Daniel Weiner, Sophie Ana Paris, Xin Yi Therese Xu, Michael Malcolm, Mehmet Kerem Turkcan, Javad Ghaderi, Zoran Kostic, Gil Zussman, Brian Smith

+

Link

+

Abstract: Blind and low-vision (BLV) people rely on GPS-based systems for outdoor navigation. GPS's inaccuracy, however, causes them to veer off track, run into obstacles, and struggle to reach precise destinations. While prior work has made precise navigation possible indoors via hardware installations, enabling this outdoors remains a challenge. Interestingly, many outdoor environments are already instrumented with hardware such as street cameras. In this work, we explore the idea of repurposing existing street cameras for outdoor navigation. Our community-driven approach considers both technical and sociotechnical concerns through engagements with various stakeholders: BLV users, residents, business owners, and Community Board leadership. The resulting system, StreetNav, processes a camera's video feed using computer vision and gives BLV pedestrians real-time navigation assistance. Our evaluations show that StreetNav guides users more precisely than GPS, but its technical performance is sensitive to environmental occlusions and distance from the camera. We discuss future implications for deploying such systems at scale.

+

WorldScribe: Towards Context-Aware Live Visual Descriptions

+

Best Paper

+

Authors: Ruei-Che Chang, Yuxuan Liu, Anhong Guo

+

Link

+

Abstract: Automated live visual descriptions can aid blind people in understanding their surroundings with autonomy and independence. However, providing descriptions that are rich, contextual, and just-in-time has been a long-standing challenge in accessibility. In this work, we develop WorldScribe, a system that generates automated live real-world visual descriptions that are customizable and adaptive to users' contexts: (i) WorldScribe's descriptions are tailored to users' intents and prioritized based on semantic relevance. (ii) WorldScribe is adaptive to visual contexts, e.g., providing consecutively succinct descriptions for dynamic scenes, while presenting longer and detailed ones for stable settings. (iii) WorldScribe is adaptive to sound contexts, e.g., increasing volume in noisy environments, or pausing when conversations start. Powered by a suite of vision, language, and sound recognition models, WorldScribe introduces a description generation pipeline that balances the tradeoffs between their richness and latency to support real-time use. The design of WorldScribe is informed by prior work on providing visual descriptions and a formative study with blind participants. Our user study and subsequent pipeline evaluation show that WorldScribe can provide real-time and fairly accurate visual descriptions to facilitate environment understanding that is adaptive and customized to users' contexts. Finally, we discuss the implications and further steps toward making live visual descriptions more context-aware and humanized.

+

CookAR: Affordance Augmentations in Wearable AR to Support Kitchen Tool Interactions for People with Low Vision

+

Authors: Jaewook Lee, Andrew Tjahjadi, Jiho Kim, Junpu Yu, Minji Park, Jiawen Zhang, Jon Froehlich, Yapeng Tian, Yuhang Zhao

+

Link

+

Abstract: Cooking is a central activity of daily living, supporting independence as well as mental and physical health. However, prior work has highlighted key barriers for people with low vision (LV) to cook, particularly around safely interacting with tools, such as sharp knives or hot pans. Drawing on recent advancements in computer vision (CV), we present CookAR, a head-mounted AR system with real-time object affordance augmentations to support safe and efficient interactions with kitchen tools. To design and implement CookAR, we collected and annotated the first egocentric dataset of kitchen tool affordances, fine-tuned an affordance segmentation model, and developed an AR system with a stereo camera to generate visual augmentations. To validate CookAR, we conducted a technical evaluation of our fine-tuned model as well as a qualitative lab study with 10 LV participants for suitable augmentation design. Our technical evaluation demonstrates that our model outperforms the baseline on our tool affordance dataset, while our user study indicates a preference for affordance augmentations over the traditional whole object augmentations.

+

DesignChecker: Visual Design Support for Blind and Low Vision Web Developers

+

Authors: Mina Huh, Amy Pavel

+

Link

+

Abstract: Blind and low vision (BLV) developers create websites to share knowledge and showcase their work. A well-designed website can engage audiences and deliver information effectively, yet it remains challenging for BLV developers to review their web designs. We conducted interviews with BLV developers (N=9) and analyzed 20 websites created by BLV developers. BLV developers created highly accessible websites but wanted to assess the usability of their websites for sighted users and follow the design standards of other websites. They also encountered challenges using screen readers to identify illegible text, misaligned elements, and inharmonious colors. We present DesignChecker, a browser extension that helps BLV developers improve their web designs. With DesignChecker, users can assess their current design by comparing it to visual design guidelines, a reference website of their choice, or a set of similar websites. DesignChecker also identifies the specific HTML elements that violate design guidelines and suggests CSS changes for improvements. Our user study participants (N=8) recognized more visual design errors with DesignChecker than with their typical workflow and expressed enthusiasm about using it in the future.

+

Break Q&A: Machine Learning for User Interfaces

+

UIClip: A Data-driven Model for Assessing User Interface Design

+

Authors: Jason Wu, Yi-Hao Peng, Xin Yue Li, Amanda Swearngin, Jeffrey Bigham, Jeffrey Nichols

+

Link

+

Abstract: User interface (UI) design is a difficult yet important task for ensuring the usability, accessibility, and aesthetic qualities of applications. In our paper, we develop a machine-learned model, UIClip, for assessing the design quality and visual relevance of a UI given its screenshot and natural language description. To train UIClip, we used a combination of automated crawling, synthetic augmentation, and human ratings to construct a large-scale dataset of UIs, collated by description and ranked by design quality. Through training on the dataset, UIClip implicitly learns properties of good and bad designs by (i) assigning a numerical score that represents a UI design's relevance and quality and (ii) providing design suggestions. In an evaluation that compared the outputs of UIClip and other baselines to UIs rated by 12 human designers, we found that UIClip achieved the highest agreement with ground-truth rankings. Finally, we present three example applications that demonstrate how UIClip can facilitate downstream applications that rely on instantaneous assessment of UI design quality: (i) UI code generation, (ii) UI design tips generation, and (iii) quality-aware UI example search.
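
The name UIClip suggests CLIP-style joint-embedding scoring: embed the screenshot and the description into a shared space and compare them. The sketch below only illustrates that general scoring pattern; the embedding functions are random placeholders, not UIClip's trained encoders, and the real model additionally produces design suggestions.

```python
# Conceptual sketch of joint-embedding scoring: cosine similarity between an
# image embedding and a text embedding. embed_image/embed_text are dummy
# placeholders (random vectors), not UIClip's model.
import math, random

def embed_image(screenshot_path: str, dim: int = 8):
    random.seed(len(screenshot_path))          # deterministic dummy embedding
    return [random.gauss(0, 1) for _ in range(dim)]

def embed_text(description: str, dim: int = 8):
    random.seed(len(description))              # deterministic dummy embedding
    return [random.gauss(0, 1) for _ in range(dim)]

def cosine(u, v):
    dot = sum(a * b for a, b in zip(u, v))
    norm = math.sqrt(sum(a * a for a in u)) * math.sqrt(sum(b * b for b in v))
    return dot / norm

score = cosine(embed_image("settings_screen.png"),
               embed_text("a settings screen with a dark theme"))
print(f"relevance/quality score (illustrative): {score:.3f}")
```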

+

UICrit: Enhancing Automated Design Evaluation with a UI Critique Dataset

+

Authors: Peitong Duan, Chin-Yi Cheng, Gang Li, Bjoern Hartmann, Yang Li

+

Link

+

Abstract: Automated UI evaluation can be beneficial for the design process; for example, to compare different UI designs, or conduct automated heuristic evaluation. LLM-based UI evaluation, in particular, holds the promise of generalizability to a wide variety of UI types and evaluation tasks. However, current LLM-based techniques do not yet match the performance of human evaluators. We hypothesize that automatic evaluation can be improved by collecting a targeted UI feedback dataset and then using this dataset to enhance the performance of general-purpose LLMs. We present a targeted dataset of 3,059 design critiques and quality ratings for 983 mobile UIs, collected from seven designers, each with at least a year of professional design experience. We carried out an in-depth analysis to characterize the dataset's features. We then applied this dataset to achieve a 55% performance gain in LLM-generated UI feedback via various few-shot and visual prompting techniques. We also discuss future applications of this dataset, including training a reward model for generative UI techniques, and fine-tuning a tool-agnostic multi-modal LLM that automates UI evaluation.

+

EyeFormer: Predicting Personalized Scanpaths with Transformer-Guided Reinforcement Learning

+

Authors: Yue Jiang, Zixin Guo, Hamed Rezazadegan Tavakoli, Luis Leiva, Antti Oulasvirta

+

Link

+

Abstract: From a visual-perception perspective, modern graphical user interfaces (GUIs) comprise a complex graphics-rich two-dimensional visuospatial arrangement of text, images, and interactive objects such as buttons and menus. While existing models can accurately predict regions and objects that are likely to attract attention "on average", no scanpath model has been capable of predicting scanpaths for an individual. To close this gap, we introduce EyeFormer, which utilizes a Transformer architecture as a policy network to guide a deep reinforcement learning algorithm that predicts gaze locations. Our model offers the unique capability of producing personalized predictions when given a few user scanpath samples. It can predict full scanpath information, including fixation positions and durations, across individuals and various stimulus types. Additionally, we demonstrate applications in GUI layout optimization driven by our model.

+

GPTVoiceTasker: Advancing Multi-step Mobile Task Efficiency Through Dynamic Interface Exploration and Learning

+

Authors: Minh Duc Vu, Han Wang, Jieshan Chen, Zhuang Li, Shengdong Zhao, Zhenchang Xing, Chunyang Chen

+

Link

+

Abstract: Virtual assistants have the potential to play an important role in helping users achieve different tasks. However, these systems face challenges in their real-world usability, characterized by inefficiency and struggles in grasping user intentions. Leveraging recent advances in Large Language Models (LLMs), we introduce GPTVoiceTasker, a virtual assistant poised to enhance user experiences and task efficiency on mobile devices. GPTVoiceTasker excels at intelligently deciphering user commands and executing relevant device interactions to streamline task completion. For unprecedented tasks, GPTVoiceTasker utilises the contextual information and on-screen content to continuously explore and execute the tasks. In addition, the system continually learns from historical user commands to automate subsequent task invocations, further enhancing execution efficiency. From our experiments, GPTVoiceTasker achieved 84.5% accuracy in parsing human commands into executable actions and 85.7% accuracy in automating multi-step tasks. In our user study, GPTVoiceTasker boosted task efficiency in real-world scenarios by 34.85%, accompanied by positive participant feedback. We made GPTVoiceTasker open-source, inviting further research into LLM utilization for diverse tasks through prompt engineering and leveraging user usage data to improve efficiency.

+

VisionTasker: Mobile Task Automation Using Vision Based UI Understanding and LLM Task Planning

+

Authors: Yunpeng Song, Yiheng Bian, Yongtao Tang, Guiyu Ma, Zhongmin Cai

+

Link

+

Abstract: Mobile task automation is an emerging field that leverages AI to streamline and optimize the execution of routine tasks on mobile devices, thereby enhancing efficiency and productivity. Traditional methods, such as Programming By Demonstration (PBD), are limited due to their dependence on predefined tasks and susceptibility to app updates. Recent advancements have utilized the view hierarchy to collect UI information and employed Large Language Models (LLM) to enhance task automation. However, view hierarchies have accessibility issues and face potential problems like missing object descriptions or misaligned structures. This paper introduces VisionTasker, a two-stage framework combining vision-based UI understanding and LLM task planning, for mobile task automation in a step-by-step manner. VisionTasker firstly converts a UI screenshot into natural language interpretations using a vision-based UI understanding approach, eliminating the need for view hierarchies. Secondly, it adopts a step-by-step task planning method, presenting one interface at a time to the LLM. The LLM then identifies relevant elements within the interface and determines the next action, enhancing accuracy and practicality. Extensive experiments show that VisionTasker outperforms previous methods, providing effective UI representations across four datasets. Additionally, in automating 147 real-world tasks on an Android smartphone, VisionTasker demonstrates advantages over humans in tasks where humans show unfamiliarity and shows significant improvements when integrated with the PBD mechanism. VisionTasker is open-source and available at https://github.com/AkimotoAyako/VisionTasker.

+

Break Q&A: Poses as Input

+

SolePoser: Real-Time 3D Human Pose Estimation using Insole Pressure Sensors

+

Authors: Erwin Wu, Rawal Khirodkar, Hideki Koike, Kris Kitani

+

Link

+

Abstract: We propose SolePoser, a real-time 3D pose estimation system that leverages only a single pair of insole sensors. Unlike conventional methods relying on fixed cameras or bulky wearable sensors, our approach offers minimal and natural setup requirements. The proposed system utilizes pressure and IMU sensors embedded in insoles to capture the body weight's pressure distribution at the feet and its 6 DoF acceleration. This information is used to estimate the 3D full-body joint position by a two-stream transformer network. A novel double-cycle consistency loss and a cross-attention module are further introduced to learn the relationship between 3D foot positions and their pressure distributions. We also introduce two different datasets of sports and daily exercises, offering 908k frames across eight different activities. Our experiments show that our method's performance is on par with top-performing approaches that utilize more IMUs, and it even outperforms third-person-view camera-based methods in certain scenarios.

+

Gait Gestures: Examining Stride and Foot Strike Variation as an Input Method While Walking

+

Authors: Ching-Yi Tsai, Ryan Yen, Daekun Kim, Daniel Vogel

+

Link

+

Abstract: Walking is a cyclic pattern of alternating footstep strikes, with each pair of steps forming a stride, and a series of strides forming a gait. We conduct a systematic examination of different kinds of intentional variations from a normal gait that could be used as input actions without interrupting overall walking progress. A design space of 22 candidate Gait Gestures is generated by adapting previous standing foot input actions and identifying new actions possible in a walking context. A formative study (n=25) examines movement easiness, social acceptability, and walking compatibility with foot movement logging to calculate temporal and spatial characteristics. Using a categorization of these results, 7 gestures are selected for a wizard-of-oz prototype demonstrating an AR interface controlled by Gait Gestures for ordering food and audio playback while walking. As a technical proof-of-concept, a gait gesture recognizer is developed and tested using the formative study data.

+

EgoTouch: On-Body Touch Input Using AR/VR Headset Cameras

+

Authors: Vimal Mollyn, Chris Harrison

+

Link

+

Abstract: In augmented and virtual reality (AR/VR) experiences, a user's arms and hands can provide a convenient and tactile surface for touch input. Prior work has shown on-body input to have significant speed, accuracy, and ergonomic benefits over in-air interfaces, which are common today. In this work, we demonstrate high-accuracy, bare-hands skin input (i.e., no special instrumentation of the user) using just an RGB camera, like those already integrated into all modern XR headsets. Our results show this approach can be accurate and robust across diverse lighting conditions, skin tones, and body motion (e.g., input while walking). Finally, our pipeline also provides rich input metadata including touch force, finger identification, angle of attack, and rotation. We believe these are the requisite technical ingredients to more fully unlock on-skin interfaces that have been well motivated in the HCI literature but have lacked robust and practical methods.

+

MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices

+

Authors: Vasco Xu, Chenfeng Gao, Henry Hoffman, Karan Ahuja

+

Link

+

Abstract: There has been a continued trend towards minimizing instrumentation for full-body motion capture, going from specialized rooms and equipment, to arrays of worn sensors and recently sparse inertial pose capture methods. However, as these techniques migrate towards lower-fidelity IMUs on ubiquitous commodity devices, like phones, watches, and earbuds, challenges arise including compromised online performance, temporal consistency, and loss of global translation due to sensor noise and drift. Addressing these challenges, we introduce MobilePoser, a real-time system for full-body pose and global translation estimation using any available subset of IMUs already present in these consumer devices. MobilePoser employs a multi-stage deep neural network for kinematic pose estimation followed by a physics-based motion optimizer, achieving state-of-the-art accuracy while remaining lightweight. We conclude with a series of demonstrative applications to illustrate the unique potential of MobilePoser across a variety of fields, such as health and wellness, gaming, and indoor navigation to name a few.

+

Touchscreen-based Hand Tracking for Remote Whiteboard Interaction

+

Authors: Xinshuang Liu, Yizhong Zhang, Xin Tong

+

Link

+

Abstract: In whiteboard-based remote communication, the seamless integration of drawn content and hand-screen interactions is essential for an immersive user experience. Previous methods either require bulky device setups for capturing hand gestures or fail to accurately track the hand poses from capacitive images. In this paper, we present a real-time method for precisely tracking the 3D poses of both hands from capacitive video frames. To this end, we develop a deep neural network to identify hands and infer hand joint positions from capacitive frames, and then recover 3D hand poses from the hand-joint positions via a constrained inverse kinematic solver. Additionally, we design a device setup for capturing high-quality hand-screen interaction data and obtain a more accurate synchronized capacitive video and hand pose dataset. Our method improves the accuracy and stability of 3D hand tracking for capacitive frames while maintaining a compact device setup for remote communication. We validate our scheme design and its superior performance on 3D hand pose tracking and demonstrate the effectiveness of our method in whiteboard-based remote communication.

+

SeamPose: Repurposing Seams as Capacitive Sensors in a Shirt for Upper-Body Pose Tracking

+

Authors: Tianhong Yu, Mary Zhang, Peter He, Chi-Jung Lee, Cassidy Cheesman, Saif Mahmud, Ruidong Zhang, Francois Guimbretiere, Cheng Zhang

+

Link

+

Abstract: Seams are areas of overlapping fabric formed by stitching two or more pieces of fabric together in the cut-and-sew apparel manufacturing process. In SeamPose, we repurposed seams as capacitive sensors in a shirt for continuous upper-body pose estimation. Compared to previous all-textile motion-capturing garments that place the electrodes on the clothing surface, our solution leverages existing seams inside of a shirt by machine-sewing insulated conductive threads over the seams. The unique invisibilities and placements of the seams afford the sensing shirt to look and wear similarly as a conventional shirt while providing exciting pose-tracking capabilities. To validate this approach, we implemented a proof-of-concept untethered shirt with 8 capacitive sensing seams. With a 12-participant user study, our customized deep-learning pipeline accurately estimates the relative (to the pelvis) upper-body 3D joint positions with a mean per joint position error (MPJPE) of 6.0 cm. SeamPose represents a step towards unobtrusive integration of smart clothing for everyday pose estimation.

+

Break Q&A: A11y

+

ProgramAlly: Creating Custom Visual Access Programs via Multi-Modal End-User Programming

+

Authors: Jaylin Herskovitz, Andi Xu, Rahaf Alharbi, Anhong Guo

+

Link

+

Abstract: Existing visual assistive technologies are built for simple and common use cases, and have few avenues for blind people to customize their functionalities. Drawing from prior work on DIY assistive technology, this paper investigates end-user programming as a means for users to create and customize visual access programs to meet their unique needs. We introduce ProgramAlly, a system for creating custom filters for visual information, e.g., 'find NUMBER on BUS', leveraging three end-user programming approaches: block programming, natural language, and programming by example. To implement ProgramAlly, we designed a representation of visual filtering tasks based on scenarios encountered by blind people, and integrated a set of on-device and cloud models for generating and running these programs. In user studies with 12 blind adults, we found that participants preferred different programming modalities depending on the task, and envisioned using visual access programs to address unique accessibility challenges that are otherwise difficult with existing applications. Through ProgramAlly, we present an exploration of how blind end-users can create visual access programs to customize and control their experiences.

+

Accessible Gesture Typing on Smartphones for People with Low Vision

+

Authors: Dan Zhang, Zhi Li, Vikas Ashok, William H Seiple, IV Ramakrishnan, Xiaojun Bi

+

Link

+

Abstract: While gesture typing is widely adopted on touchscreen keyboards, its support for low vision users is limited. We have designed and implemented two keyboard prototypes, layout-magnified and key-magnified keyboards, to enable gesture typing for people with low vision. Both keyboards facilitate uninterrupted access to all keys while the screen magnifier is active, allowing people with low vision to input text with one continuous stroke. Furthermore, we have created a kinematics-based decoding algorithm to accommodate the typing behavior of people with low vision. This algorithm can decode the gesture input even if the gesture trace deviates from a pre-defined word template, and the starting position of the gesture is far from the starting letter of the target word. Our user study showed that the key-magnified keyboard achieved 5.28 words per minute, 27.5% faster than a conventional gesture typing keyboard with voice feedback.

+

AccessTeleopKit: A Toolkit for Creating Accessible Web-Based Interfaces for Tele-Operating an Assistive Robot

+

Authors: Vinitha Ranganeni, Varad Dhat, Noah Ponto, Maya Cakmak

+

Link

+

Abstract: Mobile manipulator robots, which can move around and physically interact with their environments, can empower people with motor limitations to independently carry out many activities of daily living. While many interfaces have been developed for tele-operating complex robots, most of them are not accessible to people with severe motor limitations. Further, most interfaces are rigid with limited configurations and are not readily available to download and use. To address these barriers, we developed AccessTeleopKit: an open-source toolkit for creating custom and accessible robot tele-operation interfaces based on cursor-and-click input for the Stretch 3 mobile-manipulator. With AccessTeleopKit users can add, remove, and rearrange components such as buttons and camera views, and select between a variety of control modes. We describe the participatory and iterative design process that led to the current implementation of AccessTeleopKit, involving three long-term deployments of the robot in the home of a quadriplegic user. We demonstrate how AccessTeleopKit allowed the user to create different interfaces for different tasks and the diversity of tasks it allowed the user to carry out. We also present two studies involving six additional users with severe motor limitations, demonstrating the power of AccessTeleopKit in creating custom interfaces for different user needs and preferences.

+

Memory Reviver: Supporting Photo-Collection Reminiscence for People with Visual Impairment via a Proactive Chatbot

+

Authors: Shuchang Xu, Chang Chen, Zichen LIU, Xiaofu Jin, Linping Yuan, Yukang Yan, Huamin Qu

+

Link

+

Abstract: Reminiscing with photo collections offers significant psychological benefits but poses challenges for people with visual impairment (PVI). Their current reliance on sighted help restricts the flexibility of this activity. In response, we explored using a chatbot in a preliminary study. We identified two primary challenges that hinder effective reminiscence with a chatbot: the scattering of information and a lack of proactive guidance. To address these limitations, we present Memory Reviver, a proactive chatbot that helps PVI reminisce with a photo collection through natural language communication. Memory Reviver incorporates two novel features: (1) a Memory Tree, which uses a hierarchical structure to organize the information in a photo collection; and (2) a Proactive Strategy, which actively delivers information to users at proper conversation rounds. Evaluation with twelve PVI demonstrated that Memory Reviver effectively facilitated engaging reminiscence, enhanced understanding of photo collections, and delivered natural conversational experiences. Based on our findings, we distill implications for supporting photo reminiscence and designing chatbots for PVI.

+

VizAbility: Enhancing Chart Accessibility with LLM-based Conversational Interaction

+

Authors: Joshua Gorniak, Yoon Kim, Donglai Wei, Nam Wook Kim

+

Link

+

Abstract: Traditional accessibility methods like alternative text and data tables typically underrepresent data visualization's full potential. Keyboard-based chart navigation has emerged as a potential solution, yet efficient data exploration remains challenging. We present VizAbility, a novel system that enriches chart content navigation with conversational interaction, enabling users to use natural language for querying visual data trends. VizAbility adapts to the user's navigation context for improved response accuracy and facilitates verbal command-based chart navigation. Furthermore, it can address queries for contextual information, designed to address the needs of visually impaired users. We designed a large language model (LLM)-based pipeline to address these user queries, leveraging chart data & encoding, user context, and external web knowledge. We conducted both qualitative and quantitative studies to evaluate VizAbility's multimodal approach. We discuss further opportunities based on the results, including improved benchmark testing, incorporation of vision models, and integration with visualization workflows.

+

Computational Trichromacy Reconstruction: Empowering the Color-Vision Deficient to Recognize Colors Using Augmented Reality

+

Authors: Yuhao Zhu, Ethan Chen, Colin Hascup, Yukang Yan, Gaurav Sharma

+

Link

+

Abstract: We propose an assistive technology that helps individuals with Color Vision Deficiencies (CVD) to recognize/name colors. A dichromat's color perception is a reduced two-dimensional (2D) subset of a normal trichromat's three-dimensional (3D) color perception, leading to confusion when visual stimuli that appear identical to the dichromat are referred to by different color names. Using our proposed system, CVD individuals can interactively induce distinct perceptual changes to originally confusing colors via a computational color space transformation. By combining their original 2D percepts for colors with the discriminative changes, a three-dimensional color space is reconstructed, where the dichromat can learn to resolve color name confusions and accurately recognize colors. Our system is implemented as an Augmented Reality (AR) interface on smartphones, where users interactively control the rotation through swipe gestures and observe the induced color shifts in the camera view or in a displayed image. Through psychophysical experiments and a longitudinal user study, we demonstrate that such rotational color shifts have discriminative power (initially confusing colors become distinct under rotation) and exhibit structured perceptual shifts dichromats can learn with modest training. The AR app is also evaluated in two real-world scenarios (building with Lego blocks and interpreting artistic works); users all report a positive experience in using the app to recognize object colors that they otherwise could not.
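
The rotational transform can be pictured as rotating the chromatic components of a color while holding lightness fixed. The toy function below illustrates that idea in a generic opponent-style (lightness, a, b) representation; it is not the paper's calibrated, CVD-specific mapping.

```python
# Generic illustration of a rotational color-space shift: keep lightness
# fixed and rotate the (a, b) chroma vector by a user-controlled angle.
import math

def rotate_chroma(l, a, b, degrees):
    """Rotate the chromatic components; lightness l is unchanged."""
    theta = math.radians(degrees)
    a2 = a * math.cos(theta) - b * math.sin(theta)
    b2 = a * math.sin(theta) + b * math.cos(theta)
    return l, a2, b2

# Two colors that differ mainly along an axis a dichromat cannot distinguish
# become separated along a different axis after rotation.
print(rotate_chroma(60.0, 40.0, 5.0, 90))
print(rotate_chroma(60.0, -40.0, 5.0, 90))
```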

+

Break Q&A: Sustainable Interfaces

+

Degrade to Function: Towards Eco-friendly Morphing Devices that Function Through Programmed Sequential Degradation

+

Authors: Qiuyu Lu, Semina Yi, Mengtian Gan, Jihong Huang, Xiao Zhang, Yue Yang, Chenyi Shen, Lining Yao

+

Link

+

Abstract: While it seems counterintuitive to think of degradation within an operating device as beneficial, one may argue that when rationally designed, the controlled breakdown of materials—physical, chemical, or biological—can be harnessed for specific functions. To apply this principle to the design of morphing devices, we introduce the concept of "Degrade to Function" (DtF). This concept aims to create eco-friendly and self-contained morphing devices that operate through a sequence of environmentally-triggered degradations. We explore its design considerations and implementation techniques by identifying environmental conditions and degradation types that can be exploited, evaluating potential materials capable of controlled degradation, suggesting designs for structures that can leverage degradation to achieve various transformations and functions, and developing sequential control approaches that integrate degradation triggers. To demonstrate the viability and versatility of this design strategy, we showcase several application examples across a range of environmental conditions.

+

WasteBanned: Supporting Zero Waste Fashion Design Through Linked Edits

+

Authors: Ruowang Zhang, Stefanie Mueller, Gilbert Bernstein, Adriana Schulz, Mackenzie Leake

+

Link

+

Abstract: The commonly used cut-and-sew garment construction process, in which 2D fabric panels are cut from sheets of fabric and assembled into 3D garments, contributes to widespread textile waste in the fashion industry. There is often a significant divide between the design of the garment and the layout of the panels. One opportunity for bridging this gap is the emerging study and practice of zero waste fashion design, which involves creating clothing designs with maximum layout efficiency. Enforcing the strict constraints of zero waste sewing is challenging, as edits to one region of the garment necessarily affect neighboring panels. Based on our formative work to understand this emerging area within fashion design, we present WasteBanned, a tool that combines CAM and CAD to help users prioritize efficient material usage, work within these zero waste constraints, and edit existing zero waste garment patterns. Our user evaluation indicates that our tool helps fashion designers edit zero waste patterns to fit different bodies and add stylistic variation, while creating highly efficient fabric layouts.

+

HoloChemie - Sustainable Fabrication of Soft Biochemical Holographic Devices for Ubiquitous Sensing

+

Authors: Sutirtha Roy, Moshfiq-Us-Saleheen Chowdhury, Jurjaan Noim, Richa Pandey, Aditya Shekhar Nittala

+

Link

+

Abstract: Sustainable fabrication approaches and biomaterials are increasingly being used in HCI to fabricate interactive devices. However, the majority of the work has focused on integrating electronics. This paper takes a sustainable approach to exploring the fabrication of biochemical sensing devices. Firstly, we contribute a set of biochemical formulations for biological and environmental sensing with bio-sourced and environment-friendly substrate materials. Our formulations are based on a combination of enzymes derived from bacteria and fungi, plant extracts and commercially available chemicals to sense both liquid and gaseous analytes: glucose, lactic acid, pH levels and carbon dioxide. Our novel holographic sensing scheme allows for detecting the presence of analytes and enables quantitative estimation of the analyte levels. We present a set of application scenarios that demonstrate the versatility of our approach and discuss the sustainability aspects, its limitations, and the implications for bio-chemical systems in HCI.

+

Break Q&A: FABulous

+

Facilitating the Parametric Definition of Geometric Properties in Programming-Based CAD

+

Authors: J Gonzalez Avila, Thomas Pietrzak, Audrey Girouard, Géry Casiez

+

Link

+

Abstract: Parametric Computer-aided design (CAD) enables the creation of reusable models by integrating variables into geometric properties, facilitating customization without a complete redesign. However, creating parametric designs in programming-based CAD presents significant challenges. Users define models in a code editor using a programming language, with the application generating a visual representation in a viewport. This process involves complex programming and arithmetic expressions to describe geometric properties, linking various object properties to create parametric designs. Unfortunately, these applications lack assistance, making the process unnecessarily demanding. We propose a solution that allows users to retrieve parametric expressions from the visual representation for reuse in the code, streamlining the design process. We demonstrated this concept through a proof-of-concept implemented in the programming-based CAD application, OpenSCAD, and conducted an experiment with 11 users. Our findings suggest that this solution could significantly reduce design errors, improve interactivity and engagement in the design process, and lower the entry barrier for newcomers by reducing the mathematical skills typically required in programming-based CAD applications.

+

Rhapso: Automatically Embedding Fiber Materials into 3D Prints for Enhanced Interactivity

+

Authors: Daniel Ashbrook, Wei-Ju Lin, Nicholas Bentley, Diana Soponar, Zeyu Yan, Valkyrie Savage, Lung-Pan Cheng, Huaishu Peng, Hyunyoung Kim

+

Link

+

Abstract: We introduce Rhapso, a 3D printing system designed to embed a diverse range of continuous fiber materials within 3D objects during the printing process. This approach enables integrating properties like tensile strength, force storage and transmission, or aesthetic and tactile characteristics, directly into low-cost thermoplastic 3D prints. These functional objects can have intricate actuation, self-assembly, and sensing capabilities with little to no manual intervention. To achieve this, we modify a low-cost Fused Filament Fabrication (FFF) 3D printer, adding a stepper motor-controlled fiber spool mechanism on a gear ring above the print bed. In addition to hardware, we provide parsing software for precise fiber placement, which generates Gcode for printer operation. To illustrate the versatility of our system, we present applications that showcase its extensive design potential. Additionally, we offer comprehensive documentation and open designs, empowering others to replicate our system and explore its possibilities.

+

Speed-Modulated Ironing: High-Resolution Shade and Texture Gradients in Single-Material 3D Printing

+

Authors: Mehmet Ozdemir, Marwa AlAlawi, Mustafa Doga Dogan, Jose Martinez Castro, Stefanie Mueller, Zjenja Doubrovski

+

Link

+

Abstract: We present Speed-Modulated Ironing, a new fabrication method for programming visual and tactile properties in single-material 3D printing. We use one nozzle to 3D print and a second nozzle to reheat printed areas at varying speeds, controlling the material's temperature-response. The rapid adjustments of speed allow for fine-grained reheating, enabling high-resolution color and texture variations. We implemented our method in a tool that allows users to assign desired properties to 3D models and creates corresponding 3D printing instructions. We demonstrate our method with three temperature-responsive materials: a foaming filament, a filament with wood fibers, and a filament with cork particles. These filaments respond to temperature by changing color, roughness, transparency, and gloss. Our technical evaluation reveals the capabilities of our method in achieving sufficient resolution and color shade range that allows surface details such as small text, photos, and QR codes on 3D-printed objects. Finally, we provide application examples demonstrating the new design capabilities enabled by Speed-Modulated Ironing.
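
One way to picture speed modulation is as a reheating pass whose G-code feed rate (the F parameter, in mm/min) varies along the path according to the desired shade. The sketch below is illustrative only; the shade-to-speed mapping is invented for the example and is not the paper's calibrated tool.

```python
# Sketch: emit G1 moves with different feed rates for a reheating/ironing
# pass. The shade-to-speed mapping below is a made-up illustration.
def speed_for_shade(shade: float, f_min: float = 300.0, f_max: float = 3000.0) -> float:
    """shade in [0, 1]: slower reheating (more heat input) for higher shade values."""
    return f_max - shade * (f_max - f_min)

def ironing_pass(points, shades, z=0.2):
    gcode = [f"G1 Z{z:.2f} F1200 ; move to reheating height"]
    for (x, y), shade in zip(points, shades):
        gcode.append(f"G1 X{x:.2f} Y{y:.2f} F{speed_for_shade(shade):.0f}")
    return "\n".join(gcode)

print(ironing_pass([(10, 10), (20, 10), (30, 10)], [0.0, 0.5, 1.0]))
```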

+

TRAvel Slicer: Continuous Extrusion Toolpaths for 3D Printing

+

Authors: Jaime Gould, Camila Friedman-Gerlicz, Leah Buechley

+

Link

+

Abstract: In this paper we present Travel Reduction Algorithm (TRAvel) Slicer, which minimizes travel movements in 3D printing. Conventional slicing software generates toolpaths with many travel movements--movements without material extrusion. Some 3D printers are incapable of starting and stopping extrusion, and the extrusion of many materials is difficult or impossible to control. This makes toolpaths with travel movements unsuitable for a wide range of printers and materials.

+

We developed the open-source TRAvel Slicer to enable the printing of complex 3D models on a wider range of printers and in a wider range of materials than is currently possible. TRAvel Slicer minimizes two different kinds of travel movements--what we term Inner- and Outer-Model travel. We minimize Inner-Model travel (travel within the 3D model) by generating space-filling Fermat spirals for each contiguous planar region of the model. We minimize Outer-Model travel (travels outside of the 3D model) by ordering the printing of different branches of the model, thus limiting transitions between branches. We present our algorithm and software and then demonstrate how: 1) TRAvel Slicer makes it possible to generate high-quality prints from a metal-clay material, CeraMetal, that is functionally unprintable using an off-the-shelf slicer. 2) TRAvel Slicer dramatically increases the printing efficiency of traditional plastic 3D printing compared to an off-the-shelf slicer.
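
As a rough intuition for reducing Outer-Model travel, the sketch below orders print regions greedily by nearest neighbor so the nozzle moves as little as possible between them. This is only a generic illustration; TRAvel Slicer's actual approach (Fermat spirals within regions and branch-aware ordering over the model) is considerably more involved.

```python
# Toy illustration of travel reduction by ordering regions greedily so that
# consecutive regions are close together. Not the TRAvel Slicer algorithm.
import math

def order_regions(start, region_centroids):
    remaining = list(region_centroids)
    order, pos = [], start
    while remaining:
        nxt = min(remaining, key=lambda c: math.dist(pos, c))
        remaining.remove(nxt)
        order.append(nxt)
        pos = nxt
    return order

print(order_regions((0, 0), [(5, 5), (1, 1), (4, 0), (1, 2)]))
# -> [(1, 1), (1, 2), (4, 0), (5, 5)]
```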

+

Understanding and Supporting Debugging Workflows in CAD

+

Authors: Felix Hähnlein, Gilbert Bernstein, Adriana Schulz

+

Link

+

Abstract: One of the core promises of parametric Computer-Aided Design (CAD) is that users can easily edit their model at any point in time. However, due to the ambiguity of changing references to intermediate, updated geometry, parametric edits can lead to reference errors which are difficult to fix in practice. We claim that debugging reference errors remains challenging because CAD systems do not provide users with tools to understand where the error happened and how to fix it. To address these challenges, we prototype a graphical debugging tool, DeCAD, which helps compare CAD model states both across operations and across edits. In a qualitative lab study, we use DeCAD as a probe to understand specific challenges that users face and what workflows they employ to overcome them. We conclude with design implications for future debugging tool developers.

+

Break Q&A: Programming UI

+

NotePlayer: Engaging Jupyter Notebooks for Dynamic Presentation of Analytical Processes

+

Authors: Yang Ouyang, Leixian Shen, Yun Wang, Quan Li

+

Link

+

Abstract: Diverse presentation formats play a pivotal role in effectively conveying code and analytical processes during data analysis. One increasingly popular format is tutorial videos, particularly those based on Jupyter notebooks, which offer an intuitive interpretation of code and vivid explanations of analytical procedures. However, creating such videos requires a diverse skill set and significant manual effort, posing a barrier for many analysts. To bridge this gap, we introduce an innovative tool called NotePlayer, which connects notebook cells to video segments and incorporates a computational engine with language models to streamline video creation and editing. Our aim is to make the process more accessible and efficient for analysts. To inform the design of NotePlayer, we conducted a formative study and performed content analysis on a corpus of 38 Jupyter tutorial videos. This helped us identify key patterns and challenges encountered in existing tutorial videos, guiding the development of NotePlayer. Through a combination of a usage scenario and a user study, we validated the effectiveness of NotePlayer. The results show that the tool streamlines the video creation and facilitates the communication process for data analysts.

+

Tyche: Making Sense of Property-Based Testing Effectiveness

+

Authors: Harrison Goldstein, Jeffrey Tao, Zac Hatfield-Dodds, Benjamin Pierce, Andrew Head

+

Link

+

Abstract: Software developers increasingly rely on automated methods to assess the correctness of their code. One such method is property-based testing (PBT), wherein a test harness generates hundreds or thousands of inputs and checks the outputs of the program on those inputs using parametric properties. Though powerful, PBT induces a sizable gulf of evaluation: developers need to put in nontrivial effort to understand how well the different test inputs exercise the software under test. To bridge this gulf, we propose Tyche, a user interface that supports sensemaking around the effectiveness of property-based tests. Guided by a formative design exploration, our design of Tyche supports developers with interactive, configurable views of test behavior with tight integrations into modern developer testing workflow. These views help developers explore global testing behavior and individual test inputs alike. To accelerate the development of powerful, interactive PBT tools, we define a standard for PBT test reporting and integrate it with a widely used PBT library. A self-guided online usability study revealed that Tyche's visualizations help developers to more accurately assess software testing effectiveness.
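
For readers new to PBT, the minimal test below, written with the Hypothesis library as one widely used PBT framework, shows the kind of generated-input testing Tyche is designed to explain; Tyche's own reporting integration is not shown here.

```python
# A minimal property-based test: Hypothesis generates many lists of integers
# and checks two properties of sorted() on each of them.
from collections import Counter
from hypothesis import given, strategies as st

@given(st.lists(st.integers()))
def test_sorted_is_ordered_and_a_permutation(xs):
    ys = sorted(xs)
    assert all(a <= b for a, b in zip(ys, ys[1:]))  # output is ordered
    assert Counter(ys) == Counter(xs)               # same elements, same counts

if __name__ == "__main__":
    # Calling the decorated test runs it against many generated inputs.
    test_sorted_is_ordered_and_a_permutation()
```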

+

CoLadder: Manipulating Code Generation via Multi-Level Blocks

+

Authors: Ryan Yen, Jiawen Zhu, Sangho Suh, Haijun Xia, Jian Zhao

+

Link

+

Abstract: This paper adopted an iterative design process to gain insights into programmers' strategies when using LLMs for programming. We proposed CoLadder, a novel system that supports programmers by facilitating hierarchical task decomposition, direct code segment manipulation, and result evaluation during prompt authoring. A user study with 12 experienced programmers showed that CoLadder is effective in helping programmers externalize their problem-solving intentions flexibly, improving their ability to evaluate and modify code across various abstraction levels, from their task's goal to final code implementation.

+

SQLucid: Grounding Natural Language Database Queries with Interactive Explanations

+

Authors: Yuan Tian, Jonathan Kummerfeld, Toby Li, Tianyi Zhang

+

Link

+

Abstract: Though recent advances in machine learning have led to significant improvements in natural language interfaces for databases, the accuracy and reliability of these systems remain limited, especially in high-stakes domains. This paper introduces SQLucid, a novel user interface that bridges the gap between non-expert users and complex database querying processes. SQLucid addresses existing limitations by integrating visual correspondence, intermediate query results, and editable step-by-step SQL explanations in natural language to facilitate user understanding and engagement. This unique blend of features empowers users to understand and refine SQL queries easily and precisely. Two user studies and one quantitative experiment were conducted to validate SQLucid’s effectiveness, showing significant improvement in task completion accuracy and user confidence compared to existing interfaces. Our code is available at https://github.com/magic-YuanTian/SQLucid.

+

Break Q&A: AI & Automation

+

Memolet: Reifying the Reuse of User-AI Conversational Memories

+

Authors: Ryan Yen, Jian Zhao

+

Link

+

Abstract: As users engage more frequently with AI conversational agents, conversations may exceed their memory capacity, leading to failures in correctly leveraging certain memories for tailored responses. However, in finding past memories that can be reused or referenced, users need to retrieve relevant information in various conversations and articulate to the AI their intention to reuse these memories. To support this process, we introduce Memolet, an interactive object that reifies memory reuse. Users can directly manipulate Memolet to specify which memories to reuse and how to use them. We developed a system demonstrating Memolet's interaction across various memory reuse stages, including memory extraction, organization, prompt articulation, and generation refinement. We examine the system's usefulness with an N=12 within-subject study and provide design implications for future systems that support user-AI conversational memory reusing.

+

VIME: Visual Interactive Model Explorer for Identifying Capabilities and Limitations of Machine Learning Models for Sequential Decision-Making

+

Authors: Anindya Das Antar, Somayeh Molaei, Yan-Ying Chen, Matthew Lee, Nikola Banovic

+

Link

+

Abstract: Ensuring that Machine Learning (ML) models make correct and meaningful inferences is necessary for the broader adoption of such models into high-stakes decision-making scenarios. Thus, ML model engineers increasingly use eXplainable AI (XAI) tools to investigate the capabilities and limitations of their ML models before deployment. However, explaining sequential ML models, which make a series of decisions at each timestep, remains challenging. We present Visual Interactive Model Explorer (VIME), an XAI toolbox that enables ML model engineers to explain decisions of sequential models in different "what-if" scenarios. Our evaluation with 14 ML experts, who investigated two existing sequential ML models using VIME and a baseline XAI toolbox to explore "what-if" scenarios, showed that VIME made it easier to identify and explain instances when the models made wrong decisions compared to the baseline. Our work informs the design of future interactive XAI mechanisms for evaluating sequential ML-based decision support systems.

+

SERENUS: Alleviating Low-Battery Anxiety Through Real-time, Accurate, and User-Friendly Energy Consumption Prediction of Mobile Applications

+

Authors: Sera Lee, Dae R. Jeong, Junyoung Choi, Jaeheon Kwak, Seoyun Son, Jean Song, Insik Shin

+

Link

+

Abstract: Low-battery anxiety has emerged as a result of growing dependence on mobile devices, where the anxiety arises when the battery level runs low. While battery life can be extended through power-efficient hardware and software optimization techniques, low-battery anxiety will remain a phenomenon as long as mobile devices rely on batteries. In this paper, we investigate how an accurate real-time energy consumption prediction at the application-level can improve the user experience in low-battery situations. We present Serenus, a mobile system framework specifically tailored to predict the energy consumption of each mobile application and present the prediction in a user-friendly manner. We conducted user studies using Serenus to verify that highly accurate energy consumption predictions can effectively alleviate low-battery anxiety by assisting users in planning their application usage based on the remaining battery life. We summarize requirements to mitigate users’ anxiety, guiding the design of future mobile system frameworks.
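
A back-of-the-envelope version of the user-facing estimate might look like the sketch below: given a hypothetical predicted power draw per application and the remaining battery energy, report how long each application could run. Serenus's actual prediction pipeline is far more detailed; the numbers and names here are made up for illustration.

```python
# Toy per-application runtime estimate from remaining battery energy and a
# (hypothetical) predicted power draw per app. Illustrative only.
def remaining_minutes(battery_pct, battery_capacity_wh, predicted_power_w):
    remaining_wh = battery_capacity_wh * battery_pct / 100.0
    return {app: remaining_wh / watts * 60.0 for app, watts in predicted_power_w.items()}

estimates = remaining_minutes(
    battery_pct=20, battery_capacity_wh=15.0,
    predicted_power_w={"video": 4.5, "maps": 3.0, "reader": 1.2},
)
for app, minutes in estimates.items():
    print(f"{app}: ~{minutes:.0f} min left")   # e.g., video: ~40 min left
```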

+

Break Q&A: AI as Copilot

+

DiscipLink: Unfolding Interdisciplinary Information Seeking Process via Human-AI Co-Exploration

+

Authors: Chengbo Zheng, Yuanhao Zhang, Zeyu Huang, Chuhan Shi, Minrui Xu, Xiaojuan Ma

+

Link

+

Abstract: Interdisciplinary studies often require researchers to explore literature in diverse branches of knowledge. Yet, navigating through the highly scattered knowledge from unfamiliar disciplines poses a significant challenge. In this paper, we introduce DiscipLink, a novel interactive system that facilitates collaboration between researchers and large language models (LLMs) in interdisciplinary information seeking (IIS). Based on users' topic of interest, DiscipLink initiates exploratory questions from the perspectives of possible relevant fields of study, and users can further tailor these questions. DiscipLink then supports users in searching and screening papers under selected questions by automatically expanding queries with disciplinary-specific terminologies, extracting themes from retrieved papers, and highlighting the connections between papers and questions. Our evaluation, comprising a within-subject comparative experiment and an open-ended exploratory study, reveals that DiscipLink can effectively support researchers in breaking down disciplinary boundaries and integrating scattered knowledge in diverse fields. The findings underscore the potential of LLM-powered tools in fostering information-seeking practices and bolstering interdisciplinary research.

+

Improving Steering and Verification in AI-Assisted Data Analysis with Interactive Task Decomposition

+

Authors: Majeed Kazemitabaar, Jack Williams, Ian Drosos, Tovi Grossman, Austin Henley, Carina Negreanu, Advait Sarkar

+

Link

+

Abstract: LLM-powered tools like ChatGPT Data Analysis have the potential to help users tackle the challenging task of data analysis programming, which requires expertise in data processing, programming, and statistics. However, our formative study (n=15) uncovered serious challenges in verifying AI-generated results and steering the AI (i.e., guiding the AI system to produce the desired output). We developed two contrasting approaches to address these challenges. The first (Stepwise) decomposes the problem into step-by-step subgoals with pairs of editable assumptions and code until task completion, while the second (Phasewise) decomposes the entire problem into three editable, logical phases: structured input/output assumptions, execution plan, and code. A controlled, within-subjects experiment (n=18) compared these systems against a conversational baseline. Users reported significantly greater control with the Stepwise and Phasewise systems, and found intervention, correction, and verification easier, compared to the baseline. The results suggest design guidelines and trade-offs for AI-assisted data analysis tools.

+

VizGroup: An AI-assisted Event-driven System for Collaborative Programming Learning Analytics

+

Authors: Xiaohang Tang, Sam Wong, Kevin Pu, Xi Chen, Yalong Yang, Yan Chen

+

Link

+

Abstract: Programming instructors often conduct collaborative learning activities, like Peer Instruction, to foster a deeper understanding in students and enhance their engagement with learning. These activities, however, may not always yield productive outcomes due to the diversity of student mental models and their ineffective collaboration. In this work, we introduce VizGroup, an AI-assisted system that enables programming instructors to easily oversee students' real-time collaborative learning behaviors during large programming courses. VizGroup leverages Large Language Models (LLMs) to recommend event specifications for instructors so that they can simultaneously track and receive alerts about key correlation patterns between various collaboration metrics and ongoing coding tasks. We evaluated VizGroup with 12 instructors in a comparison study using a dataset collected from a Peer Instruction activity that was conducted in a large programming lecture. The results showed that VizGroup helped instructors effectively overview, narrow down, and track nuances throughout students' behaviors.

+

Who did it? How User Agency is influenced by Visual Properties of Generated Images

+

Authors: Johanna Didion, Krzysztof Wolski, Dennis Wittchen, David Coyle, Thomas Leimkühler, Paul Strohmeier

+

Link

+

Abstract: The increasing proliferation of AI and GenAI requires new interfaces tailored to how their specific affordances meet human requirements. As GenAI is capable of taking over tasks from users on an unprecedented scale, designing the experience of agency -- if and how users experience control over the process and responsibility over the outcome -- is crucial. As an initial step towards design guidelines for shaping agency, we present a study that explores how features of AI-generated images influence users' experience of agency. We use two measures: temporal binding to implicitly estimate pre-reflective agency, and magnitude estimation to assess user judgments of agency. We observe that abstract images lead to more temporal binding than images with semantic meaning. In contrast, the closer an image aligns with what a user might expect, the higher the agency judgment. When comparing the experiment results with objective metrics of image differences, we find that temporal binding results correlate with semantic differences, while agency judgments are better explained by local differences between images. This work contributes towards a future where agency is considered an important design dimension for GenAI interfaces.

+

FathomGPT: A Natural Language Interface for Interactively Exploring Ocean Science Data

+

Authors: Nabin Khanal, Chun Meng Yu, Jui-Cheng Chiu, Anav Chaudhary, Ziyue Zhang, Kakani Katija, Angus Forbes

+

Link

+

Abstract: We introduce FathomGPT, an open source system for the interactive investigation of ocean science data via a natural language interface. FathomGPT was developed in close collaboration with marine scientists to enable researchers and ocean enthusiasts to explore and analyze the FathomNet image database. FathomGPT provides a custom information retrieval pipeline that leverages OpenAI’s large language models to enable: the creation of complex queries to retrieve images, taxonomic information, and scientific measurements; mapping common names and morphological features to scientific names; generating interactive charts on demand; and searching by image or specified patterns within an image. In designing FathomGPT, particular emphasis was placed on enhancing the user's experience by facilitating free-form exploration and optimizing response times. We present an architectural overview and implementation details of FathomGPT, along with a series of ablation studies that demonstrate the effectiveness of our approach to name resolution, fine tuning, and prompt modification. Additionally, we present usage scenarios of interactive data exploration sessions and document feedback from ocean scientists and machine learning experts.

+

VRCopilot: Authoring 3D Layouts with Generative AI Models in VR

+

Authors: Lei Zhang, Jin Pan, Jacob Gettig, Steve Oney, Anhong Guo

+

Link

+

Abstract: Immersive authoring provides an intuitive medium for users to create 3D scenes via direct manipulation in Virtual Reality (VR). Recent advances in generative AI have enabled the automatic creation of realistic 3D layouts. However, it is unclear how capabilities of generative AI can be used in immersive authoring to support fluid interactions, user agency, and creativity. We introduce VRCopilot, a mixed-initiative system that integrates pre-trained generative AI models into immersive authoring to facilitate human-AI co-creation in VR. VRCopilot presents multimodal interactions to support rapid prototyping and iterations with AI, and intermediate representations such as wireframes to augment user controllability over the created content. Through a series of user studies, we evaluated the potential and challenges in manual, scaffolded, and automatic creation in immersive authoring. We found that scaffolded creation using wireframes enhanced the user agency compared to automatic creation. We also found that manual creation via multimodal specification offers the highest sense of creativity and agency.

+

Break Q&A: Validation in AI/ML

+

Natural Expression of a Machine Learning Model's Uncertainty Through Verbal and Non-Verbal Behavior of Intelligent Virtual Agents

+

Authors: Susanne Schmidt, Tim Rolff, Henrik Voigt, Micha Offe, Frank Steinicke

+

Link

+

Abstract: Uncertainty cues are inherent in natural human interaction, as they signal to communication partners how much they can rely on conveyed information. Humans subconsciously provide such signals both verbally (e.g., through expressions such as "maybe" or "I think") and non-verbally (e.g., by diverting their gaze). In contrast, artificial intelligence (AI)-based services and machine learning (ML) models such as ChatGPT usually do not disclose the reliability of answers to their users. In this paper, we explore the potential of combining ML models as powerful information sources with human means of expressing uncertainty to contextualize the information. We present a comprehensive pipeline that comprises (1) the human-centered collection of (non-)verbal uncertainty cues, (2) the transfer of cues to virtual agent videos, (3) the annotation of videos for perceived uncertainty, and (4) the subsequent training of a custom ML model that can generate uncertainty cues in virtual agent behavior. In a final step (5), the trained ML model is evaluated in terms of both fidelity and generalizability of the generated (non-)verbal uncertainty behavior.

+

Who Validates the Validators? Aligning LLM-Assisted Evaluation of LLM Outputs with Human Preferences

+

Authors: Shreya Shankar, J.D. Zamfirescu-Pereira, Bjoern Hartmann, Aditya Parameswaran, Ian Arawjo

+

Link

+

Abstract: Due to the cumbersome nature of human evaluation and limitations of code-based evaluation, Large Language Models (LLMs) are increasingly being used to assist humans in evaluating LLM outputs. Yet LLM-generated evaluators simply inherit all the problems of the LLMs they evaluate, requiring further human validation. We present a mixed-initiative approach to “validate the validators”— aligning LLM-generated evaluation functions (be it prompts or code) with human requirements. Our interface, EvalGen, provides automated assistance to users in generating evaluation criteria and implementing assertions. While generating candidate implementations (Python functions, LLM grader prompts), EvalGen asks humans to grade a subset of LLM outputs; this feedback is used to select implementations that better align with user grades. A qualitative study finds overall support for EvalGen but underscores the subjectivity and iterative nature of alignment. In particular, we identify a phenomenon we dub criteria drift: users need criteria to grade outputs, but grading outputs helps users define criteria. What is more, some criteria appear dependent on the specific LLM outputs observed (rather than independent and definable a priori), raising serious questions for approaches that assume the independence of evaluation from observation of model outputs. We present our interface and implementation details, a comparison of our algorithm with a baseline approach, and implications for the design of future LLM evaluation assistants.
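
A minimal, hypothetical sketch of the alignment step described above follows; it is not EvalGen's implementation, and the candidate checks and graded outputs are invented. Candidate evaluator functions are simply ranked by how often their verdicts agree with the human pass/fail grades.

```python
# Hypothetical sketch of grade-aligned evaluator selection (not EvalGen's code).
import json

def valid_json(output: str) -> bool:
    try:
        json.loads(output)
        return True
    except ValueError:
        return False

def cites_source(output: str) -> bool:
    return "source" in output.lower()

candidates = {"valid_json": valid_json, "cites_source": cites_source}

# A few LLM outputs with human pass/fail grades collected while the user grades.
graded = [
    ('{"answer": 42, "source": "doc1"}', True),
    ("plain text, no citation", False),
    ('{"answer": "n/a"}', False),
]

def alignment(check) -> float:
    """Fraction of graded outputs where the candidate's verdict matches the human grade."""
    return sum(check(out) == grade for out, grade in graded) / len(graded)

# Keep the candidate implementation(s) that best agree with the human grades.
for name, fn in sorted(candidates.items(), key=lambda kv: alignment(kv[1]), reverse=True):
    print(f"{name}: agreement = {alignment(fn):.2f}")
```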

+

LlamaTouch: A Faithful and Scalable Testbed for Mobile UI Task Automation

+

Authors: Li Zhang, Shihe Wang, Xianqing Jia, Zhihan Zheng, Yunhe Yan, Longxi Gao, Yuanchun Li, Mengwei Xu

+

Link

+

Abstract: The emergent large language/multimodal models facilitate the evolution of mobile agents, especially in mobile UI task automation. However, existing evaluation approaches, which rely on human validation or established datasets to compare agent-predicted actions with predefined action sequences, are unscalable and unfaithful. To overcome these limitations, this paper presents LlamaTouch, a testbed for on-device mobile UI task execution and faithful, scalable task evaluation. By observing that the task execution process only transfers UI states, LlamaTouch employs a novel evaluation approach that only assesses whether an agent traverses all manually annotated, essential application/system states. LlamaTouch comprises three key techniques: (1) On-device task execution that enables mobile agents to interact with realistic mobile environments for task execution. (2) Fine-grained UI component annotation that merges pixel-level screenshots and textual screen hierarchies to explicitly identify and precisely annotate essential UI components with a rich set of designed annotation primitives. (3) A multi-level application state matching algorithm that utilizes exact and fuzzy matching to accurately detect critical information in each screen, even with unpredictable UI layout/content dynamics. LlamaTouch currently incorporates four mobile agents and 496 tasks, encompassing both tasks in the widely-used datasets and our self-constructed ones to cover more diverse mobile applications. Evaluation results demonstrate LlamaTouch’s high faithfulness of evaluation in real-world mobile environments and its better scalability than human validation. LlamaTouch also enables easy task annotation and integration of new mobile agents. Code and dataset are publicly available at https://github.com/LlamaTouch/LlamaTouch.
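
A loose illustration of multi-level state matching in this spirit follows; it is not LlamaTouch's algorithm, and the state encoding, field names, and threshold are assumptions. It combines exact matching on an activity identifier with fuzzy matching on essential on-screen text.

```python
# Hypothetical sketch of exact + fuzzy UI-state matching (not LlamaTouch's code).
from difflib import SequenceMatcher

def fuzzy_match(a: str, b: str, threshold: float = 0.8) -> bool:
    return SequenceMatcher(None, a.lower(), b.lower()).ratio() >= threshold

def state_matches(observed: dict, essential: dict) -> bool:
    # Level 1: exact match on the screen/activity identifier.
    if observed.get("activity") != essential.get("activity"):
        return False
    # Level 2: fuzzy match for each piece of essential on-screen text.
    return all(any(fuzzy_match(need, got) for got in observed.get("texts", []))
               for need in essential.get("texts", []))

def task_completed(trace: list, essential_states: list) -> bool:
    """True if the trace passes through every annotated essential state, in order."""
    i = 0
    for observed in trace:
        if i < len(essential_states) and state_matches(observed, essential_states[i]):
            i += 1
    return i == len(essential_states)

trace = [{"activity": "Settings", "texts": ["Wi-Fi", "Bluetooth"]},
         {"activity": "Wi-Fi", "texts": ["Wi-Fi is on", "HomeNetwork"]}]
goal = [{"activity": "Wi-Fi", "texts": ["wifi is on"]}]
print(task_completed(trace, goal))  # True with the 0.8 threshold
```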

+

Clarify: Improving Model Robustness With Natural Language Corrections

+

Authors: Yoonho Lee, Michelle Lam, Helena Vasconcelos, Michael Bernstein, Chelsea Finn

+

Link

+

Abstract: The standard way to teach models is by feeding them lots of data. However, this approach often teaches models incorrect ideas because they pick up on misleading signals in the data. To prevent such misconceptions, we must necessarily provide additional information beyond the training data. Prior methods incorporate additional instance-level supervision, such as labels for misleading features or additional labels for debiased data. However, such strategies require a large amount of labeler effort. We hypothesize that people are good at providing textual feedback at the concept level, a capability that existing teaching frameworks do not leverage. We propose Clarify, a novel interface and method for interactively correcting model misconceptions. Through Clarify, users need only provide a short text description of a model's consistent failure patterns. Then, in an entirely automated way, we use such descriptions to improve the training process. Clarify is the first end-to-end system for user model correction. Our user studies show that non-expert users can successfully describe model misconceptions via Clarify, leading to increased worst-case performance in two datasets. We additionally conduct a case study on a large-scale image dataset, ImageNet, using Clarify to find and rectify 31 novel hard subpopulations.

+

"The Data Says Otherwise" – Towards Automated Fact-checking and Communication of Data Claims

+

Authors: Yu Fu, Shunan Guo, Jane Hoffswell, Victor S. Bursztyn, Ryan Rossi, John Stasko

+

Link

+

Abstract: Fact-checking data claims requires data evidence retrieval and analysis, which can become tedious and intractable when done manually. This work presents Aletheia, an automated fact-checking prototype designed to facilitate data claims verification and enhance data evidence communication. For verification, we utilize a pre-trained LLM to parse the semantics for evidence retrieval. To effectively communicate the data evidence, we design representations in two forms: data tables and visualizations, tailored to various data fact types. Additionally, we design interactions that showcase a real-world application of these techniques. We evaluate the performance of two core NLP tasks with a curated dataset comprising 400 data claims and compare the two representation forms regarding viewers’ assessment time, confidence, and preference via a user study with 20 participants. The evaluation offers insights into the feasibility and bottlenecks of using LLMs for data fact-checking tasks, potential advantages and disadvantages of using visualizations over data tables, and design recommendations for presenting data evidence.

+

Break Q&A: Bodily Signals

+

Empower Real-World BCIs with NIRS-X: An Adaptive Learning Framework that Harnesses Unlabeled Brain Signals

+

Authors: Liang Wang, Jiayan Zhang, Jinyang Liu, Devon McKeon, David Guy Brizan, Giles Blaney, Robert Jacob

+

Link

+

Abstract: Brain-Computer Interfaces (BCIs) using functional near-infrared spectroscopy (fNIRS) hold promise for future interactive user interfaces due to their ease of deployment and declining cost. However, they typically require a separate calibration process for each user and task, which can be burdensome. Machine learning helps, but faces a data scarcity problem. Due to inherent inter-user variations in physiological data, it has been typical to create a new annotated training dataset for every new task and user. To reduce dependence on such extensive data collection and labeling, we present an adaptive learning framework, NIRS-X, to harness more easily accessible unlabeled fNIRS data. NIRS-X includes two key components: NIRSiam and NIRSformer. We use the NIRSiam algorithm to extract generalized brain activity representations from unlabeled fNIRS data obtained from previous users and tasks, and then transfer that knowledge to new users and tasks. In conjunction, we design a neural network, NIRSformer, tailored for capturing both local and global, spatial and temporal relationships in multi-channel fNIRS brain input signals. By using unlabeled data from both a previously released fNIRS2MW visual n-back dataset and a newly collected fNIRS2MW audio n-back dataset, NIRS-X demonstrates its strong adaptation capability to new users and tasks. Results show comparable or superior performance to supervised methods, making NIRS-X promising for real-world fNIRS-based BCIs.

+

Understanding the Effects of Restraining Finger Coactivation in Mid-Air Typing: from a Neuromechanical Perspective

+

Authors: Hechuan Zhang, Xuewei Liang, Ying Lei, Yanjun Chen, Zhenxuan He, Yu Zhang, Lihan Chen, Hongnan Lin, Teng Han, Feng Tian

+

Link

+

Abstract: Typing in mid-air is often perceived as intuitive yet presents challenges due to finger coactivation, a neuromechanical phenomenon that involves involuntary finger movements stemming from the lack of physical constraints. Previous studies have examined and addressed the impacts of finger coactivation using algorithmic approaches. Alternatively, this paper explores the neuromechanical effects of finger coactivation on mid-air typing, aiming to deepen our understanding and provide valuable insights to improve these interactions. We utilized a wearable device that restrains finger coactivation as a prop to conduct two mid-air studies, including a rapid finger-tapping task and a ten-finger typing task. The results revealed that restraining coactivation not only reduced mispresses, a classic error typically attributed to coactivation, but also, unexpectedly, reduced motor control errors and spelling errors, which are regarded as non-coactivation errors. Additionally, the study evaluated the neural resources involved in motor execution using functional Near Infrared Spectroscopy (fNIRS), which tracked cortical arousal during mid-air typing. The findings demonstrated decreased activation in the primary motor cortex of the left hemisphere when coactivation was restrained, suggesting a diminished motor execution load. This reduction suggests that a portion of neural resources is conserved, which also potentially aligns with perceived lower mental workload and decreased frustration levels.

+

What is Affective Touch Made Of? A Soft Capacitive Sensor Array Reveals the Interplay between Shear, Normal Stress and Individuality

+

Authors: Devyani McLaren, Jian Gao, Xiulun Yin, Rúbia Reis Guerra, Preeti Vyas, Chrys Morton, Xi Laura Cang, Yizhong Chen, Yiyuan Sun, Ying Li, John Madden, Karon MacLean

+

Link

+

Abstract: Humans physically express emotion by modulating parameters that register on mammalian skin mechanoreceptors, but are unavailable in current touch-sensing technology. Greater sensory richness combined with data on affect-expression composition is a prerequisite to estimating affect from touch, with applications including physical human-robot interaction. To examine shear alongside more easily captured normal stresses, we tailored recent capacitive technology to attain performance suitable for affective touch, creating a flexible, reconfigurable and soft 36-taxel array that detects multitouch normal and 2-dimensional shear at ranges of 1.5 kPa-43 kPa and ±0.3-3.8 kPa respectively, wirelessly at ~43 Hz (1548 taxels/s). In a deep-learning classification of 9 gestures (N=16), inclusion of shear data improved accuracy to 88%, compared to 80% with normal stress data alone, confirming shear stress's expressive centrality. Using this rich data, we analyse the interplay of sensed-touch features, gesture attributes and individual differences, propose affective-touch sensing requirements, and share technical considerations for performance and practicality.

+

Exploring the Effects of Sensory Conflicts on Cognitive Fatigue in VR Remappings

+

HONORABLE_MENTION

+

Authors: Tianren Luo, Gaozhang Chen, Yijian Wen, Pengxiang Wang, yachun fan, Teng Han, Feng Tian

+

Link

+

Abstract: Virtual reality (VR) is found to present significant cognitive challenges due to its immersive nature and frequent sensory conflicts. This study systematically investigates the impact of sensory conflicts induced by VR remapping techniques on cognitive fatigue, and unveils their correlation. We utilized three remapping methods (haptic repositioning, head-turning redirection, and giant resizing) to create different types of sensory conflicts, and measured perceptual thresholds to induce various intensities of the conflicts. Through experiments involving cognitive tasks along with subjective and physiological measures, we found that all three remapping methods influenced the onset and severity of cognitive fatigue, with visual-vestibular conflict having the greatest impact. Interestingly, visual-experiential/memory conflict showed a mitigating effect on cognitive fatigue, emphasizing the role of novel sensory experiences. This study contributes to a deeper understanding of cognitive fatigue under sensory conflicts and provides insights for designing VR experiences that align better with human perceptual and cognitive capabilities.

+

Break Q&A: Future Fabrics

+

ScrapMap: Interactive Color Layout for Scrap Quilting

+

Authors: Mackenzie Leake, Ross Daly

+

Link

+

Abstract: Scrap quilting is a popular sewing process that involves combining leftover pieces of fabric into traditional patchwork designs. Imagining the possibilities for these leftovers and arranging the fabrics in such a way that achieves visual goals, such as high contrast, can be challenging given the large number of potential fabric assignments within the quilt's design. We formulate the task of designing a scrap quilt as a graph coloring problem with domain-specific coloring and material constraints. Our interactive tool called ScrapMap helps quilters explore these potential designs given their available materials by leveraging the hierarchy of scrap quilt construction (e.g., quilt blocks and motifs) and providing user-directed automatic block coloring suggestions. Our user evaluation indicates that quilters find ScrapMap useful for helping them consider new ways to use their scraps and create visually striking quilts.
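
A minimal sketch of the graph-coloring framing follows, assuming a toy block-adjacency graph and scrap supply; it is a greedy heuristic for illustration, not ScrapMap's solver, and the data are invented.

```python
# Hypothetical sketch: greedy fabric assignment as constrained graph coloring
# (not ScrapMap's algorithm). Adjacent blocks must get different fabrics and
# each scrap can only cover a limited number of blocks.
def assign_fabrics(adjacency: dict, fabric_supply: dict) -> dict:
    """adjacency: block -> set of neighbours; fabric_supply: fabric -> blocks it can cover."""
    remaining = dict(fabric_supply)
    assignment = {}
    # Color high-degree blocks first (a common greedy heuristic).
    for block in sorted(adjacency, key=lambda b: len(adjacency[b]), reverse=True):
        used_nearby = {assignment[n] for n in adjacency[block] if n in assignment}
        for fabric, left in remaining.items():
            if left > 0 and fabric not in used_nearby:
                assignment[block] = fabric
                remaining[fabric] -= 1
                break
        else:
            raise ValueError(f"No feasible fabric for block {block}")
    return assignment

blocks = {"A": {"B", "C"}, "B": {"A", "C"}, "C": {"A", "B", "D"}, "D": {"C"}}
print(assign_fabrics(blocks, {"red_scrap": 2, "blue_scrap": 2, "plaid_scrap": 1}))
```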

+

What's in a cable? Abstracting Knitting Design Elements with Blended Raster/Vector Primitives

+

Authors: Hannah Twigg-Smith, Yuecheng Peng, Emily Whiting, Nadya Peek

+

Link

+

Abstract: In chart-based programming environments for machine knitting, patterns are specified at a low level by placing operations on a grid. This highly manual workflow makes it challenging to iterate on design elements such as cables, colorwork, and texture. While vector-based abstractions for knitting design elements may facilitate higher-level manipulation, they often include interdependencies which require stitch-level reconciliation. To address this, we contribute a new way of specifying knits with blended vector and raster primitives. Our abstraction supports the design of interdependent elements like colorwork and texture. We have implemented our blended raster/vector specification in a direct manipulation design tool where primitives are layered and rasterized, allowing for simulation of the resulting knit structure and generation of machine instructions. Through examples, we show how our approach enables higher-level manipulation of various knitting techniques, including intarsia colorwork, short rows, and cables. Specifically, we show how our tool supports the design of complex patterns including origami pleat patterns and capacitive sensor patches.

+

Embrogami: Shape-Changing Textiles with Machine Embroidery

+

Authors: Yu Jiang, Alice Haynes, Narjes Pourjafarian, Jan Borchers, Jürgen Steimle

+

Link

+

Abstract: Machine embroidery is a versatile technique for creating custom and entirely fabric-based patterns on thin and conformable textile surfaces. However, existing machine-embroidered surfaces remain static, limiting the interactions they can support. We introduce Embrogami, an approach for fabricating textile structures with versatile shape-changing behaviors. Inspired by origami, we leverage machine embroidery to form finger-tip-scale mountain-and-valley structures on textiles with customized shapes, bistable or elastic behaviors, and modular composition. The structures can be actuated by the user or the system to modify the local textile surface topology, creating interactive elements like toggles and sliders or textile shape displays with an ultra-thin, flexible, and integrated form factor. We provide a dedicated software tool and report results of technical experiments to allow users to flexibly design, fabricate, and deploy customized Embrogami structures. With four application cases, we showcase Embrogami’s potential to create functional and flexible shape-changing textiles with diverse visuo-tactile feedback.

+

KODA: Knit-program Optimization by Dependency Analysis

+

Authors: Megan Hofmann

+

Link

+

Abstract: Digital knitting machines have the capability to reliably manufacture seamless, textured, and multi-material garments, but these capabilities are obscured by limiting CAD tools. Recent innovations in computational knitting build on emerging programming infrastructure that gives full access to the machine's capabilities but requires an extensive understanding of machine operations and execution. In this paper, we contribute a critical missing piece of the knitting-machine programming pipeline--a program optimizer. Program optimization allows programmers to focus on developing novel algorithms that produce desired fabrics while deferring concerns of efficient machine operations to the optimizer. We present KODA, the Knit-program Optimization by Dependency Analysis method. KODA re-orders and reduces machine instructions to reduce knitting time, increase knitting reliability, and manage boilerplate operations that adjust the machine state. The result is a system that enables programmers to write readable and intuitive knitting algorithms while producing efficient and verified programs.
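
A small sketch of dependency-based reordering in this spirit follows; it is not KODA itself, and the instruction encoding is an assumption. Each instruction depends on the last instruction that touched any needle it uses, and any topological order of that graph is a valid re-emission order (requires Python 3.9+ for graphlib).

```python
# Hypothetical sketch of dependency analysis over knit instructions (not KODA's code).
from graphlib import TopologicalSorter  # Python 3.9+

instructions = [
    ("i0", "knit", {"n1"}),        # (id, op, needles touched) - assumed encoding
    ("i1", "knit", {"n2"}),
    ("i2", "xfer", {"n1", "n3"}),
    ("i3", "knit", {"n3"}),
]

deps = {}
last_writer = {}
for iid, op, needles in instructions:
    # An instruction depends on the previous instruction that touched each of its needles.
    deps[iid] = {last_writer[n] for n in needles if n in last_writer}
    for n in needles:
        last_writer[n] = iid

order = list(TopologicalSorter(deps).static_order())
print(order)  # any order that respects needle dependencies, e.g. ['i0', 'i1', 'i2', 'i3']
```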

+

X-Hair: 3D Printing Hair-like Structures with Multi-form, Multi-property and Multi-function

+

Authors: Guanyun Wang, Junzhe Ji, Yunkai Xu, Lei Ren, Xiaoyang Wu, Chunyuan Zheng, Xiaojing Zhou, Xin Tang, Boyu Feng, Lingyun Sun, Ye Tao, Jiaji Li

+

Link

+

Abstract: In this paper, we present X-Hair, a method that enables 3D-printed hair with various forms, properties, and functions. We developed a two-step suspend printing strategy to fabricate hair-like structures in different forms (e.g. fluff, bristle, barb) by adjusting parameters including Extrusion Length Ratio and Total Length. Moreover, a design tool is established for users to customize hair-like structures with various properties (e.g. pointy, stiff, soft) on imported 3D models, which virtually shows the results for previewing and generates G-code files for 3D printing. We demonstrate the design space of X-Hair and evaluate the properties of hair-like structures with different parameters. Through a series of applications with hair-like structures, we validate X-Hair's practical usage for biomimicry, decoration, heat preservation, adhesion, and haptic interaction.

+

TouchpadAnyWear: Textile-Integrated Tactile Sensors for Multimodal High Spatial-Resolution Touch Inputs with Motion Artifacts Tolerance

+

Authors: Junyi Zhao, Pornthep Preechayasomboon, Tyler Christensen, Amirhossein H. Memar, Zhenzhen Shen, Nick Colonnese, Michael Khbeis, Mengjia Zhu

+

Link

+

Abstract: This paper presents TouchpadAnyWear, a novel family of textile-integrated force sensors capable of multi-modal touch input, encompassing micro-gesture detection, two-dimensional (2D) continuous input, and force-sensitive strokes. This thin (<1.5 mm) and conformal device features high spatial resolution sensing and motion artifact tolerance through its unique capacitive sensor architecture. The sensor consists of a knitted textile compressive core, sandwiched by stretchable silver electrodes, and conductive textile shielding layers on both sides. With a high-density sensor pixel array (25/cm²), TouchpadAnyWear can detect touch input locations and sizes with millimeter-scale spatial resolution and a wide range of force inputs (0.05 N to 20 N). The incorporation of miniature polymer domes, referred to as "poly-islands", onto the knitted textile locally stiffens the sensing areas, thereby reducing motion artifacts during deformation. These poly-islands also provide passive tactile feedback to users, allowing for eyes-free localization of the active sensing pixels. Design choices and sensor performance are evaluated using in-depth mechanical characterization. Demonstrations include an 8-by-8 grid sensor as a miniature high-resolution touchpad and a T-shaped sensor for thumb-to-finger micro-gesture input. User evaluations validate the effectiveness and usability of TouchpadAnyWear in daily interaction contexts, such as tapping, forceful pressing, swiping, 2D cursor control, and 2D stroke-based gestures. This paper further discusses potential applications and explorations for TouchpadAnyWear in wearable smart devices, gaming, and augmented reality devices.

+

Break Q&A: Dynamic Objects & Materials

+

MagneDot: Integrated Fabrication and Actuation Methods of Dot-Based Magnetic Shape Displays

+

Authors: Lingyun Sun, Yitao Fan, Boyu Feng, Yifu Zhang, Deying Pan, Yiwen Ren, Yuyang Zhang, Qi Wang, Ye Tao, Guanyun Wang

+

Link

+

Abstract: This paper presents MagneDot, a novel method for making interactive magnetic shape displays through an integrated fabrication process. Magnetic soft materials can potentially create fast, responsive morphing structures for interactions. However, novice users and designers typically do not have access to sophisticated equipment and materials or cannot afford heavy labor to create interactive objects based on this material. Modified from an open-source 3D printer, the fabrication system of MagneDot integrates the processes of mold-making, pneumatic extrusion, magnetization, and actuation, using cost-effective materials only. By providing a design tool, MagneDot allows users to generate G-codes for fabricating and actuating displays of various morphing effects. Finally, a series of design examples demonstrate the possibilities of shape displays enabled by MagneDot.

+

CARDinality: Interactive Card-shaped Robots with Locomotion and Haptics using Vibration

+

Authors: Aditya Retnanto, Emilie Faracci, Anup Sathya, Yu-Kai Hung, Ken Nakagaki

+

Link

+

Abstract: This paper introduces a novel approach to interactive robots by leveraging the form-factor of cards to create thin robots equipped with vibrational capabilities for locomotion and haptic feedback. The system is composed of flat-shaped robots with on-device sensing and wireless control, which offer lightweight portability and scalability. This research introduces a hardware prototype to explore the possibility of ‘vibration-based omni-directional sliding locomotion’. Applications include augmented card playing, educational tools, and assistive technology, which showcase CARDinality’s versatility in tangible interaction.

+

PortaChrome: A Portable Contact Light Source for Integrated Re-Programmable Multi-Color Textures

+

Authors: Yunyi Zhu, Cedric Honnet, Yixiao Kang, Junyi Zhu, Angelina Zheng, Kyle Heinz, Grace Tang, Luca Musk, Michael Wessely, Stefanie Mueller

+

Link

+

Abstract: In this paper, we present PortaChrome, a portable light source that can be attached to everyday objects to reprogram the color and texture of surfaces that come in contact with them. When PortaChrome makes contact with objects previously coated with photochromic dye, the UV and RGB LEDs inside PortaChrome create multi-color textures on the objects. In contrast to prior work, which used projectors for the color-change, PortaChrome has a thin and flexible form factor, which allows the color-change process to be integrated into everyday user interaction. Because of the close distance between the light source and the photochromic object, PortaChrome creates color textures in less than 4 minutes on average, which is 8 times faster than prior work. We demonstrate PortaChrome with four application examples, including data visualizations on textiles and dynamic designs on wearables.

+

Augmented Object Intelligence with XR-Objects

+

Authors: Mustafa Doga Dogan, Eric Gonzalez, Karan Ahuja, Ruofei Du, Andrea Colaço, Johnny Lee, Mar Gonzalez-Franco, David Kim

+

Link

+

Abstract: Seamless integration of physical objects as interactive digital entities remains a challenge for spatial computing. This paper explores Augmented Object Intelligence (AOI) in the context of XR, an interaction paradigm that aims to blur the lines between digital and physical by equipping real-world objects with the ability to interact as if they were digital, where every object has the potential to serve as a portal to digital functionalities. Our approach utilizes real-time object segmentation and classification, combined with the power of Multimodal Large Language Models (MLLMs), to facilitate these interactions without the need for object pre-registration. We implement the AOI concept in the form of XR-Objects, an open-source prototype system that provides a platform for users to engage with their physical environment in contextually relevant ways using object-based context menus. This system enables analog objects to not only convey information but also to initiate digital actions, such as querying for details or executing tasks. Our contributions are threefold: (1) we define the AOI concept and detail its advantages over traditional AI assistants, (2) detail the XR-Objects system’s open-source design and implementation, and (3) show its versatility through various use cases and a user study.

+

Break Q&A: Prototyping

+

ProtoDreamer: A Mixed-prototype Tool Combining Physical Model and Generative AI to Support Conceptual Design

+

Authors: Hongbo ZHANG, Pei Chen, Xuelong Xie, Chaoyi Lin, Lianyan Liu, Zhuoshu Li, Weitao You, Lingyun Sun

+

Link

+

Abstract: Prototyping serves as a critical phase in the industrial conceptual design process, enabling exploration of the problem space and identification of solutions. Recent advancements in large-scale generative models have enabled AI to become a co-creator in this process. However, designers often consider generative AI challenging due to the necessity to follow computer-centered interaction rules, diverging from their familiar design materials and languages. Physical prototyping is a commonly used design method, offering unique benefits in the prototyping process, such as intuitive understanding and tangible testing. In this study, we propose ProtoDreamer, a mixed-prototype tool that synergizes generative AI with physical prototyping to support conceptual design. ProtoDreamer allows designers to construct preliminary prototypes using physical materials, while AI recognizes these forms and vocal inputs to generate diverse design alternatives. This tool empowers designers to tangibly interact with prototypes, intuitively convey design intentions to AI, and continuously draw inspiration from the generated artifacts. An evaluation study confirms ProtoDreamer’s utility and strengths in time efficiency, creativity support, defects exposure, and detailed thinking facilitation.

+

TorqueCapsules: Fully-Encapsulated Flywheel Actuation Modules for Designing and Prototyping Movement-Based and Kinesthetic Interaction

+

Authors: Willa Yunqi Yang, Yifan Zou, Jingle Huang, Raouf Abujaber, Ken Nakagaki

+

Link

+

Abstract: Flywheels are unique, versatile actuators that store and convert kinetic energy to torque, widely utilized in aerospace, robotics, haptics, and more. However, prototyping interaction using flywheels is not trivial due to safety concerns, unintuitive operation, and implementation challenges. We present TorqueCapsules: self-contained, fully-encapsulated flywheel actuation modules that make flywheel actuators easy to control, safe to interact with, and quick to reconfigure and customize. By fully encapsulating the actuators with a wireless microcontroller, a battery, and other components, the modules can be readily attached, embedded, or stuck to everyday objects, worn on people’s bodies, or combined with other devices. With our custom GUI, both novice and expert users can easily control multiple modules to design and prototype movements and kinesthetic haptics unique to flywheel actuation. We demonstrate various applications, including actuated everyday objects, wearable haptics, and expressive robots. We conducted workshops for novices and experts to employ TorqueCapsules to collect qualitative feedback and further application examples.

+

AniCraft: Crafting Everyday Objects as Physical Proxies for Prototyping 3D Character Animation in Mixed Reality

+

Authors: Boyu Li, Linping Yuan, Zhe Yan, Qianxi Liu, Yulin Shen, Zeyu Wang

+

Link

+

Abstract: We introduce AniCraft, a mixed reality system for prototyping 3D character animation using physical proxies crafted from everyday objects. Unlike existing methods that require specialized equipment to support the use of physical proxies, AniCraft only requires affordable markers, webcams, and daily accessible objects and materials. AniCraft allows creators to prototype character animations through three key stages: selection of virtual characters, fabrication of physical proxies, and manipulation of these proxies to animate the characters. This authoring workflow is underpinned by diverse physical proxies, manipulation types, and mapping strategies, which ease the process of posing virtual characters and mapping user interactions with physical proxies to animated movements of virtual characters. We provide a range of cases and potential applications to demonstrate how diverse physical proxies can inspire user creativity. User experiments show that our system can outperform traditional animation methods for rapid prototyping. Furthermore, we provide insights into the benefits and usage patterns of different materials, which lead to design implications for future research.

+

Mul-O: Encouraging Olfactory Innovation in Various Scenarios Through a Task-Oriented Development Platform

+

Authors: Peizhong Gao, Fan Liu, Di Wen, Yuze Gao, Linxin Zhang, Chikelei Wang, Qiwei Zhang, Yu Zhang, Shao-en Ma, Qi Lu, Haipeng Mi, YINGQING XU

+

Link

+

Abstract: Olfactory interfaces are pivotal in HCI, yet their development is hindered by limited application scenarios, stifling the discovery of new research opportunities. This challenge primarily stems from existing design tools focusing predominantly on odor display devices and the creation of standalone olfactory experiences, rather than enabling rapid adaptation to various contexts and tasks. Addressing this, we introduce Mul-O, a novel task-oriented development platform crafted to aid semi-professionals in navigating the diverse requirements of potential application scenarios and effectively prototyping ideas. Mul-O facilitates the swift association and integration of olfactory experiences into functional designs, system integrations, and concept validations. Comprising a web UI for task-oriented development, an API server for seamless third-party integration, and wireless olfactory display hardware, Mul-O significantly enhances the ideation and prototyping process in multisensory tasks. This was verified by a 15-day workshop attended by 30 participants. The workshop produced seven innovative projects, underscoring Mul-O's efficacy in fostering olfactory innovation.

+

Break Q&A: New Visualizations

+

VisCourt: In-Situ Guidance for Interactive Tactic Training in Mixed Reality

+

Authors: Liqi Cheng, Hanze Jia, Lingyun Yu, Yihong Wu, Shuainan Ye, Dazhen Deng, Hui Zhang, Xiao Xie, Yingcai Wu

+

Link

+

Abstract: In team sports like basketball, understanding and executing tactics---coordinated plans of movements among players---are crucial yet complex, requiring extensive practice. These tactics require players to develop a keen sense of spatial and situational awareness. Traditional coaching methods, which mainly rely on basketball tactic boards and video instruction, often fail to bridge the gap between theoretical learning and the real-world application of tactics, due to shifts in view perspectives and a lack of direct experience with tactical scenarios. To address this challenge, we introduce VisCourt, a Mixed Reality (MR) tactic training system, in collaboration with a professional basketball team. To set up the MR training environment, we employed semi-automatic methods to simulate realistic 3D tactical scenarios and iteratively designed visual in-situ guidance. This approach enables full-body engagement in interactive training sessions on an actual basketball court and provides immediate feedback, significantly enhancing the learning experience. A user study with athletes and enthusiasts shows the effectiveness and satisfaction with VisCourt in basketball training and offers insights for the design of future SportsXR training systems.

+

Block and Detail: Scaffolding Sketch-to-Image Generation

+

Authors: Vishnu Sarukkai, Lu Yuan, Mia Tang, Maneesh Agrawala, Kayvon Fatahalian

+

Link

+

Abstract: We introduce a novel sketch-to-image tool that aligns with the iterative refinement process of artists. Our tool lets users sketch blocking strokes to coarsely represent the placement and form of objects and detail strokes to refine their shape and silhouettes. We develop a two-pass algorithm for generating high-fidelity images from such sketches at any point in the iterative process. In the first pass we use a ControlNet to generate an image that strictly follows all the strokes (blocking and detail) and in the second pass we add variation by renoising regions surrounding blocking strokes. We also present a dataset generation scheme that, when used to train a ControlNet architecture, allows regions that do not contain strokes to be interpreted as not-yet-specified regions rather than empty space. We show that this partial-sketch-aware ControlNet can generate coherent elements from partial sketches that only contain a small number of strokes. The high-fidelity images produced by our approach serve as scaffolds that can help the user adjust the shape and proportions of objects or add additional elements to the composition. We demonstrate the effectiveness of our approach with a variety of examples and evaluative comparisons. Quantitatively, novice viewers prefer the quality of images from our algorithm over a baseline Scribble ControlNet for 82% of the pairs and found our images had less distortion in 80% of the pairs.

+

EVE: Enabling Anyone to Train Robots using Augmented Reality

+

Authors: Jun Wang, Chun-Cheng Chang, Jiafei Duan, Dieter Fox, Ranjay Krishna

+

Link

+

Abstract: The increasing affordability of robot hardware is accelerating the integration of robots into everyday activities. However, training a robot to automate a task requires expensive trajectory data where a trained human annotator moves a physical robot to train it. Consequently, only those with access to robots produce demonstrations to train robots. In this work, we remove this restriction with EVE, an iOS app that enables everyday users to train robots using intuitive augmented reality visualizations, without needing a physical robot. With EVE, users can collect demonstrations by specifying waypoints with their hands, visually inspecting the environment for obstacles, modifying existing waypoints, and verifying collected trajectories. In a user study (N=14, D=30) consisting of three common tabletop tasks, EVE outperformed three state-of-the-art interfaces in success rate and was comparable to kinesthetic teaching—physically moving a physical robot—in completion time, usability, motion intent communication, enjoyment, and preference (mean of p=0.30). EVE allows users to train robots for personalized tasks, such as sorting desk supplies, organizing ingredients, or setting up board games. We conclude by enumerating limitations and design considerations for future AR-based demonstration collection systems for robotics.

+

avaTTAR: Table Tennis Stroke Training with On-body and Detached Visualization in Augmented Reality

+

Authors: Dizhi Ma, Xiyun Hu, Jingyu Shi, Mayank Patel, Rahul Jain, Ziyi Liu, Zhengzhe Zhu, Karthik Ramani

+

Link

+

Abstract: Table tennis stroke training is a critical aspect of player development. We designed a new augmented reality (AR) system, avaTTAR, for table tennis stroke training. The system provides both “on-body” (first-person view) and “detached” (third-person view) visual cues, enabling users to visualize target strokes and correct their attempts effectively with this dual-perspective setup. By employing a combination of pose estimation algorithms and IMU sensors, avaTTAR captures and reconstructs the 3D body pose and paddle orientation of users during practice, allowing real-time comparison with expert strokes. Through a user study, we affirm avaTTAR’s capacity to amplify player experience and training results.

+

Break Q&A: Movement-based UIs

+

Feminist Interaction Techniques: Social Consent Signals to Deter NCIM Screenshots

+

Authors: Li Qiwei, Francesca Lameiro, Shefali Patel, Cristi Isaula-Reyes, Eytan Adar, Eric Gilbert, Sarita Schoenebeck

+

Link

+

Abstract: Non-consensual Intimate Media (NCIM) refers to the distribution of sexual or intimate content without consent. NCIM is common and causes significant emotional, financial, and reputational harm. We developed Hands-Off, an interaction technique for messaging applications that deters non-consensual screenshots. Hands-Off requires recipients to perform a hand gesture in the air, above the device, to unlock media—which makes simultaneous screenshotting difficult. A lab study shows that Hands-Off gestures are easy to perform and reduce non-consensual screenshots by 67%. We conclude by generalizing this approach, introducing the idea of Feminist Interaction Techniques (FIT), interaction techniques that encode feminist values and speak to societal problems, and reflecting on FIT’s opportunities and limitations.

+

Effects of Computer Mouse Lift-off Distance Settings in Mouse Lifting Action

+

Authors: Munjeong Kim, Sunjun Kim

+

Link

+

Abstract: This study investigates the effect of Lift-off Distance (LoD) on a computer mouse, which refers to the height at which a mouse sensor stops tracking when lifted off the surface. Although a low LoD is generally preferred to avoid unintentional cursor movement in mouse lifting (i.e., clutching), especially in first-person shooter games, it may reduce tracking stability. We conducted a psychophysical experiment to measure the perceptible differences between LoD levels and quantitatively measured the unintentional cursor movement error and tracking stability at four levels of LoD while users performed mouse lifting. The results showed a trade-off between movement error and tracking stability at varying levels of LoD. Our findings offer valuable information on optimal LoD settings, which could serve as a guide for choosing a proper mouse device for enthusiastic gamers.

+

DisMouse: Disentangling Information from Mouse Movement Data

+

Authors: Guanhua Zhang, Zhiming Hu, Andreas Bulling

+

Link

+

Abstract: Mouse movement data contain rich information about users, performed tasks, and user interfaces, but separating the respective components remains challenging and unexplored. As a first step to address this challenge, we propose DisMouse – the first method to disentangle user-specific and user-independent information and stochastic variations from mouse movement data. At the core of our method is an autoencoder trained in a semi-supervised fashion, consisting of a self-supervised denoising diffusion process and a supervised contrastive user identification module. Through evaluations on three datasets, we show that DisMouse 1) captures complementary information of mouse input, hence providing an interpretable framework for modelling mouse movements, 2) can be used to produce refined features, thus enabling various applications such as personalised and variable mouse data generation, and 3) generalises across different datasets. Taken together, our results underline the significant potential of disentangled representation learning for explainable, controllable, and generalised mouse behaviour modelling.

+

Wheeler: A Three-Wheeled Input Device for Usable, Efficient, and Versatile Non-Visual Interaction

+

HONORABLE_MENTION

+

Authors: Md Touhidul Islam, Noushad Sojib, Imran Kabir, Ashiqur Rahman Amit, Mohammad Ruhul Amin, Syed Masum Billah

+

Link

+

Abstract: Blind users rely on keyboards and assistive technologies like screen readers to interact with user interface (UI) elements. In modern applications with complex UI hierarchies, navigating to different UI elements poses a significant accessibility challenge. Users must listen to screen reader audio descriptions and press relevant keyboard keys one at a time. This paper introduces Wheeler, a novel three-wheeled, mouse-shaped stationary input device, to address this issue. Informed by participatory sessions, Wheeler enables blind users to navigate up to three hierarchical levels in an app independently using three wheels instead of navigating just one level at a time using a keyboard. The three wheels also offer versatility, allowing users to repurpose them for other tasks, such as 2D cursor manipulation. A study with 12 blind users indicates a significant reduction (40%) in navigation time compared to using a keyboard. Further, a diary study with our blind co-author highlights Wheeler's additional benefits, such as accessing UI elements with partial metadata and facilitating mixed-ability collaboration.

+

Break Q&A: Sound & Music

+

SonoHaptics: An Audio-Haptic Cursor for Gaze-Based Object Selection in XR

+

Authors: Hyunsung Cho, Naveen Sendhilnathan, Michael Nebeling, Tianyi Wang, Purnima Padmanabhan, Jonathan Browder, David Lindlbauer, Tanya Jonker, Kashyap Todi

+

Link

+

Abstract: We introduce SonoHaptics, an audio-haptic cursor for gaze-based 3D object selection. SonoHaptics addresses challenges around providing accurate visual feedback during gaze-based selection in Extended Reality (XR), e.g., lack of world-locked displays in no- or limited-display smart glasses and visual inconsistencies. To enable users to distinguish objects without visual feedback, SonoHaptics employs the concept of cross-modal correspondence in human perception to map visual features of objects (color, size, position, material) to audio-haptic properties (pitch, amplitude, direction, timbre). We contribute data-driven models for determining cross-modal mappings of visual features to audio and haptic features, and a computational approach to automatically generate audio-haptic feedback for objects in the user's environment. SonoHaptics provides global feedback that is unique to each object in the scene, and local feedback to amplify differences between nearby objects. Our comparative evaluation shows that SonoHaptics enables accurate object identification and selection in a cluttered scene without visual feedback.
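
A hand-written, hypothetical cross-modal mapping in this spirit follows; it is not SonoHaptics' data-driven models, and the feature ranges and mapping directions are assumptions. Visual features of an object are converted to audio-haptic parameters.

```python
# Hypothetical sketch of a cross-modal mapping from visual features to audio-haptic
# parameters (not SonoHaptics' models). Ranges and directions are assumptions.
def lerp(x, x0, x1, y0, y1):
    """Linearly map x from [x0, x1] to [y0, y1], clamped to the target range."""
    t = min(max((x - x0) / (x1 - x0), 0.0), 1.0)
    return y0 + t * (y1 - y0)

def crossmodal_cue(size_cm: float, hue_deg: float, distance_m: float) -> dict:
    return {
        "pitch_hz":   lerp(hue_deg, 0, 360, 220.0, 880.0),    # color hue -> pitch
        "amplitude":  lerp(size_cm, 5, 100, 0.2, 1.0),        # larger object -> louder
        "haptic_amp": lerp(distance_m, 2.0, 0.2, 0.1, 1.0),   # closer object -> stronger haptics
    }

print(crossmodal_cue(size_cm=30, hue_deg=40, distance_m=0.8))
```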

+

SonifyAR: Context-Aware Sound Generation in Augmented Reality

+

Authors: Xia Su, Jon Froehlich, Eunyee Koh, Chang Xiao

+

Link

+

Abstract: Sound plays a crucial role in enhancing user experience and immersiveness in Augmented Reality (AR). However, current platforms lack support for AR sound authoring due to limited interaction types, challenges in collecting and specifying context information, and difficulty in acquiring matching sound assets. We present SonifyAR, an LLM-based AR sound authoring system that generates context-aware sound effects for AR experiences. SonifyAR expands the current design space of AR sound and implements a Programming by Demonstration (PbD) pipeline to automatically collect contextual information of AR events, including virtual-content-semantics and real-world context. This context information is then processed by a large language model to acquire sound effects with Recommendation, Retrieval, Generation, and Transfer methods. To evaluate the usability and performance of our system, we conducted a user study with eight participants and created five example applications, including an AR-based science experiment, and an assistive application for low-vision AR users.

+

Auptimize: Optimal Placement of Spatial Audio Cues for Extended Reality

+

Authors: Hyunsung Cho, Alexander Wang, Divya Kartik, Emily Xie, Yukang Yan, David Lindlbauer

+

Link

+

Abstract: Spatial audio in Extended Reality (XR) provides users with better awareness of where virtual elements are placed, and efficiently guides them to events such as notifications, system alerts from different windows, or approaching avatars. Humans, however, are inaccurate in localizing sound cues, especially with multiple sources due to limitations in human auditory perception such as angular discrimination error and front-back confusion. This decreases the efficiency of XR interfaces because users misidentify from which XR element a sound is coming. To address this, we propose Auptimize, a novel computational approach for placing XR sound sources, which mitigates such localization errors by utilizing the ventriloquist effect. Auptimize disentangles the sound source locations from the visual elements and relocates the sound sources to optimal positions for unambiguous identification of sound cues, avoiding errors due to inter-source proximity and front-back confusion. Our evaluation shows that Auptimize decreases spatial audio-based source identification errors compared to playing sound cues at the paired visual-sound locations. We demonstrate the applicability of Auptimize for diverse spatial audio-based interactive XR scenarios.
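
A hypothetical sketch of cue relocation in this spirit follows; it is not Auptimize's formulation, and the scoring heuristic, thresholds, and anchor angles are assumptions. Playback azimuths are chosen to stay near the visual anchors, keep cues far apart, and avoid near front-back mirrored pairs.

```python
# Hypothetical sketch of sound-cue relocation (not Auptimize's optimizer).
import itertools

def angular_gap(a: float, b: float) -> float:
    d = abs(a - b) % 360
    return min(d, 360 - d)

visual_anchors = [20, 160, 200]  # azimuths (deg) where the XR elements are shown

def score(angles) -> float:
    pairs = list(itertools.combinations(angles, 2))
    min_gap = min(angular_gap(a, b) for a, b in pairs)
    mirrors = sum(1 for a, b in pairs if angular_gap(180 - a, b) < 15)  # front-back confusable
    drift = sum(angular_gap(a, v) for a, v in zip(angles, visual_anchors))
    return min_gap - 30 * mirrors - 0.2 * drift

candidates = itertools.product(range(0, 360, 15), repeat=len(visual_anchors))
best = max(candidates, key=score)
print("relocated cue azimuths:", best)
```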

+

EarHover: Mid-Air Gesture Recognition for Hearables Using Sound Leakage Signals

+

BEST_PAPER

+

Authors: Shunta Suzuki, Takashi Amesaka, Hiroki Watanabe, Buntarou Shizuki, Yuta Sugiura

+

Link

+

Abstract: We introduce EarHover, an innovative system that enables mid-air gesture input for hearables. Mid-air gesture input, which eliminates the need to touch the device and thus helps to keep hands and the device clean, has been known to have high demand based on previous surveys. However, existing mid-air gesture input methods for hearables have been limited to adding cameras or infrared sensors. By focusing on the sound leakage phenomenon unique to hearables, we have realized mid-air gesture recognition using a speaker and an external microphone that are highly compatible with hearables. The signal leaked to the outside of the device due to sound leakage can be measured by an external microphone, which detects the differences in reflection characteristics caused by the hand's speed and shape during mid-air gestures. Among 27 types of gestures, we determined the seven most suitable gestures for EarHover in terms of signal discrimination and user acceptability. We then evaluated the gesture detection and classification performance of two prototype devices (in-ear type/open-ear type) for real-world application scenarios.

+

Towards Music-Aware Virtual Assistants

+

Authors: Alexander Wang, David Lindlbauer, Chris Donahue

+

Link

+

Abstract: We propose a system for modifying spoken notifications in a manner that is sensitive to the music a user is listening to. Spoken notifications provide convenient access to rich information without the need for a screen. Virtual assistants see prevalent use in hands-free settings such as driving or exercising, activities where users also regularly enjoy listening to music. In such settings, virtual assistants will temporarily mute a user's music to improve intelligibility. However, users may perceive these interruptions as intrusive, negatively impacting their music-listening experience. To address this challenge, we propose the concept of music-aware virtual assistants, where speech notifications are modified to resemble a voice singing in harmony with the user's music. We contribute a system that processes user music and notification text to produce a blended mix, replacing original song lyrics with the notification content. In a user study comparing musical assistants to standard virtual assistants, participants expressed that musical assistants fit better with music, reduced intrusiveness, and provided a more delightful listening experience overall.

\ No newline at end of file diff --git a/sigchi2/UIST_2024_program.json b/sigchi2/UIST_2024_program.json new file mode 100644 index 0000000..b94f9da --- /dev/null +++ b/sigchi2/UIST_2024_program.json @@ -0,0 +1,38546 @@ +{ + "schemeVersion": 7, + "cc_licence": "Content of this file is licensed under a CC BY-NC-SA 4.0 license. For details see https://creativecommons.org/licenses/by-nc-sa/4.0/", + "conference": { + "id": 10121, + "shortName": "UIST", + "displayShortName": "", + "year": 2024, + "startDate": 1728777600000, + "endDate": 1729036800000, + "fullName": "37th Annual ACM Symposium on User Interface Software and Technology", + "url": "https://uist.acm.org/2024/", + "location": "Pittsburgh, PA, USA", + "timeZoneOffset": -240, + "timeZoneName": "America/New_York", + "logoUrl": "https://files.sigchi.org/conference/logo/10121/eae4785f-b23c-5fa9-5dd5-5d2bab3bed3d.png", + "name": "UIST 2024" + }, + "publicationInfo": { + "hideLinksBeforeConference": true, + "version": 30, + "publicationStatus": "PUBLISHED", + "isProgramEnabled": true, + "isDraft": true, + "isRegistrationEnabled": false, + "publicationDate": "2024-10-07 21:08:17+00" + }, + "sponsors": [ + { + "id": 10615, + "name": "Meta Reality Labs", + "logoUrl": "https://files.sigchi.org/conference/sponsor/10121/logo/0d07e00a-a214-7a05-e14e-4bb486ffd0c1.png", + "levelId": 10371, + "url": "https://about.meta.com/realitylabs/", + "order": 0, + "extraPadding": 8 + }, + { + "id": 10616, + "name": "Adobe", + "logoUrl": "https://files.sigchi.org/conference/sponsor/10121/logo/5e3c2635-709c-e8a3-c9f7-2105fc566d7a.png", + "levelId": 10372, + "url": "https://www.adobe.com/", + "order": 0, + "extraPadding": 8 + }, + { + "id": 10617, + "name": "Apple", + "logoUrl": "https://files.sigchi.org/conference/sponsor/10121/logo/6297b2be-aeac-a617-9cc3-2f8f87bb9465.png", + "levelId": 10372, + "url": "https://www.apple.com/", + "order": 1, + "extraPadding": 8 + }, + { + "id": 10618, + "name": "Google", + "logoUrl": "https://files.sigchi.org/conference/sponsor/10121/logo/53114eb2-b29a-d0db-6283-e1e9cf0e8f5b.png", + "levelId": 10372, + "url": "https://www.google.com/", + "order": 2, + "extraPadding": 8 + }, + { + "id": 10619, + "name": "Autodesk", + "logoUrl": "https://files.sigchi.org/conference/sponsor/10121/logo/a659ab26-c4b5-cd05-b598-1124dab41049.png", + "levelId": 10372, + "url": "https://www.autodesk.com/", + "order": 3, + "extraPadding": 8 + } + ], + "sponsorLevels": [ + { + "id": 10370, + "name": "Sponsors", + "rank": 1, + "isDefault": true + }, + { + "id": 10371, + "name": "Platinum Sponsors", + "rank": 2, + "isDefault": false + }, + { + "id": 10372, + "name": "Silver Sponsors", + "rank": 3, + "isDefault": false + } + ], + "floors": [ + { + "id": 10353, + "name": "Westin Hotel: Allegheny (3rd floor)", + "mapImageUrl": "https://files.sigchi.org/conference/floor/10121/2a7bb69e-5303-14f5-9eb0-4552b132f0cb.png", + "roomIds": [ + 11734, + 11735, + 11733, + 11739, + 11738 + ] + } + ], + "rooms": [ + { + "id": 11733, + "name": "Allegheny 3", + "setup": "THEATRE", + "typeId": 13748 + }, + { + "id": 11734, + "name": "Allegheny 1", + "setup": "THEATRE", + "typeId": 13748 + }, + { + "id": 11735, + "name": "Allegheny 2", + "setup": "THEATRE", + "typeId": 13748 + }, + { + "id": 11736, + "name": "August Wilson African American Cultural Center", + "setup": "THEATRE", + "typeId": 13746 + }, + { + "id": 11737, + "name": "CMU Campus: Gates Hillman Center, 4405", + "setup": "THEATRE", + "typeId": 13745 + }, + { + "id": 11738, + "name": "Westin Hotel: Allegheny Grand 
Ballroom", + "setup": "THEATRE", + "typeId": 13758 + }, + { + "id": 11739, + "name": "Allegheny Foyer", + "setup": "THEATRE", + "typeId": 13749 + }, + { + "id": 11741, + "name": "Omni William Penn Hotel Grand Ballroom", + "setup": "SPECIAL", + "typeId": 13746, + "note": "" + }, + { + "id": 11799, + "name": "CMU Campus: Gates Hillman Center, 6115", + "setup": "THEATRE", + "typeId": 13751 + }, + { + "id": 11800, + "name": "CMU Campus: Gates Hillman Center, 6501", + "setup": "THEATRE", + "typeId": 13751 + }, + { + "id": 11802, + "name": "CMU Gates Hilman center 4th floor", + "setup": "THEATRE", + "typeId": 13746, + "note": "" + }, + { + "id": 11803, + "name": "CMU Campus: Newell Simon Hall, 3305", + "setup": "THEATRE", + "typeId": 13751 + }, + { + "id": 11804, + "name": "CMU Campus: Newell Simon Hall, 4305", + "setup": "THEATRE", + "typeId": 13751 + } + ], + "tracks": [ + { + "id": 13017, + "typeId": 13758 + }, + { + "id": 13201, + "name": "UIST 2024 Visions", + "typeId": 13755 + }, + { + "id": 13202, + "name": "UIST 2024 Doctoral Symposium", + "typeId": 13745 + }, + { + "id": 13203, + "name": "UIST 2024 Student Innovation Contest", + "typeId": 13756 + }, + { + "id": 13204, + "name": "UIST 2024 Posters", + "typeId": 13749 + }, + { + "id": 13205, + "name": "UIST 2024 Demos", + "typeId": 13744 + }, + { + "id": 13206, + "name": "UIST 2024 Papers", + "typeId": 13748 + }, + { + "id": 13207, + "name": "UIST 2024 TOCHI", + "typeId": 13757 + }, + { + "id": 13208, + "name": "UIST 2024 Workshops", + "typeId": 13751 + } + ], + "contentTypes": [ + { + "id": 13743, + "name": "Course", + "displayName": "Courses", + "color": "#66c2a4", + "duration": 0 + }, + { + "id": 13744, + "name": "Demo", + "displayName": "Demos", + "color": "#006d2c", + "duration": 180 + }, + { + "id": 13745, + "name": "Doctoral Symposium", + "color": "#6baed6", + "duration": 540 + }, + { + "id": 13746, + "name": "Event", + "displayName": "Events", + "color": "#ffc034", + "duration": 0 + }, + { + "id": 13748, + "name": "Paper", + "displayName": "Papers", + "color": "#0d42cc", + "duration": 15 + }, + { + "id": 13749, + "name": "Poster", + "displayName": "Posters", + "color": "#ff7a00", + "duration": 60 + }, + { + "id": 13751, + "name": "Workshop", + "displayName": "Workshops", + "color": "#f60000", + "duration": 480 + }, + { + "id": 13752, + "name": "Break", + "color": "#7f6aff", + "duration": 20 + }, + { + "id": 13754, + "name": "Ask Me Anything", + "color": "#8e008b", + "duration": 60 + }, + { + "id": 13755, + "name": "Visions", + "color": "#26e5f1", + "duration": 90 + }, + { + "id": 13756, + "name": "Student Innovation Contest", + "color": "#ff99ca", + "duration": 180 + }, + { + "id": 13757, + "name": "TOCHI Papers", + "color": "#acadb9", + "duration": 15 + }, + { + "id": 13758, + "name": "Keynotes", + "color": "#8e008b", + "duration": 80 + }, + { + "id": 13836, + "name": "Breаk", + "color": "#32d923", + "duration": 20 + } + ], + "timeSlots": [ + { + "id": 14312, + "type": "SESSION", + "startDate": 1728913200000, + "endDate": 1728916800000 + }, + { + "id": 14313, + "type": "SESSION", + "startDate": 1728999600000, + "endDate": 1729005000000 + }, + { + "id": 14314, + "type": "SESSION", + "startDate": 1729087200000, + "endDate": 1729091700000 + }, + { + "id": 14315, + "type": "SESSION", + "startDate": 1729008600000, + "endDate": 1729014000000 + }, + { + "id": 14316, + "type": "SESSION", + "startDate": 1729092900000, + "endDate": 1729096500000 + }, + { + "id": 14317, + "type": "SESSION", + "startDate": 1728928800000, + "endDate": 
1728939600000 + }, + { + "id": 14318, + "type": "SESSION", + "startDate": 1729069200000, + "endDate": 1729072800000 + }, + { + "id": 14319, + "type": "SESSION", + "startDate": 1728918000000, + "endDate": 1728921600000 + }, + { + "id": 14320, + "type": "SESSION", + "startDate": 1729076400000, + "endDate": 1729080000000 + }, + { + "id": 14321, + "type": "SESSION", + "startDate": 1728810000000, + "endDate": 1728837000000 + }, + { + "id": 14322, + "type": "SESSION", + "startDate": 1728923400000, + "endDate": 1728928800000 + }, + { + "id": 14323, + "type": "SESSION", + "startDate": 1728988800000, + "endDate": 1728992400000 + }, + { + "id": 14324, + "type": "SESSION", + "startDate": 1728902400000, + "endDate": 1728906000000 + }, + { + "id": 14325, + "type": "SESSION", + "startDate": 1728982800000, + "endDate": 1728987300000 + }, + { + "id": 14326, + "type": "SESSION", + "startDate": 1729005000000, + "endDate": 1729008600000 + }, + { + "id": 14327, + "type": "SESSION", + "startDate": 1729072800000, + "endDate": 1729076400000 + }, + { + "id": 14328, + "type": "LUNCH", + "startDate": 1728906000000, + "endDate": 1728909600000 + }, + { + "id": 14329, + "type": "LUNCH", + "startDate": 1728909600000, + "endDate": 1728913200000 + }, + { + "id": 14330, + "type": "SESSION", + "startDate": 1728896400000, + "endDate": 1728901200000 + }, + { + "id": 14331, + "type": "BREAK", + "startDate": 1728901200000, + "endDate": 1728902400000 + }, + { + "id": 14332, + "type": "BREAK", + "startDate": 1728916800000, + "endDate": 1728918000000 + }, + { + "id": 14333, + "type": "BREAK", + "startDate": 1728921600000, + "endDate": 1728923400000 + }, + { + "id": 14334, + "type": "BREAK", + "startDate": 1729091700000, + "endDate": 1729092900000 + }, + { + "id": 14335, + "type": "LUNCH", + "startDate": 1729080000000, + "endDate": 1729087200000 + }, + { + "id": 14336, + "type": "SESSION", + "startDate": 1729098000000, + "endDate": 1729102800000 + }, + { + "id": 14337, + "type": "BREAK", + "startDate": 1728987300000, + "endDate": 1728988800000 + }, + { + "id": 14338, + "type": "LUNCH", + "startDate": 1728992400000, + "endDate": 1728996000000 + }, + { + "id": 14339, + "type": "LUNCH", + "startDate": 1728996000000, + "endDate": 1728999600000 + }, + { + "id": 14340, + "type": "BREAK", + "startDate": 1729014000000, + "endDate": 1729017600000 + }, + { + "id": 14341, + "type": "SESSION", + "startDate": 1729017600000, + "endDate": 1729030200000 + }, + { + "id": 14461, + "type": "SESSION", + "startDate": 1729083600000, + "endDate": 1729087200000 + }, + { + "id": 14462, + "type": "BREAK", + "startDate": 1729096500000, + "endDate": 1729098000000 + }, + { + "id": 14463, + "type": "SESSION", + "startDate": 1729091700000, + "endDate": 1729092600000 + }, + { + "id": 14464, + "type": "SESSION", + "startDate": 1728906000000, + "endDate": 1728906900000 + }, + { + "id": 14465, + "type": "SESSION", + "startDate": 1728916800000, + "endDate": 1728917700000 + }, + { + "id": 14466, + "type": "SESSION", + "startDate": 1729072800000, + "endDate": 1729073700000 + }, + { + "id": 14467, + "type": "SESSION", + "startDate": 1729080000000, + "endDate": 1729080900000 + }, + { + "id": 14468, + "type": "SESSION", + "startDate": 1728987300000, + "endDate": 1728988200000 + }, + { + "id": 14469, + "type": "SESSION", + "startDate": 1729014000000, + "endDate": 1729014900000 + }, + { + "id": 14470, + "type": "SESSION", + "startDate": 1729005000000, + "endDate": 1729005900000 + }, + { + "id": 14471, + "type": "SESSION", + "startDate": 1728921600000, + "endDate": 
1728922500000 + }, + { + "id": 14472, + "type": "SESSION", + "startDate": 1728992400000, + "endDate": 1728993300000 + }, + { + "id": 14473, + "type": "SESSION", + "startDate": 1728810000000, + "endDate": 1728842400000 + }, + { + "id": 14474, + "type": "SESSION", + "startDate": 1728810000000, + "endDate": 1728838800000 + } + ], + "sessions": [ + { + "id": 171019, + "name": "Manipulating Text", + "isParallelPresentation": false, + "importedId": "jFKXNWJ04tNrWkaLtxsb2Q", + "typeId": 13748, + "roomId": 11733, + "chairIds": [ + 170346 + ], + "contentIds": [ + 170790, + 170838, + 170930, + 170824 + ], + "source": "PCS", + "timeSlotId": 14312, + "note": "" + }, + { + "id": 171020, + "name": "Future Fabrics", + "isParallelPresentation": false, + "importedId": "VHdE9qb_Mc3z750n-oMbMg", + "typeId": 13748, + "roomId": 11734, + "chairIds": [ + 170069, + 170328 + ], + "contentIds": [ + 170743, + 170811, + 170971, + 170935, + 171007, + 170873 + ], + "source": "PCS", + "timeSlotId": 14313, + "note": "" + }, + { + "id": 171021, + "name": "Storytime", + "isParallelPresentation": false, + "importedId": "5TxV7ybh0RGPQuqWh2h_tA", + "typeId": 13748, + "roomId": 11733, + "chairIds": [ + 175053 + ], + "contentIds": [ + 170763, + 170765, + 170783, + 170730, + 170729, + 170806 + ], + "source": "PCS", + "timeSlotId": 14313, + "note": "" + }, + { + "id": 171022, + "name": "Beyond mobile", + "isParallelPresentation": false, + "importedId": "ZfgZ3hRm9WQVZOm_FJmt1A", + "typeId": 13748, + "roomId": 11734, + "chairIds": [ + 175037 + ], + "contentIds": [ + 170844, + 170782, + 170914, + 170840 + ], + "source": "PCS", + "timeSlotId": 14312, + "note": "" + }, + { + "id": 171023, + "name": "Validation in AI/ML", + "isParallelPresentation": false, + "importedId": "aqMLjJ2JHhvuG5jMG860bw", + "typeId": 13748, + "roomId": 11733, + "chairIds": [ + 170699 + ], + "contentIds": [ + 170826, + 170954, + 170831, + 170784, + 170762 + ], + "source": "PCS", + "timeSlotId": 14314, + "note": "" + }, + { + "id": 171024, + "name": "A11y", + "isParallelPresentation": false, + "importedId": "s24F7NU7F6bdQPaRGkgGUQ", + "typeId": 13748, + "roomId": 11735, + "chairIds": [ + 169726 + ], + "contentIds": [ + 170960, + 170887, + 170825, + 170852, + 171009, + 170991 + ], + "source": "PCS", + "timeSlotId": 14315, + "note": "" + }, + { + "id": 171025, + "name": "Contextual Augmentations", + "isParallelPresentation": false, + "importedId": "yfqK1CM6_gQqCGOqzyw8cg", + "typeId": 13748, + "roomId": 11735, + "chairIds": [ + 170235 + ], + "contentIds": [ + 171003, + 170940, + 170874, + 170953 + ], + "source": "PCS", + "timeSlotId": 14316, + "note": "" + }, + { + "id": 171026, + "name": "Dynamic Objects & Materials", + "isParallelPresentation": false, + "importedId": "-ggkErdaTamb3N7wgLme_w", + "typeId": 13748, + "roomId": 11735, + "chairIds": [ + 170082 + ], + "contentIds": [ + 170860, + 170995, + 170742, + 170733 + ], + "source": "PCS", + "timeSlotId": 14312, + "note": "" + }, + { + "id": 171027, + "name": "Student Innovation Contest", + "isParallelPresentation": true, + "importedId": "DtCOG3qrrpCX5ggKfKoeZg", + "typeId": 13756, + "roomId": 11736, + "chairIds": [ + 170310, + 170328 + ], + "contentIds": [ + 170923, + 170899, + 170986, + 170738, + 170836, + 170737, + 171013, + 170822 + ], + "source": "PCS", + "timeSlotId": 14317, + "note": "" + }, + { + "id": 171028, + "name": "Generating Visuals", + "isParallelPresentation": false, + "importedId": "Xvyrf9oO9LfjWF-C-c46Jw", + "typeId": 13748, + "roomId": 11733, + "chairIds": [ + 170543 + ], + "contentIds": [ + 
170726, + 170924, + 170929, + 170878 + ], + "source": "PCS", + "timeSlotId": 14318, + "note": "" + }, + { + "id": 171029, + "name": "Movement-based UIs", + "isParallelPresentation": false, + "importedId": "OFx74y53bNsC_hMwy117rQ", + "typeId": 13748, + "roomId": 11735, + "chairIds": [ + 169856 + ], + "contentIds": [ + 170858, + 170957, + 170847, + 170848 + ], + "source": "PCS", + "timeSlotId": 14319, + "note": "" + }, + { + "id": 171030, + "name": "Hacking Perception", + "isParallelPresentation": false, + "importedId": "fD8Y4sY3f1Qw6ObDlduu4g", + "typeId": 13748, + "roomId": 11734, + "chairIds": [ + 175039 + ], + "contentIds": [ + 171017, + 170839, + 170728, + 170896 + ], + "source": "PCS", + "timeSlotId": 14319, + "note": "" + }, + { + "id": 171031, + "name": "New realities", + "isParallelPresentation": false, + "importedId": "K39X7bA_Uav0mh6C6wgVAA", + "typeId": 13748, + "roomId": 11734, + "chairIds": [ + 170330 + ], + "contentIds": [ + 170989, + 171002, + 170805, + 170920, + 170853, + 170771 + ], + "source": "PCS", + "timeSlotId": 14315, + "note": "" + }, + { + "id": 171032, + "name": "Prototyping", + "isParallelPresentation": false, + "importedId": "gTbNhmucmJ-I_wh8oINvaw", + "typeId": 13748, + "roomId": 11734, + "chairIds": [ + 170572 + ], + "contentIds": [ + 170974, + 170857, + 170881, + 170886 + ], + "source": "PCS", + "timeSlotId": 14318, + "note": "" + }, + { + "id": 171033, + "name": "Sustainable Interfaces", + "isParallelPresentation": false, + "importedId": "YoFd4rjUCoWbhH9Sq53hUg", + "typeId": 13748, + "roomId": 11734, + "chairIds": [ + 169695 + ], + "contentIds": [ + 170959, + 170976, + 170931, + 170834 + ], + "source": "PCS", + "timeSlotId": 14320, + "note": "" + }, + { + "id": 171034, + "name": "Sound & Music", + "isParallelPresentation": false, + "importedId": "pTgEDm5RNs3b4nbQgq4Lig", + "typeId": 13748, + "roomId": 11735, + "chairIds": [ + 175034 + ], + "contentIds": [ + 170927, + 170866, + 170952, + 170787, + 170955 + ], + "source": "PCS", + "timeSlotId": 14314, + "note": "" + }, + { + "id": 171035, + "name": "Demos", + "isParallelPresentation": true, + "importedId": "etCRqxQWf38sZA4ry_fyUw", + "typeId": 13744, + "roomId": 11736, + "chairIds": [ + 175054, + 170261 + ], + "contentIds": [ + 170780, + 170793, + 170792, + 170963, + 170809, + 170815, + 170967, + 170970, + 171011, + 171014, + 170799, + 170900, + 170904, + 170786, + 170807, + 170998, + 170988, + 170863, + 170718, + 170785, + 170983, + 170759, + 170748, + 170921, + 170745, + 170753, + 171005, + 170747, + 170777, + 170774, + 170801, + 170864, + 170757, + 170919, + 170794, + 170723, + 170813, + 170872, + 170810, + 170966, + 170973, + 170977, + 170868, + 170893, + 170804, + 170744, + 170808, + 170749, + 170984, + 170773, + 170778, + 170750, + 170796, + 170734, + 170897, + 170975, + 170901, + 170760, + 170903, + 170727, + 171004, + 170871, + 170758, + 170802, + 170746, + 170892, + 170797, + 170917, + 171015, + 170755, + 170968, + 170992, + 170869, + 170985, + 170735, + 170768, + 170997, + 170965, + 170999, + 170756, + 170870, + 170779, + 170993, + 170776, + 170969 + ], + "source": "PCS", + "timeSlotId": 14317, + "note": "" + }, + { + "id": 171036, + "name": "Doctoral Symposium (Not Public)", + "isParallelPresentation": true, + "importedId": "ju1MgqD0aX0QBuBq52vCag", + "typeId": 13745, + "roomId": 11737, + "chairIds": [ + 170298, + 171097, + 175055, + 169893, + 170310 + ], + "contentIds": [ + 170916, + 170891, + 170890, + 170908, + 170909, + 170928, + 170948, + 170958 + ], + "source": "PCS", + "timeSlotId": 14473, + 
"note": "" + }, + { + "id": 171037, + "name": "Learning to Learn", + "isParallelPresentation": false, + "importedId": "QX8OKI16JmWyesyOSf__NQ", + "typeId": 13748, + "roomId": 11733, + "chairIds": [ + 170534 + ], + "contentIds": [ + 170882, + 170907, + 170964, + 170883 + ], + "source": "PCS", + "timeSlotId": 14316, + "note": "" + }, + { + "id": 171038, + "name": "Hot Interfaces", + "isParallelPresentation": false, + "importedId": "ebXS_MX2PuN6rqf0ThUMdA", + "typeId": 13748, + "roomId": 11735, + "chairIds": [ + 169862 + ], + "contentIds": [ + 170880, + 170898, + 170885, + 170722 + ], + "source": "PCS", + "timeSlotId": 14318, + "note": "" + }, + { + "id": 171039, + "name": "Vision talks", + "isParallelPresentation": true, + "importedId": "Sezi1rbsnm8wgvRSfFDlfQ", + "typeId": 13755, + "roomId": 11738, + "chairIds": [ + 170021, + 175053 + ], + "contentIds": [ + 170850, + 171008 + ], + "source": "PCS", + "timeSlotId": 14322, + "note": "" + }, + { + "id": 171040, + "name": "FABulous", + "isParallelPresentation": false, + "importedId": "PDXOSeWLlvL0hskeLbrffA", + "typeId": 13748, + "roomId": 11734, + "chairIds": [ + 170244 + ], + "contentIds": [ + 170736, + 170936, + 170731, + 170996, + 170944 + ], + "source": "PCS", + "timeSlotId": 14314, + "note": "" + }, + { + "id": 171041, + "name": "Haptics", + "isParallelPresentation": false, + "importedId": "BQuEVaC3g5QsQGNJcwHUmg", + "typeId": 13748, + "roomId": 11734, + "chairIds": [ + 170424 + ], + "contentIds": [ + 171016, + 170767, + 170833, + 170972 + ], + "source": "PCS", + "timeSlotId": 14316, + "note": "" + }, + { + "id": 171042, + "name": "Vision-based UIs", + "isParallelPresentation": false, + "importedId": "Vnv2Y2m2Lyj4u0jLaG-bGg", + "typeId": 13748, + "roomId": 11735, + "chairIds": [ + 170605 + ], + "contentIds": [ + 170938, + 170877, + 170932, + 170867 + ], + "source": "PCS", + "timeSlotId": 14323, + "note": "" + }, + { + "id": 171043, + "name": "Future of Typing", + "isParallelPresentation": false, + "importedId": "me9zy3zUZnHT4SLyOleFYg", + "typeId": 13748, + "roomId": 11735, + "chairIds": [ + 170349 + ], + "contentIds": [ + 170879, + 170781, + 170817, + 170719 + ], + "source": "PCS", + "timeSlotId": 14324, + "note": "" + }, + { + "id": 171044, + "name": "Bodily Signals", + "isParallelPresentation": false, + "importedId": "HkM1owxHsv3oWhsHBEdVdA", + "typeId": 13748, + "roomId": 11734, + "chairIds": [ + 169686 + ], + "contentIds": [ + 170939, + 170941, + 171010, + 171000 + ], + "source": "PCS", + "timeSlotId": 14323, + "note": "" + }, + { + "id": 171045, + "name": "Shared Spaces", + "isParallelPresentation": false, + "importedId": "4TDNaPEqGORBIaIeowbfFQ", + "typeId": 13748, + "roomId": 11735, + "chairIds": [ + 170019 + ], + "contentIds": [ + 170854, + 170812, + 170856, + 170830, + 170843 + ], + "source": "PCS", + "timeSlotId": 14325, + "note": "" + }, + { + "id": 171046, + "name": "AI & Automation", + "isParallelPresentation": false, + "importedId": "cjR2usSfLZ5HkokMCgWGgg", + "typeId": 13748, + "roomId": 11733, + "chairIds": [ + 169973 + ], + "contentIds": [ + 170835, + 170751, + 170861, + 170937 + ], + "source": "PCS", + "timeSlotId": 14323, + "note": "" + }, + { + "id": 171047, + "name": "Poses as Input", + "isParallelPresentation": false, + "importedId": "69oeBAcY39iYAalFAkCw6Q", + "typeId": 13748, + "roomId": 11735, + "chairIds": [ + 175040 + ], + "contentIds": [ + 170905, + 170926, + 170875, + 170732, + 170956, + 170739 + ], + "source": "PCS", + "timeSlotId": 14313, + "note": "" + }, + { + "id": 171048, + "name": "AI as Copilot", + 
"isParallelPresentation": false, + "importedId": "z53-spZY5H89x36hjh322g", + "typeId": 13748, + "roomId": 11733, + "chairIds": [ + 170554 + ], + "contentIds": [ + 170741, + 170918, + 170725, + 170827, + 171001, + 170933 + ], + "source": "PCS", + "timeSlotId": 14315, + "note": "" + }, + { + "id": 171049, + "name": "Poster Session A", + "isParallelPresentation": true, + "importedId": "RcnS6wLkindWbiHCGBPXAw", + "typeId": 13749, + "roomId": 11739, + "chairIds": [ + 175035, + 170696 + ], + "contentIds": [ + 170764, + 170884, + 170982, + 170947, + 170980, + 170772, + 170849, + 170888, + 170770, + 170821, + 170902, + 170912, + 170721, + 170889, + 170943, + 170855, + 170837, + 170769, + 170842, + 170962, + 170841, + 170752, + 170740 + ], + "source": "PCS", + "timeSlotId": 14326, + "note": "" + }, + { + "id": 171050, + "name": "Body as the interface", + "isParallelPresentation": false, + "importedId": "P1CVvKjgHllS3BKS9V6uig", + "typeId": 13748, + "roomId": 11734, + "chairIds": [ + 169715 + ], + "contentIds": [ + 170798, + 170990, + 170775, + 170761 + ], + "source": "PCS", + "timeSlotId": 14324, + "note": "" + }, + { + "id": 171051, + "name": "New Vizualizations", + "isParallelPresentation": false, + "importedId": "niE57VtF01d1n53J_nTJzA", + "typeId": 13748, + "roomId": 11733, + "chairIds": [ + 175035 + ], + "contentIds": [ + 170791, + 170911, + 170803, + 170894 + ], + "source": "PCS", + "timeSlotId": 14319, + "note": "" + }, + { + "id": 171052, + "name": "Big to Small Fab", + "isParallelPresentation": false, + "importedId": "JxkSz0ur2-y6KSyyrUEvkQ", + "typeId": 13748, + "roomId": 11734, + "chairIds": [ + 170328, + 170069 + ], + "contentIds": [ + 170846, + 170987, + 170934, + 171006, + 172831 + ], + "source": "PCS", + "timeSlotId": 14325, + "note": "" + }, + { + "id": 171053, + "name": "Machine Learning for User Interfaces", + "isParallelPresentation": false, + "importedId": "nEYcTOXlmOFOzGzObmBEtg", + "typeId": 13748, + "roomId": 11733, + "chairIds": [ + 175036 + ], + "contentIds": [ + 170950, + 170823, + 170925, + 170994, + 170816 + ], + "source": "PCS", + "timeSlotId": 14325, + "note": "" + }, + { + "id": 171054, + "name": "Workshop - Democratizing Intelligent Soft Wearables", + "isParallelPresentation": false, + "importedId": "iw9U3W1hJnZHFyBwV3EFNA", + "typeId": 13751, + "roomId": 11803, + "chairIds": [], + "contentIds": [ + 170720 + ], + "source": "PCS", + "timeSlotId": 14474, + "note": "" + }, + { + "id": 171055, + "name": "Programming UI", + "isParallelPresentation": false, + "importedId": "mF5v7Drpx4O0LRiCmeVSDg", + "typeId": 13748, + "roomId": 11733, + "chairIds": [ + 170064 + ], + "contentIds": [ + 170819, + 170922, + 171012, + 170951 + ], + "source": "PCS", + "timeSlotId": 14324, + "note": "" + }, + { + "id": 171056, + "name": "Next Gen Input", + "isParallelPresentation": false, + "importedId": "JU-apFWrcnV0C9Pxf3xKHA", + "typeId": 13748, + "roomId": 11735, + "chairIds": [ + 175038 + ], + "contentIds": [ + 170724, + 170876, + 171018, + 170795 + ], + "source": "PCS", + "timeSlotId": 14320, + "note": "" + }, + { + "id": 171057, + "name": "Workshop - Dynamic Abstractions: Building the Next Generation of Cognitive Tools and Interfaces", + "isParallelPresentation": false, + "importedId": "OraHl2c9xe1ip9nRDXA4EQ", + "typeId": 13751, + "roomId": 11799, + "chairIds": [], + "contentIds": [ + 170851 + ], + "source": "PCS", + "timeSlotId": 14474, + "note": "" + }, + { + "id": 171058, + "name": "Workshop - Bridging disciplines for a new era in Physical AI", + "isParallelPresentation": false, + 
"importedId": "hpWeMTLRZRYApYUlAubsYA", + "typeId": 13751, + "roomId": 11800, + "chairIds": [], + "contentIds": [ + 170895 + ], + "source": "PCS", + "timeSlotId": 14474, + "note": "" + }, + { + "id": 171059, + "name": "Poster Session B", + "isParallelPresentation": true, + "importedId": "6P5kk19rYEFV-M-T45hP7g", + "typeId": 13749, + "roomId": 11739, + "chairIds": [ + 175035, + 170696 + ], + "contentIds": [ + 170978, + 170910, + 170949, + 170961, + 170915, + 170818, + 170859, + 170906, + 170766, + 170788, + 170716, + 170717, + 170913, + 170979, + 170832, + 170862, + 170814, + 170865, + 170946, + 170942, + 170981, + 170945 + ], + "source": "PCS", + "timeSlotId": 14327, + "note": "" + }, + { + "id": 171060, + "name": "Workshop - HRI and UIST: Designing Socially Engaging Robot Interfaces", + "isParallelPresentation": false, + "importedId": "ZcJY8p9yC0o1ET9tlxXpNw", + "typeId": 13751, + "roomId": 11804, + "chairIds": [], + "contentIds": [ + 170829 + ], + "source": "PCS", + "timeSlotId": 14474, + "note": "" + }, + { + "id": 171061, + "name": "LLM: New applications", + "isParallelPresentation": false, + "importedId": "e1Zy9J7pRnHVZt9uo4felg", + "typeId": 13748, + "roomId": 11733, + "chairIds": [ + 170293 + ], + "contentIds": [ + 170789, + 170845, + 170820, + 170828 + ], + "source": "PCS", + "timeSlotId": 14320, + "note": "" + }, + { + "id": 171063, + "name": "AMA Session with Prof. Hiroshi Ishii: Vision-Driven Research", + "isParallelPresentation": false, + "importedId": "14649", + "typeId": 13754, + "roomId": 11735, + "chairIds": [], + "contentIds": [], + "source": "SYS", + "timeSlotId": 14329 + }, + { + "id": 171064, + "name": "AMA session with Dr. Cindy L. Bethel: NSF Grant Insights for HCI Researchers", + "isParallelPresentation": false, + "importedId": "14650", + "typeId": 13754, + "roomId": 11735, + "chairIds": [], + "contentIds": [], + "source": "SYS", + "timeSlotId": 14339 + }, + { + "id": 171065, + "name": "Banquet & Keynote (Yaser Sheikh)", + "isParallelPresentation": false, + "importedId": "14651", + "typeId": 13758, + "roomId": 11741, + "chairIds": [], + "contentIds": [ + 171103 + ], + "source": "SYS", + "timeSlotId": 14341 + }, + { + "id": 171066, + "name": "Opening Remarks & Keynote (Raj Reddy)", + "isParallelPresentation": false, + "importedId": "14652", + "typeId": 13758, + "roomId": 11738, + "chairIds": [], + "contentIds": [ + 171101 + ], + "source": "SYS", + "timeSlotId": 14330 + }, + { + "id": 171067, + "name": "Closing Remarks & Lasting Impact Award Winner Talk", + "isParallelPresentation": false, + "importedId": "14653", + "typeId": 13758, + "roomId": 11738, + "chairIds": [], + "contentIds": [ + 171110 + ], + "source": "SYS", + "timeSlotId": 14336 + }, + { + "id": 171068, + "name": "Coffee break", + "isParallelPresentation": false, + "importedId": "14654", + "typeId": 13836, + "roomId": 11739, + "chairIds": [], + "contentIds": [], + "source": "SYS", + "timeSlotId": 14331 + }, + { + "id": 171069, + "name": "Coffee break", + "isParallelPresentation": false, + "importedId": "14655", + "typeId": 13836, + "roomId": 11739, + "chairIds": [], + "contentIds": [], + "source": "SYS", + "timeSlotId": 14332 + }, + { + "id": 171070, + "name": "Break to change location", + "isParallelPresentation": false, + "importedId": "14656", + "typeId": 13836, + "roomId": 11739, + "chairIds": [], + "contentIds": [], + "source": "SYS", + "timeSlotId": 14333 + }, + { + "id": 171071, + "name": "Coffee break", + "isParallelPresentation": false, + "importedId": "14657", + "typeId": 13836, + "roomId": 11739, 
+ "chairIds": [], + "contentIds": [], + "source": "SYS", + "timeSlotId": 14337 + }, + { + "id": 171072, + "name": "Break to change location", + "isParallelPresentation": false, + "importedId": "14658", + "typeId": 13836, + "roomId": 11739, + "chairIds": [], + "contentIds": [], + "source": "SYS", + "timeSlotId": 14340 + }, + { + "id": 171073, + "name": "Coffee break (with Meta recruiting table)", + "isParallelPresentation": false, + "importedId": "14659", + "typeId": 13836, + "roomId": 11739, + "chairIds": [], + "contentIds": [], + "source": "SYS", + "timeSlotId": 14334 + }, + { + "id": 175057, + "name": "Industry Career Information Session", + "isParallelPresentation": false, + "importedId": "14946", + "typeId": 13746, + "chairIds": [], + "contentIds": [], + "source": "SYS", + "timeSlotId": 14461 + }, + { + "id": 175058, + "name": "Coffee break", + "isParallelPresentation": false, + "importedId": "14947", + "typeId": 13836, + "roomId": 11739, + "chairIds": [], + "contentIds": [], + "source": "SYS", + "timeSlotId": 14462 + }, + { + "id": 175059, + "name": "Break Q&A: Haptics", + "isParallelPresentation": true, + "importedId": "uqdmbBJdHn2RcDSjZKKEcg", + "typeId": 13748, + "roomId": 11734, + "chairIds": [], + "contentIds": [ + 171016, + 170767, + 170833, + 170972, + 170982 + ], + "source": "PCS", + "timeSlotId": 14463, + "note": "" + }, + { + "id": 175060, + "name": "Break Q&A: Body as the interface", + "isParallelPresentation": true, + "importedId": "jcgjKA95sgecA0i0wB_-Dg", + "typeId": 13748, + "roomId": 11734, + "chairIds": [], + "contentIds": [ + 170798, + 170990, + 170775, + 170761, + 170981, + 170961 + ], + "source": "PCS", + "timeSlotId": 14464, + "note": "" + }, + { + "id": 175061, + "name": "Break Q&A: Vision-based UIs", + "isParallelPresentation": true, + "importedId": "NcPE6q0YzamVSoHRDET5ww", + "typeId": 13748, + "roomId": 11735, + "chairIds": [], + "contentIds": [ + 170938, + 170877, + 170932, + 170867, + 170946 + ], + "source": "PCS", + "timeSlotId": 14472, + "note": "" + }, + { + "id": 175062, + "name": "Break Q&A: Next Gen Input", + "isParallelPresentation": true, + "importedId": "TBcMMUnUdyXPleYV6iC20w", + "typeId": 13748, + "roomId": 11735, + "chairIds": [], + "contentIds": [ + 170724, + 170876, + 171018, + 170795, + 170912 + ], + "source": "PCS", + "timeSlotId": 14467, + "note": "" + }, + { + "id": 175063, + "name": "Break Q&A: Future of Typing", + "isParallelPresentation": true, + "importedId": "ATSxEDAzsm7GrP28tduung", + "typeId": 13748, + "roomId": 11735, + "chairIds": [], + "contentIds": [ + 170879, + 170781, + 170817, + 170719 + ], + "source": "PCS", + "timeSlotId": 14464, + "note": "" + }, + { + "id": 175064, + "name": "Break Q&A: Storytime", + "isParallelPresentation": true, + "importedId": "WTClZEj7Q3bNdHiRwRsxwA", + "typeId": 13748, + "roomId": 11733, + "chairIds": [], + "contentIds": [ + 170763, + 170765, + 170783, + 170730, + 170729, + 170806, + 170949, + 170906 + ], + "source": "PCS", + "timeSlotId": 14470, + "note": "" + }, + { + "id": 175065, + "name": "Break Q&A: Manipulating Text", + "isParallelPresentation": true, + "importedId": "cGXusIOWvnFw_9wmjQJSpQ", + "typeId": 13748, + "roomId": 11733, + "chairIds": [], + "contentIds": [ + 170790, + 170838, + 170930, + 170824, + 170980 + ], + "source": "PCS", + "timeSlotId": 14465, + "note": "" + }, + { + "id": 175066, + "name": "Break Q&A: Hot Interfaces", + "isParallelPresentation": true, + "importedId": "hiYBtGWddKsIuO4J6nu5fw", + "typeId": 13748, + "roomId": 11735, + "chairIds": [], + "contentIds": [ + 170880, + 
170898, + 170885, + 170722 + ], + "source": "PCS", + "timeSlotId": 14466, + "note": "" + }, + { + "id": 175067, + "name": "Break Q&A: LLM: New applications", + "isParallelPresentation": true, + "importedId": "wNPXO6Knt_tMup33iqU-Tg", + "typeId": 13748, + "roomId": 11733, + "chairIds": [], + "contentIds": [ + 170789, + 170845, + 170820, + 170828, + 170837, + 170821 + ], + "source": "PCS", + "timeSlotId": 14467, + "note": "" + }, + { + "id": 175068, + "name": "Break Q&A: Big to Small Fab", + "isParallelPresentation": false, + "importedId": "YdBQhTQylSs9HyPxWRXPMg", + "typeId": 13748, + "roomId": 11734, + "chairIds": [], + "contentIds": [ + 170846, + 170987, + 170934, + 171006, + 172831, + 170740, + 170862 + ], + "source": "PCS", + "timeSlotId": 14468, + "note": "" + }, + { + "id": 175069, + "name": "Break Q&A: Shared Spaces", + "isParallelPresentation": true, + "importedId": "x4tbtggHnp2oobT3DhlfzA", + "typeId": 13748, + "roomId": 11735, + "chairIds": [], + "contentIds": [ + 170854, + 170843, + 170812, + 170856, + 170830, + 170889 + ], + "source": "PCS", + "timeSlotId": 14468, + "note": "" + }, + { + "id": 175070, + "name": "Break Q&A: Learning to Learn", + "isParallelPresentation": true, + "importedId": "kd7182BiRprAo0Sd_aFp0w", + "typeId": 13748, + "roomId": 11733, + "chairIds": [], + "contentIds": [ + 170882, + 170907, + 170964, + 170883, + 170947 + ], + "source": "PCS", + "timeSlotId": 14463, + "note": "" + }, + { + "id": 175071, + "name": "Break Q&A: Generating Visuals", + "isParallelPresentation": true, + "importedId": "gEoIFdhHJI1pLeOW-jeGjA", + "typeId": 13748, + "roomId": 11733, + "chairIds": [], + "contentIds": [ + 170726, + 170924, + 170929, + 170878, + 170752, + 170841 + ], + "source": "PCS", + "timeSlotId": 14466, + "note": "" + }, + { + "id": 175072, + "name": "Break Q&A: Hacking Perception", + "isParallelPresentation": true, + "importedId": "J-ZJ4g9y3Pg_m1pm5Nf5DQ", + "typeId": 13748, + "roomId": 11734, + "chairIds": [], + "contentIds": [ + 171017, + 170839, + 170728, + 170896, + 170788, + 170902 + ], + "source": "PCS", + "timeSlotId": 14471, + "note": "" + }, + { + "id": 175073, + "name": "Break Q&A: Beyond mobile", + "isParallelPresentation": true, + "importedId": "CXY8ZKCp7jjfYowWb-we-g", + "typeId": 13748, + "roomId": 11734, + "chairIds": [], + "contentIds": [ + 170844, + 170782, + 170914, + 170840, + 170721, + 170764 + ], + "source": "PCS", + "timeSlotId": 14465, + "note": "" + }, + { + "id": 175074, + "name": "Break Q&A: New realities", + "isParallelPresentation": true, + "importedId": "6RdbLwpuH-zDhGkzyqyiKw", + "typeId": 13748, + "roomId": 11734, + "chairIds": [], + "contentIds": [ + 170989, + 171002, + 170805, + 170920, + 170853, + 170771, + 170945 + ], + "source": "PCS", + "timeSlotId": 14469, + "note": "" + }, + { + "id": 175075, + "name": "Break Q&A: Contextual Augmentations", + "isParallelPresentation": true, + "importedId": "5T4Zvhbi8nurFLToO55u0Q", + "typeId": 13748, + "roomId": 11735, + "chairIds": [], + "contentIds": [ + 171003, + 170940, + 170874, + 170953, + 170772 + ], + "source": "PCS", + "timeSlotId": 14463, + "note": "" + }, + { + "id": 175076, + "name": "Break Q&A: Machine Learning for User Interfaces", + "isParallelPresentation": true, + "importedId": "D-fFeDnAhfMtkjdrV-yalA", + "typeId": 13748, + "roomId": 11733, + "chairIds": [], + "contentIds": [ + 170950, + 170823, + 170925, + 170994, + 170816, + 170842 + ], + "source": "PCS", + "timeSlotId": 14468, + "note": "" + }, + { + "id": 175077, + "name": "Break Q&A: Poses as Input", + 
"isParallelPresentation": true, + "importedId": "7dxz3FN6Q94Fn-Ao8edoiA", + "typeId": 13748, + "roomId": 11735, + "chairIds": [], + "contentIds": [ + 170905, + 170926, + 170875, + 170732, + 170956, + 170739, + 170716, + 170910 + ], + "source": "PCS", + "timeSlotId": 14470, + "note": "" + }, + { + "id": 175078, + "name": "Break Q&A: A11y", + "isParallelPresentation": true, + "importedId": "091fMGiMb1pSvwzvlCfeSA", + "typeId": 13748, + "roomId": 11735, + "chairIds": [], + "contentIds": [ + 170960, + 170887, + 170825, + 170852, + 171009, + 170991, + 170913, + 170832 + ], + "source": "PCS", + "timeSlotId": 14469, + "note": "" + }, + { + "id": 175079, + "name": "Break Q&A: Sustainable Interfaces", + "isParallelPresentation": true, + "importedId": "tNt4qfnkR51al7LuJAK7Ug", + "typeId": 13748, + "roomId": 11734, + "chairIds": [], + "contentIds": [ + 170959, + 170976, + 170931, + 170834 + ], + "source": "PCS", + "timeSlotId": 14467, + "note": "" + }, + { + "id": 175080, + "name": "Break Q&A: FABulous", + "isParallelPresentation": true, + "importedId": "1nctprJNKO78T8SPy8Z-lw", + "typeId": 13748, + "roomId": 11734, + "chairIds": [], + "contentIds": [ + 170736, + 170936, + 170731, + 170996, + 170944, + 170865, + 170888 + ], + "source": "PCS", + "timeSlotId": 14463, + "note": "" + }, + { + "id": 175081, + "name": "Break Q&A: Programming UI", + "isParallelPresentation": true, + "importedId": "csWmtEbKGkhDihuxZj73Jw", + "typeId": 13748, + "roomId": 11733, + "chairIds": [], + "contentIds": [ + 170819, + 170922, + 171012, + 170951, + 170766, + 170943 + ], + "source": "PCS", + "timeSlotId": 14464, + "note": "" + }, + { + "id": 175082, + "name": "Break Q&A: AI & Automation", + "isParallelPresentation": true, + "importedId": "fT8ouvR_y4wLtR-H8dKbDQ", + "typeId": 13748, + "roomId": 11733, + "chairIds": [], + "contentIds": [ + 170835, + 170751, + 170861, + 170937, + 170855 + ], + "source": "PCS", + "timeSlotId": 14472, + "note": "" + }, + { + "id": 175083, + "name": "Break Q&A: AI as Copilot", + "isParallelPresentation": true, + "importedId": "wxBWMtzPxg_tTGtT3l_RPw", + "typeId": 13748, + "roomId": 11733, + "chairIds": [], + "contentIds": [ + 170741, + 170918, + 170725, + 170827, + 171001, + 170933 + ], + "source": "PCS", + "timeSlotId": 14469, + "note": "" + }, + { + "id": 175084, + "name": "Break Q&A: Validation in AI/ML", + "isParallelPresentation": true, + "importedId": "FQDmE4UmfCbnhgrULcbN1w", + "typeId": 13748, + "roomId": 11733, + "chairIds": [], + "contentIds": [ + 170826, + 170954, + 170831, + 170784, + 170762 + ], + "source": "PCS", + "timeSlotId": 14463, + "note": "" + }, + { + "id": 175085, + "name": "Break Q&A: Bodily Signals", + "isParallelPresentation": true, + "importedId": "DqKhazsafkdAnSTDLsVi9g", + "typeId": 13748, + "roomId": 11734, + "chairIds": [], + "contentIds": [ + 170939, + 170941, + 171010, + 171000, + 170849, + 170859 + ], + "source": "PCS", + "timeSlotId": 14472, + "note": "" + }, + { + "id": 175086, + "name": "Break Q&A: Future Fabrics", + "isParallelPresentation": true, + "importedId": "A-tMG6i_E_tyXa-YJ-ISQQ", + "typeId": 13748, + "roomId": 11734, + "chairIds": [], + "contentIds": [ + 170743, + 170811, + 170971, + 170935, + 171007, + 170873, + 170814, + 170942 + ], + "source": "PCS", + "timeSlotId": 14470, + "note": "" + }, + { + "id": 175087, + "name": "Break Q&A: Dynamic Objects & Materials", + "isParallelPresentation": true, + "importedId": "9r0uJor-S6kg4jf58QC_mQ", + "typeId": 13748, + "roomId": 11735, + "chairIds": [], + "contentIds": [ + 170860, + 170995, + 170742, + 
170733, + 170770 + ], + "source": "PCS", + "timeSlotId": 14465, + "note": "" + }, + { + "id": 175088, + "name": "Break Q&A: Prototyping", + "isParallelPresentation": true, + "importedId": "GNbZGbt6m4M7BbPA8XdYJw", + "typeId": 13748, + "roomId": 11734, + "chairIds": [], + "contentIds": [ + 170974, + 170857, + 170881, + 170886, + 170884, + 170962 + ], + "source": "PCS", + "timeSlotId": 14466, + "note": "" + }, + { + "id": 175089, + "name": "Break Q&A: New Vizualizations", + "isParallelPresentation": true, + "importedId": "8XfvSLtfyXBIEAvcdzveAQ", + "typeId": 13748, + "roomId": 11733, + "chairIds": [], + "contentIds": [ + 170791, + 170911, + 170803, + 170894, + 170818, + 170978 + ], + "source": "PCS", + "timeSlotId": 14471, + "note": "" + }, + { + "id": 175090, + "name": "Break Q&A: Movement-based UIs", + "isParallelPresentation": true, + "importedId": "N_XT9q4XXyxSFaHBIDelnA", + "typeId": 13748, + "roomId": 11735, + "chairIds": [], + "contentIds": [ + 170858, + 170957, + 170847, + 170848, + 170769, + 170717 + ], + "source": "PCS", + "timeSlotId": 14471, + "note": "" + }, + { + "id": 175091, + "name": "Break Q&A: Sound & Music", + "isParallelPresentation": true, + "importedId": "WW9CgQUNto302gq2eNfwrQ", + "typeId": 13748, + "roomId": 11735, + "chairIds": [], + "contentIds": [ + 170927, + 170866, + 170952, + 170787, + 170955, + 170979 + ], + "source": "PCS", + "timeSlotId": 14463, + "note": "" + }, + { + "id": 175096, + "name": "Lunch Break", + "isParallelPresentation": false, + "importedId": "14952", + "typeId": 13836, + "chairIds": [], + "contentIds": [], + "source": "SYS", + "timeSlotId": 14328 + }, + { + "id": 175097, + "name": "Lunch Break", + "isParallelPresentation": false, + "importedId": "14953", + "typeId": 13836, + "chairIds": [], + "contentIds": [], + "source": "SYS", + "timeSlotId": 14329 + }, + { + "id": 175098, + "name": "Lunch Break", + "isParallelPresentation": false, + "importedId": "14954", + "typeId": 13836, + "chairIds": [], + "contentIds": [], + "source": "SYS", + "timeSlotId": 14338 + }, + { + "id": 175099, + "name": "Lunch Break", + "isParallelPresentation": false, + "importedId": "14955", + "typeId": 13836, + "chairIds": [], + "contentIds": [], + "source": "SYS", + "timeSlotId": 14339 + }, + { + "id": 175100, + "name": "Lunch Break", + "isParallelPresentation": false, + "importedId": "14956", + "typeId": 13836, + "chairIds": [], + "contentIds": [], + "source": "SYS", + "timeSlotId": 14335 + } + ], + "events": [ + { + "id": 171104, + "name": "Registration", + "isParallelPresentation": false, + "importedId": "14684", + "typeId": 13746, + "roomId": 11739, + "chairIds": [], + "contentIds": [], + "startDate": 1728892800000, + "endDate": 1728928800000, + "presenterIds": [], + "source": "SYS" + }, + { + "id": 171105, + "name": "Workshop Registration", + "isParallelPresentation": false, + "importedId": "14685", + "typeId": 13746, + "roomId": 11802, + "chairIds": [], + "contentIds": [], + "startDate": 1728806400000, + "endDate": 1728810000000, + "presenterIds": [], + "source": "SYS" + }, + { + "id": 171106, + "name": "Lactation Room", + "isParallelPresentation": false, + "importedId": "14686", + "typeId": 13746, + "chairIds": [], + "contentIds": [], + "presenterIds": [], + "source": "SYS" + }, + { + "id": 171107, + "name": "Silent Room", + "isParallelPresentation": false, + "importedId": "14687", + "typeId": 13746, + "chairIds": [], + "contentIds": [], + "presenterIds": [], + "source": "SYS" + }, + { + "id": 171108, + "name": "Speaker Prep", + "isParallelPresentation": false, + 
"importedId": "14688", + "typeId": 13746, + "chairIds": [], + "contentIds": [], + "presenterIds": [], + "source": "SYS" + }, + { + "id": 171111, + "name": "Registration", + "isParallelPresentation": false, + "importedId": "14689", + "typeId": 13746, + "roomId": 11739, + "chairIds": [], + "contentIds": [], + "startDate": 1728979200000, + "endDate": 1729015200000, + "presenterIds": [], + "source": "SYS" + }, + { + "id": 171112, + "name": "Registration", + "isParallelPresentation": false, + "importedId": "14690", + "typeId": 13746, + "roomId": 11739, + "chairIds": [], + "contentIds": [], + "startDate": 1729065600000, + "endDate": 1729080000000, + "presenterIds": [], + "source": "SYS" + } + ], + "contents": [ + { + "id": 170716, + "typeId": 13749, + "title": "TeleHand: Hand-only Teleportation for Distant Object Pointing in Virtual Reality", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-7864", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 175077, + 171059 + ], + "eventIds": [], + "abstract": "The goal of this study is to present an easy-to-use and accurate pointing technique for distant objects in a virtual reality environment. For this purpose, we propose TeleHand pointing, in which the user first teleports only their hand near a target and points at the target using the ray cast from the teleported hand. To evaluate the usability of our method, we performed a user study in which participants repeated selection tasks for distant targets. As a result, we found that, although the proposed method slightly increases the task completion time, it significantly reduces pointing errors for distant targets compared to the traditional ray-based method.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "Shibaura Institute of Technology", + "dsl": "" + } + ], + "personId": 170286 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "Shibaura institute of technology", + "dsl": "" + } + ], + "personId": 170380 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "Shibaura Institute of Technology", + "dsl": "" + } + ], + "personId": 170492 + } + ] + }, + { + "id": 170717, + "typeId": 13749, + "title": "Efficient Optimal Mouse Sensor Position Estimation using Simulated Cursor Trajectories", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-7623", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 175090, + 171059 + ], + "eventIds": [], + "abstract": "The optimal sensor position on a computer mouse can improve pointing performance, but existing calibration methods require time-consuming repetitions of pointing tasks. In this paper, we propose a novel calibration approach that dramatically reduces the time and effort required to determine a user's optimal mouse sensor position. Our method simulates cursor trajectories for different sensor positions using a dual-sensor mouse, eliminating the need for repetitive measurements with multiple sensor placements. By analyzing the straightness of the simulated paths, quantified by the mean absolute error (MAE) relative to an ideal straight-line path, we estimate the sensor position that would yield the most efficient pointing motion for the user. 
Our preliminary results indicate that the proposed simulation-based calibration method could reduce the calibration time from an hour to just five minutes, while providing a better identification of the optimal mouse sensor positions.", + "authors": [ + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daegu", + "institution": "Daegu Gyeongbuk Institute of Science and Technology (DGIST)", + "dsl": "" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daegu", + "institution": "Daegu Gyeongbuk Institute of Science and Technology (DGIST)", + "dsl": "" + } + ], + "personId": 170354 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daegu", + "institution": "Daegu Gyeongbuk Institute of Science and Technology (DGIST)", + "dsl": "" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daegu", + "institution": "Daegu Gyeongbuk Institute of Science and Technology (DGIST)", + "dsl": "" + } + ], + "personId": 170431 + } + ] + }, + { + "id": 170718, + "typeId": 13744, + "title": "MyWebstrates: Webstrates as Local-first Software", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1119", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Webstrates are web substrates, a practical realization of shareable dynamic media under which distributability, shareability, and malleability are fundamental software principles. Webstrates blur the distinction between application and document in a way that enables users to share, repurpose, and refit software across a variety of domains, but its reliance on a central server constrains its use; it is at odds with personal and collective control of data; and limits applications to the web. We extend the fundamental principles to include \\emph{interoperability} and \\emph{sovereignty} over data and propose MyWebstrates, an implementation of Webstrates on top of a new, lower-level substrate for synchronization built around local-first software principles. MyWebstrates registers itself in the user’s browser and function as a piece of local software that can selectively synchronise data over sync servers or peer-to-peer connections. We show how MyWebstrates extends Webstrates to enable offline collaborative use, interoperate between Webstrates on non-web technologies such as Unity, and maintain personal and collective sovereignty over data. 
We demonstrate how this enables new types of applications of Webstrates and discuss limitations of this approach and new challenges that it reveals.", + "authors": [ + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170679 + }, + { + "affiliations": [ + { + "country": "France", + "state": "", + "city": "Paris", + "institution": "Institut Polytechnique de Paris", + "dsl": "LTCI, Télécom Paris" + } + ], + "personId": 169778 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Ink & Switch", + "dsl": "" + } + ], + "personId": 170688 + } + ] + }, + { + "id": 170719, + "typeId": 13748, + "title": "Can Capacitive Touch Images Enhance Mobile Keyboard Decoding?", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676420" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6080", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175063, + 171043 + ], + "eventIds": [], + "abstract": "Capacitive touch sensors capture the two-dimensional spatial profile (referred to as a touch heatmap) of a finger's contact with a mobile touchscreen. However, the research and design of touchscreen mobile keyboards -- one of the most speed and accuracy demanding touch interfaces -- has focused on the location of the touch centroid derived from the touch image heatmap as the input, discarding the rest of the raw spatial signals. In this paper, we investigate whether touch heatmaps can be leveraged to further improve the tap decoding accuracy for mobile touchscreen keyboards. Specifically, we developed and evaluated machine-learning models that interpret user taps by using the centroids and/or the heatmaps as their input and studied the contribution of the heatmaps to model performance. The results show that adding the heatmap into the input feature set led to 21.4% relative reduction of character error rates on average, compared to using the centroid alone. Furthermore, we conducted a live user study with the centroid-based and heatmap-based decoders built into Pixel 6 Pro devices and observed lower error rate, faster typing speed, and higher self-reported satisfaction score based on the heatmap-based decoder than the centroid-based decoder. 
These findings underline the promise of utilizing touch heatmaps for improving typing experience in mobile keyboards.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170672 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170342 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170392 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170292 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170254 + } + ] + }, + { + "id": 170720, + "typeId": 13751, + "title": "Democratizing Intelligent Soft Wearables", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686707" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24g-9399", + "source": "PCS", + "trackId": 13208, + "tags": [], + "keywords": [], + "sessionIds": [ + 171054 + ], + "eventIds": [], + "abstract": "Wearables have long been integral to human culture and daily life. Recent advances in intelligent soft wearables have dramatically transformed how we interact with the world, enhancing our health, productivity, and overall well-being. These innovations, combining advanced sensor design, fabrication, and computational power, offer unprecedented opportunities for monitoring, assistance, and augmentation. However, the benefits of these advancements are not yet universally accessible. Economic and technical barriers often limit the reach of these technologies to domain-specific experts. There is a growing need for democratizing intelligent wearables that are scalable, seamlessly integrated, customized, and adaptive. 
By bringing researchers from relevant disciplines together, this workshop aims to identify the challenges and investigate opportunities for democratizing intelligent soft wearables within the HCI community via interactive demos, invited keynotes, and focused panel discussions.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT", + "dsl": "" + } + ], + "personId": 170698 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University", + "dsl": "" + } + ], + "personId": 169900 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "Massachusetts Institute of Technology", + "dsl": "MIT Media Lab" + } + ], + "personId": 170220 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Georgia", + "city": "Atlanta", + "institution": "Interactive Computing", + "dsl": "Georgia Institute of Technology" + } + ], + "personId": 170665 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Accenture Labs", + "dsl": "" + } + ], + "personId": 170013 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "ITHACA", + "institution": "Cornell University", + "dsl": "Computing and Information Science" + } + ], + "personId": 169867 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170347 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT Media Lab", + "dsl": "Responsive Environments" + } + ], + "personId": 170610 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Department of Electrical & Computer Engineering (ECE)" + }, + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "Massachusetts Institute of Technology (MIT)", + "dsl": "Computer Science and Artificial Intelligence Laboratory (CSAIL)" + } + ], + "personId": 169725 + } + ] + }, + { + "id": 170721, + "typeId": 13749, + "title": "LOST STAR: An Interactive Stereoscopic Picture Book Installation for Children's Bedtime Rituals", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-8957", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171049, + 175073 + ], + "eventIds": [], + "abstract": "Sleep disorders in early childhood hinder development and disrupt daily functions, but effective bedtime rituals can help. However, the integration of tangible and virtual interactions in sleep-related design projects remains unexplored. This paper investigates how pop-up books and multimedia can create a playful and accessible way for children to engage in bedtime activities. We present the design of Lost Star, a sonic and tactile Tangible User Interface (TUI) designed to enhance the quality of bedtime rituals for children with sleep problems. 
Additionally, we explore Joint Media Engagement (JME) to foster meaningful interactions between parents and children, enhancing bedtime routines and improving sleep outcomes.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Communication University of China", + "dsl": "School of Animation and Digital Arts" + } + ], + "personId": 169738 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "北京 / 北京市", + "institution": "Communication University of China", + "dsl": "" + } + ], + "personId": 170172 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Communication University of China", + "dsl": "School of Animation and Digital Arts" + } + ], + "personId": 169980 + } + ] + }, + { + "id": 170722, + "typeId": 13748, + "title": "Hydroptical Thermal Feedback: Spatial Thermal Feedback Using Visible Lights and Water", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676453" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2281", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175066, + 171038 + ], + "eventIds": [], + "abstract": "We control the temperature of materials in everyday interactions, recognizing temperature's important influence on our bodies, minds, and experiences. However, thermal feedback is an under-explored modality in human-computer interaction partly due to its limited temporal (slow) and spatial (small-area and non-moving) capabilities. We introduce hydroptical thermal feedback, a spatial thermal feedback method that works by applying visible lights on body parts in water. Through physical measurements and psychophysical experiments, our results show: (1) Humans perceive thermal sensations when visible lights are cast on the skin under water, and perceived warmth is greater for lights with shorter wavelengths, (2) temporal capabilities, (3) apparent motion (spatial) of warmth and coolness sensations, and (4) hydroptical thermal feedback can support the perceptual illusion that the water itself is warmer. We propose applications, including virtual reality (VR), shared water experiences, and therapies. Overall, this paper contributes hydroptical thermal feedback as a novel method, empirical results demonstrating its unique capabilities, proposed applications, and design recommendations for using hydroptical thermal feedback. 
Our method introduces controlled, spatial thermal perceptions to water experiences.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Georgia", + "city": "Atlanta", + "institution": "Georgia Institute of Technology", + "dsl": "Digital Media" + } + ], + "personId": 170098 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "University of Tokyo", + "dsl": "" + } + ], + "personId": 169894 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Fukuoka", + "institution": "Kyushu University", + "dsl": "" + } + ], + "personId": 170372 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Georgia", + "city": "Atlanta", + "institution": "Georgia Institute of Technology", + "dsl": "Digital Media" + } + ], + "personId": 169701 + } + ] + }, + { + "id": 170723, + "typeId": 13744, + "title": "Fiery Hands: Designing Thermal Glove through Thermal and Tactile Integration for Virtual Object Manipulation", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1110", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We present a novel approach to render thermal and tactile feedback to the palm and fingertips through thermal and tactile integration. Our approach minimizes the obstruction of the palm and inner side of the fingers and enables virtual object manipulation while providing localized and global thermal feedback. By leveraging thermal actuators positioned strategically on the outer palm and back of the fingers in interplay with tactile actuators, our approach exploits thermal referral and tactile masking phenomena. Through a series of user studies, we validate the perception of localized thermal sensations across the palm and fingers, showcasing the ability to generate diverse thermal patterns. Furthermore, we demonstrate the efficacy of our approach in VR applications, replicating diverse thermal interactions with virtual objects. 
This work represents significant progress in thermal interactions within VR, offering enhanced sensory immersion at an optimal energy cost.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "" + }, + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "" + } + ], + "personId": 169941 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Erik Jonsson School of Engineering and Computer Science" + }, + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Erik Jonsson School of Engineering and Computer Science" + } + ], + "personId": 170331 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Allen", + "institution": "The University of Texas at Dallas", + "dsl": "Department of Computer Science" + }, + { + "country": "United States", + "state": "Texas", + "city": "Allen", + "institution": "The University of Texas at Dallas", + "dsl": "Department of Computer Science" + } + ], + "personId": 170058 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Department of Computer Science" + }, + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Department of Computer Science" + } + ], + "personId": 170424 + } + ] + }, + { + "id": 170724, + "typeId": 13748, + "title": "PointerVol: A Laser Pointer for Swept Volumetric Displays", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676432" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1193", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171056, + 175062 + ], + "eventIds": [], + "abstract": "A laser pointer is a commonly used device that does not require communication with the display system or modifications on the applications, the presenter can just take a pointer and start using it. When a laser pointer is used on a volumetric display, a line rather than a point appears, making it not suitable for pointing at 3D locations. PointerVol is a modified laser pointer that allows users to point to 3D positions inside a swept volumetric display. We propose two PointerVol implementations based on timing and distance measurements, we evaluate the pointing performance using them. Finally, we present other features such as multi-user pointing, line patterns and a multi-finger wearable. 
PointerVol is a simple device that can help to popularize volumetric displays, or at least to make them more usable for presentations with true-3D content.", + "authors": [ + { + "affiliations": [ + { + "country": "Spain", + "state": "", + "city": "Pamplona", + "institution": "Universidad Pública de Navarra", + "dsl": "" + } + ], + "personId": 170606 + }, + { + "affiliations": [ + { + "country": "Spain", + "state": "", + "city": "Pamplona", + "institution": "Universidad Pública de Navarra", + "dsl": "" + } + ], + "personId": 170565 + }, + { + "affiliations": [ + { + "country": "Spain", + "state": "", + "city": "Pamplona", + "institution": "Public University of Navarra", + "dsl": "" + } + ], + "personId": 169948 + }, + { + "affiliations": [ + { + "country": "Spain", + "state": "", + "city": "Pamplona", + "institution": "Universidad Pública de Navarra", + "dsl": "" + } + ], + "personId": 169865 + }, + { + "affiliations": [ + { + "country": "Spain", + "state": "", + "city": "Pamplona", + "institution": "Universidad Pública de Navarra", + "dsl": "" + } + ], + "personId": 169744 + }, + { + "affiliations": [ + { + "country": "Spain", + "state": "Navarre", + "city": "Pamplona", + "institution": "Universidad Publica de Navarra", + "dsl": "UpnaLab" + } + ], + "personId": 170633 + } + ] + }, + { + "id": 170725, + "typeId": 13748, + "title": "VizGroup: An AI-assisted Event-driven System for Collaborative Programming Learning Analytics", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676347" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8024", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175083, + 171048 + ], + "eventIds": [], + "abstract": "Programming instructors often conduct collaborative learning activities, like Peer Instruction, to foster a deeper understanding in students and enhance their engagement with learning. These activities, however, may not always yield productive outcomes due to the diversity of student mental models and their ineffective collaboration. In this work, we introduce VizGroup, an AI-assisted system that enables programming instructors to easily oversee students' real-time collaborative learning behaviors during large programming courses. VizGroup leverages Large Language Models (LLMs) to recommend event specifications for instructors so that they can simultaneously track and receive alerts about key correlation patterns between various collaboration metrics and ongoing coding tasks. We evaluated VizGroup with 12 instructors in a comparison study using a dataset collected from a Peer Instruction activity that was conducted in a large programming lecture. 
\r\nThe results showed that VizGroup helped instructors effectively overview, narrow down, and track nuances throughout students' behaviors.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Virginia", + "city": "Blacksburg", + "institution": "Virginia Tech", + "dsl": "Department of Computer Science" + } + ], + "personId": 170123 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "" + } + ], + "personId": 170490 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Department of Computer Science" + } + ], + "personId": 170144 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Virginia", + "city": "Blacksburg", + "institution": "Virginia Tech", + "dsl": "Department of Computer Science" + } + ], + "personId": 170437 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Georgia", + "city": "Atlanta", + "institution": "Georgia Institute of Technology", + "dsl": "School of Interactive Computing" + } + ], + "personId": 170221 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Virginia", + "city": "Blacksburg", + "institution": "Virginia Tech", + "dsl": "Department of Computer Science" + } + ], + "personId": 170476 + } + ] + }, + { + "id": 170726, + "typeId": 13748, + "title": "ShadowMagic: Designing Human-AI Collaborative Support for Comic Professionals’ Shadowing", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676332" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2283", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171028, + 175071 + ], + "eventIds": [], + "abstract": "Shadowing allows artists to convey realistic volume and emotion of characters in comic colorization. While AI technologies have the potential to improve professionals’ shadowing experience, current practice is manual and time-consuming. To understand how we can improve their shadowing experience, we conducted interviews with 5 professionals. We found that professionals’ level of engagement can vary depending on semantics, such as characters’ faces or hair. We also found they spent time on shadow “landscaping”—deciding where to place large shadow regions to create a realistic volumetric presentation while the final results can vary dramatically depending on their “staging” and “attention guiding” needs. We discovered they would accept AI suggestions for less engaging semantic parts or landscaping, while needing the capability to adjust details. Based on our observations, we developed ShadowMagic, which (1) generates AI-driven shadows based on commonly used light directions, (2) enables users to selectively choose results depending on semantics, and (3) allows users to complete shadow areas themselves for further perfection. Through a summative evaluation with 5 professionals, we found that they were significantly more satisfied with our AI-driven results compared to a baseline. We also found that ShadowMagic’s “step by step” workflow helps participants more easily adopt AI-driven results. 
We conclude by providing implications.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Virginia", + "city": "Fairfax", + "institution": "George Mason University", + "dsl": "Information Sciences and Technology" + } + ], + "personId": 169766 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Virginia", + "city": "CENTREVILLE", + "institution": "George Mason University", + "dsl": "Computer Science Department" + } + ], + "personId": 169804 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Midjourney", + "dsl": "" + } + ], + "personId": 169907 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Virginia", + "city": "Fairfax", + "institution": "George Mason University", + "dsl": "" + } + ], + "personId": 170005 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Pusan", + "institution": "Pusan National University", + "dsl": "" + } + ], + "personId": 170116 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Virginia", + "city": "Fairfax", + "institution": "George Mason University", + "dsl": "Computer Science" + } + ], + "personId": 170297 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Virginia", + "city": "Fairfax", + "institution": "George Mason University", + "dsl": "Information Sciences and Technology" + } + ], + "personId": 170487 + } + ] + }, + { + "id": 170727, + "typeId": 13744, + "title": "Demonstrating FIRE: Mid-Air Thermo-Tactile Display", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686769" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1111", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Thermal Feedback, Mid-Air Haptics, Virtual Reality" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We present an innovative mid-air thermo-tactile display system based on ultrasound haptics. The system features an open-top chamber, heat modules, and an ultrasound haptic display. Our approach directs heated airflow toward the pressure point created by the ultrasound display, delivering thermal and tactile sensations simultaneously in mid-air. 
We demonstrate our system in four distinct VR environments—campfire, water fountain, kitchen, and candle—highlighting the enhanced user experiences made possible by the integration of thermal and tactile feedback.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Erik Jonsson School of Engineering and Computer Science" + } + ], + "personId": 170331 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "" + } + ], + "personId": 169941 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Department of Computer Science" + } + ], + "personId": 170424 + } + ] + }, + { + "id": 170728, + "typeId": 13748, + "title": "Augmented Breathing via Thermal Feedback in the Nose", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676438" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8023", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171030, + 175072 + ], + "eventIds": [], + "abstract": "We propose, engineer, and study a novel method to augment the feeling of breathing—enabling interactive applications to let users feel like they are inhaling more/less air (perceived nasal airflow). We achieve this effect by cooling or heating the nose in sync with the user’s inhalation. Our illusion builds on the physiology of breathing: we perceive our breath predominantly through the cooling of our nasal cavities during inhalation. This is why breathing in a “fresh” cold environment feels easier than in a “stuffy” hot environment, even when the inhaled volume is the same. Our psychophysical study confirmed that our in-nose temperature stimulation significantly influenced breathing perception in both directions: making it feel harder & easier to breathe. Further, we found that ~90% of the trials were described as a change in perceived airflow/breathing, while only ~8% as temperature. Following, we engineered a compact device worn across the septum that uses Peltier elements. 
We illustrate the potential of this augmented breathing in interactive contexts, such as for virtual reality (e.g., rendering ease of breathing crisp air or difficulty breathing with a deteriorated gas mask) and everyday interactions (e.g., in combination with a relaxation application or to alleviate the perceived breathing resistance when wearing a mask).", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 169715 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170388 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170188 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170376 + } + ] + }, + { + "id": 170729, + "typeId": 13748, + "title": "Patchview: LLM-powered Worldbuilding with Generative Dust and Magnet Visualization", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676352" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8386", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171021, + 175064 + ], + "eventIds": [], + "abstract": "Large language models (LLMs) can help writers build story worlds by generating world elements, such as factions, characters, and locations. However, making sense of many generated elements can be overwhelming. Moreover, if the user wants to precisely control aspects of generated elements that are difficult to specify verbally, prompting alone may be insufficient. We introduce Patchview, a customizable LLM-powered system that visually aids worldbuilding by allowing users to interact with story concepts and elements through the physical metaphor of magnets and dust. Elements in Patchview are visually dragged closer to concepts with high relevance, facilitating sensemaking. The user can also steer the generation with verbally elusive concepts by indicating the desired position of the element between concepts. When the user disagrees with the LLM's visualization and generation, they can correct those by repositioning the element. These corrections can be used to align the LLM's future behaviors to the user's perception. With a user study, we show that Patchview supports the sensemaking of world elements and steering of element generation, facilitating exploration during the worldbuilding process. 
Patchview provides insights on how customizable visual representation can help sensemake, steer, and align generative AI model behaviors with the user's intentions.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Midjourney", + "dsl": "" + } + ], + "personId": 169907 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Midjourney", + "dsl": "" + } + ], + "personId": 169936 + } + ] + }, + { + "id": 170730, + "typeId": 13748, + "title": "DrawTalking: Building Interactive Worlds by Sketching and Speaking", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676334" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-7177", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171021, + 175064 + ], + "eventIds": [], + "abstract": "We introduce DrawTalking, an approach to building and controlling interactive worlds by sketching and speaking while telling stories. It emphasizes user control and flexibility, and gives programming-like capability without requiring code. An early open-ended study with our prototype shows that the mechanics resonate and are applicable to many creative-exploratory use cases, with the potential to inspire and inform research in future natural interfaces for creative exploration and authoring.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "New York University", + "dsl": "" + } + ], + "personId": 170618 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 169841 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Jose", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 169812 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Diego", + "institution": "University of California, San Diego", + "dsl": "Department of Cognitive Science and Design Lab" + } + ], + "personId": 170393 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "New York University", + "dsl": "Future Reality Lab" + } + ], + "personId": 169849 + } + ] + }, + { + "id": 170731, + "typeId": 13748, + "title": "Speed-Modulated Ironing: High-Resolution Shade and Texture Gradients in Single-Material 3D Printing", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676456" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2042", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171040, + 175080 + ], + "eventIds": [], + "abstract": "We present Speed-Modulated Ironing, a new fabrication method for programming visual and tactile properties in single-material 3D printing. We use one nozzle to 3D print and a second nozzle to reheat printed areas at varying speeds, controlling the material's temperature-response. The rapid adjustments of speed allow for fine-grained reheating, enabling high-resolution color and texture variations. 
We implemented our method in a tool that allows users to assign desired properties to 3D models and creates corresponding 3D printing instructions. We demonstrate our method with three temperature-responsive materials: a foaming filament, a filament with wood fibers, and a filament with cork particles. These filaments respond to temperature by changing color, roughness, transparency, and gloss. Our technical evaluation reveals the capabilities of our method in achieving sufficient resolution and color shade range that allows surface details such as small text, photos, and QR codes on 3D-printed objects. Finally, we provide application examples demonstrating the new design capabilities enabled by Speed-Modulated Ironing.", + "authors": [ + { + "affiliations": [ + { + "country": "Netherlands", + "state": "", + "city": "Delft", + "institution": "Delft University of Technology", + "dsl": "" + } + ], + "personId": 170305 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 169696 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + }, + { + "country": "Turkey", + "state": "", + "city": "Istanbul", + "institution": "Boğaziçi University", + "dsl": "" + } + ], + "personId": 169779 + }, + { + "affiliations": [ + { + "country": "Netherlands", + "state": "", + "city": "Delft", + "institution": "Delft University of Technology", + "dsl": "" + }, + { + "country": "Netherlands", + "state": "", + "city": "Delft", + "institution": "Delft University of Technology", + "dsl": "" + } + ], + "personId": 170318 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170347 + }, + { + "affiliations": [ + { + "country": "Netherlands", + "state": "", + "city": "Delft", + "institution": "Delft University of Technology", + "dsl": "" + } + ], + "personId": 170622 + } + ] + }, + { + "id": 170732, + "typeId": 13748, + "title": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676461" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3252", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175077, + 171047 + ], + "eventIds": [], + "abstract": "There has been a continued trend towards minimizing instrumentation for full-body motion capture, going from specialized rooms and equipment, to arrays of worn sensors and recently sparse inertial pose capture methods. However, as these techniques migrate towards lower-fidelity IMUs on ubiquitous commodity devices, like phones, watches, and earbuds, challenges arise including compromised online performance, temporal consistency, and loss of global translation due to sensor noise and drift. Addressing these challenges, we introduce MobilePoser, a real-time system for full-body pose and global translation estimation using any available subset of IMUs already present in these consumer devices. MobilePoser employs a multi-stage deep neural network for kinematic pose estimation followed by a physics-based motion optimizer, achieving state-of-the-art accuracy while remaining lightweight. 
We conclude with a series of demonstrative applications to illustrate the unique potential of MobilePoser across a variety of fields, such as health and wellness, gaming, and indoor navigation to name a few. ", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "Department of Computer Science" + } + ], + "personId": 170682 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 169959 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170402 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Evanston", + "institution": "Northwestern University", + "dsl": "Computer Science" + } + ], + "personId": 169889 + } + ] + }, + { + "id": 170733, + "typeId": 13748, + "title": "Augmented Object Intelligence with XR-Objects", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676379" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5792", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171026, + 175087 + ], + "eventIds": [], + "abstract": "Seamless integration of physical objects as interactive digital entities remains a challenge for spatial computing. This paper explores Augmented Object Intelligence (AOI) in the context of XR, an interaction paradigm that aims to blur the lines between digital and physical by equipping real-world objects with the ability to interact as if they were digital, where every object has the potential to serve as a portal to digital functionalities. Our approach utilizes real-time object segmentation and classification, combined with the power of Multimodal Large Language Models (MLLMs), to facilitate these interactions without the need for object pre-registration. We implement the AOI concept in the form of XR-Objects, an open-source prototype system that provides a platform for users to engage with their physical environment in contextually relevant ways using object-based context menus. This system enables analog objects to not only convey information but also to initiate digital actions, such as querying for details or executing tasks. 
Our contributions are threefold: (1) we define the AOI concept and detail its advantages over traditional AI assistants, (2) detail the XR-Objects system’s open-source design and implementation, and (3) show its versatility through various use cases and a user study.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Zurich", + "institution": "Google", + "dsl": "" + } + ], + "personId": 169779 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170507 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Google", + "dsl": "" + } + ], + "personId": 169889 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170605 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170270 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170712 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170006 + }, + { + "affiliations": [ + { + "country": "Switzerland", + "state": "", + "city": "Zurich", + "institution": "Google Research", + "dsl": "" + } + ], + "personId": 170704 + } + ] + }, + { + "id": 170734, + "typeId": 13744, + "title": "Haptic Devices with Variable Volume Using Spiral Springs", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686752" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1115", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Virtual reality", + "Handheld haptics", + "Shape-changing", + "Spiral spring", + "Grasping" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Controllers that convey tactile sensations of object size in virtual environments are crucial for user interaction.\r\nHowever, existing research often faces commercialization constraints due to the complexity and high number of actuators required. This study proposes a haptic device that utilizes a spiral spring structure that offers approximately three times the variable range with a single actuator. This system can quickly render the diameters of objects grasped by users and simulate continuous volume changes, such as inflating a balloon. 
Due to its simple structure and wide variable range, this system is expected to be suitable for various scenarios, providing users with a more immersive virtual reality experience.", + "authors": [ + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daegu", + "institution": "Daegu Gyeongbuk Institute of Science and Technology (DGIST)", + "dsl": "" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daegu", + "institution": "Daegu Gyeongbuk Institute of Science and Technology (DGIST)", + "dsl": "" + } + ], + "personId": 170354 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daegu", + "institution": "Daegu Gyeongbuk Institute of Science and Technology (DGIST)", + "dsl": "" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daegu", + "institution": "Daegu Gyeongbuk Institute of Science and Technology (DGIST)", + "dsl": "" + } + ], + "personId": 170431 + } + ] + }, + { + "id": 170735, + "typeId": 13744, + "title": "DishAgent: Enhancing Dining Experiences through LLM-Based Smart Dishes", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686755" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1116", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Human Food Interaction", + "Large Language Model", + "Smart Dish" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "With the rapid advancement of smart technologies, there is an increasing demand to enhance everyday experiences, including dining. Recent Human-Computer Interaction (HCI) research has begun to emphasize the aesthetic, affective, sensual, and sociocultural qualities of directly interacting with food. However, these technologies are often constrained by the material properties of food, limiting their everyday applicability. This research introduces DishAgent, an innovative device equipped with a Large Language Model (LLM)-based smart dish and a swarm robotics system. DishAgent adapts to various dining scenarios by generating appropriate conversational contexts and coordinating the action commands of swarm robots, thereby enhancing the dining experience through real-time interaction. 
This paper explores the applications of DishAgent in intelligent dining guidance, dietary behavior intervention, food information query and social companionship, aiming to fill the critical gap in current technologies for simply and intuitively enhancing dining experiences.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "Academy of Arts & Design" + }, + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "The Future Laboratory" + } + ], + "personId": 170503 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "Academy of Arts and Design" + }, + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "The Future Laboratory" + } + ], + "personId": 170190 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Washington", + "dsl": "" + } + ], + "personId": 170076 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 169966 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Beijing", + "city": "Beijing, Haidian", + "institution": "The Future Laboratory", + "dsl": "Tsinghua University The Future Laboratory" + } + ], + "personId": 170329 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 170195 + } + ] + }, + { + "id": 170736, + "typeId": 13748, + "title": "Facilitating the Parametric Definition of Geometric Properties in Programming-Based CAD", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676417" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5303", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171040, + 175080 + ], + "eventIds": [], + "abstract": "Parametric Computer-aided design (CAD) enables the creation of reusable models by integrating variables into geometric properties, facilitating customization without a complete redesign. However, creating parametric designs in programming-based CAD presents significant challenges. Users define models in a code editor using a programming language, with the application generating a visual representation in a viewport. This process involves complex programming and arithmetic expressions to describe geometric properties, linking various object properties to create parametric designs. Unfortunately, these applications lack assistance, making the process unnecessarily demanding. We propose a solution that allows users to retrieve parametric expressions from the visual representation for reuse in the code, streamlining the design process. We demonstrated this concept through a proof-of-concept implemented in the programming-based CAD application, OpenSCAD, and conducted an experiment with 11 users. 
Our findings suggest that this solution could significantly reduce design errors, improve interactivity and engagement in the design process, and lower the entry barrier for newcomers by reducing the mathematical skills typically required in programming-based CAD applications", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Ottawa", + "institution": "Carleton University", + "dsl": "School of Information Technology" + } + ], + "personId": 169705 + }, + { + "affiliations": [ + { + "country": "France", + "state": "", + "city": "Lille", + "institution": "Université de Lille", + "dsl": "" + } + ], + "personId": 170405 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Ottawa", + "institution": "Carleton University", + "dsl": "" + } + ], + "personId": 170159 + }, + { + "affiliations": [ + { + "country": "France", + "state": "", + "city": "Lille", + "institution": "Univ. Lille, CNRS, Inria, Centrale Lille, UMR 9189 CRIStAL", + "dsl": "" + } + ], + "personId": 169810 + } + ] + }, + { + "id": 170737, + "typeId": 13756, + "title": "VibraHand: In-Hand Superpower Enabling Spying, Precognition, and Telekinesis", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686728" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24d-7106", + "source": "PCS", + "trackId": 13203, + "tags": [], + "keywords": [], + "sessionIds": [ + 171027 + ], + "eventIds": [], + "abstract": "This work presents a novel integration of multiple sensing and control technologies into a hand-worn device, enabling users to experience superpowers such as remote eavesdropping, telekinesis, and precognition. By leveraging techniques such as surface vibration sensing, ultrasound, and mmWave radar, the device facilitates expressive and intuitive in-hand interactions. Additionally, the use of acoustic levitation for contactless object manipulation extends the scope of wearable interactions. This innovative approach enhances the functionality of wearable devices for the future of seamless and powerful user experiences.", + "authors": [ + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "School of Electrical Engineering" + } + ], + "personId": 170589 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Deajon", + "institution": "KAIST", + "dsl": "School of Electrical Engineering" + } + ], + "personId": 169807 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "School of Electrical Engineering" + } + ], + "personId": 169708 + } + ] + }, + { + "id": 170738, + "typeId": 13756, + "title": "CrAIzy MIDI: AI-powered Wearable Musical Instrumental for Novice Player", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686735" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24d-5842", + "source": "PCS", + "trackId": 13203, + "tags": [], + "keywords": [], + "sessionIds": [ + 171027 + ], + "eventIds": [], + "abstract": "Playing music is a deeply fulfilling and universally cherished activity, yet the steep learning curve often discourages novice amateurs. Traditional music creation demands significant time and effort to master musical theory, instrumental mechanics, motor skills, and notation reading. 
To lower these barriers, innovative technology-driven approaches are necessary. This proposal introduces CrAIzy MIDI, an AI-powered wearable musical instrument designed to simplify and enhance the music-playing experience for beginners. CrAIzy MIDI integrates three key technologies: wearable user interfaces, AI-generated music, and multi-modality tools. The wearable interface allows users to play multiple instruments using intuitive finger and palm movements, reducing the complexity of traditional instruments. AI-generated music segments enable users to input a few pitches and have the AI complete the musical piece, aiding beginners in overcoming composition challenges. The multi-modality experience enhances engagement by allowing adjustments in music effects through visual stimuli such as light color and intensity changes. Together, these features make music creation more accessible and enjoyable, fostering continuous practice and exploration for novice musicians.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "Guangdong", + "city": "Guangzhou", + "institution": "Hong Kong University of Science and Technology (Guangzhou)", + "dsl": "Computational Media and Arts (CMA) Lab" + }, + { + "country": "China", + "state": "Jiangsu", + "city": "Suzhou", + "institution": "Duke Kunshan University", + "dsl": "" + } + ], + "personId": 169928 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Guangdong", + "city": "Guangzhou", + "institution": "The Hong Kong University of Science and Technology (Guangzhou)", + "dsl": "Computational Media and Arts (CMA) Lab" + }, + { + "country": "China", + "state": "Jiangsu", + "city": "Kunshan", + "institution": "Duke Kunshan University", + "dsl": "" + } + ], + "personId": 171229 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Guangzhou", + "institution": "The Hong Kong University of Science and Technology (Guangzhou)", + "dsl": "Computational Media and Arts (CMA) Lab" + }, + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Tongji University", + "dsl": "College of Design and Innovation" + } + ], + "personId": 171230 + } + ] + }, + { + "id": 170739, + "typeId": 13748, + "title": "SeamPose: Repurposing Seams as Capacitive Sensors in a Shirt for Upper-Body Pose Tracking", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676341" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-7049", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175077, + 171047 + ], + "eventIds": [], + "abstract": "Seams are areas of overlapping fabric formed by stitching two or more pieces of fabric together in the cut-and-sew apparel manufacturing process. In SeamPose, we repurposed seams as capacitive sensors in a shirt for continuous upper-body pose estimation. Compared to previous all-textile motion-capturing garments that place the electrodes on the clothing surface, our solution leverages existing seams inside of a shirt by machine-sewing insulated conductive threads over the seams. The unique invisibilities and placements of the seams afford the sensing shirt to look and wear similarly as a conventional shirt while providing exciting pose-tracking capabilities. To validate this approach, we implemented a proof-of-concept untethered shirt with 8 capacitive sensing seams. 
With a 12-participant user study, our customized deep-learning pipeline accurately estimates the relative (to the pelvis) upper-body 3D joint positions with a mean per joint position error (MPJPE) of 6.0 cm. SeamPose represents a step towards unobtrusive integration of smart clothing for everyday pose estimation.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University", + "dsl": "" + } + ], + "personId": 169900 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University", + "dsl": "" + } + ], + "personId": 169681 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University ", + "dsl": "SciFi Lab" + } + ], + "personId": 170684 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University", + "dsl": "" + } + ], + "personId": 169860 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University Bowers CIS", + "dsl": "SciFi Lab" + } + ], + "personId": 170066 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University", + "dsl": "" + } + ], + "personId": 170137 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University", + "dsl": "Information Science" + } + ], + "personId": 170632 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University", + "dsl": "Information Science" + } + ], + "personId": 169845 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "ITHACA", + "institution": "Cornell University", + "dsl": "Computing and Information Science" + } + ], + "personId": 169867 + } + ] + }, + { + "id": 170740, + "typeId": 13749, + "title": "A New Approach for Volumetric Knitting", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686337" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-2967", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 175068, + 171049 + ], + "eventIds": [], + "abstract": "Unlike 3D printers, which offer the ability to fabricate nearly arbitrary geometric forms, most textile fabrication processes are limited to the creation of sheets or hollow surface-based forms. This poster presents a new machine architecture to directly produce volumetric (solid 3D) knitted forms using a 2D bed of knitting needles, \r\nrather than the 1D line of needles used in conventional knitting. 
We describe a small prototype with 4x4 needles, and demonstrate that it can create fully volumetric knits, including overhangs.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University", + "dsl": "Information Science" + } + ], + "personId": 169845 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University", + "dsl": "Information Science" + } + ], + "personId": 169835 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Ithaca High School", + "dsl": "" + } + ], + "personId": 170087 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170657 + } + ] + }, + { + "id": 170741, + "typeId": 13748, + "title": "DiscipLink: Unfolding Interdisciplinary Information Seeking Process via Human-AI Co-Exploration", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676366" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3928", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175083, + 171048 + ], + "eventIds": [], + "abstract": "Interdisciplinary studies often require researchers to explore literature in diverse branches of knowledge. Yet, navigating through the highly scattered knowledge from unfamiliar disciplines poses a significant challenge. In this paper, we introduce DiscipLink, a novel interactive system that facilitates collaboration between researchers and large language models (LLMs) in interdisciplinary information seeking (IIS). Based on users' topic of interest, DiscipLink initiates exploratory questions from the perspectives of possible relevant fields of study, and users can further tailor these questions. DiscipLink then supports users in searching and screening papers under selected questions by automatically expanding queries with disciplinary-specific terminologies, extracting themes from retrieved papers, and highlighting the connections between papers and questions. Our evaluation, comprising a within-subject comparative experiment and an open-ended exploratory study, reveals that DiscipLink can effectively support researchers in breaking down disciplinary boundaries and integrating scattered knowledge in diverse fields. 
The findings underscore the potential of LLM-powered tools in fostering information-seeking practices and bolstering interdisciplinary research.", + "authors": [ + { + "affiliations": [ + { + "country": "Hong Kong", + "state": "", + "city": "Hong Kong", + "institution": "Hong Kong University of Science and Technology", + "dsl": "" + } + ], + "personId": 169834 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hong Kong", + "institution": "Hong Kong University of Science and Technology", + "dsl": "CSE" + } + ], + "personId": 169832 + }, + { + "affiliations": [ + { + "country": "Hong Kong", + "state": "", + "city": "New Territories", + "institution": "The Hong Kong University of Science and Technology", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 170277 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Nanjing", + "institution": "Southeast University", + "dsl": "" + } + ], + "personId": 169927 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hong Kong", + "institution": "HKUST", + "dsl": "HKUST" + } + ], + "personId": 170457 + }, + { + "affiliations": [ + { + "country": "Hong Kong", + "state": "", + "city": "Hong Kong", + "institution": "Hong Kong University of Science and Technology", + "dsl": "" + } + ], + "personId": 170268 + } + ] + }, + { + "id": 170742, + "typeId": 13748, + "title": "PortaChrome: A Portable Contact Light Source for Integrated Re-Programmable Multi-Color Textures", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676458" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5308", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171026, + 175087 + ], + "eventIds": [], + "abstract": "In this paper, we present PortaChrome, a portable light source that can be attached to everyday objects to reprogram the color and texture of surfaces that come in contact with them. When PortaChrome makes contact with objects previously coated with photochromic dye, the UV and RGB LEDs inside PortaChrome create multi-color textures on the objects. In contrast to prior work, which used projectors for the color-change, PortaChrome has a thin and flexible form factor, which allows the color-change process to be integrated into everyday user interaction. Because of the close distance between the light source and the photochromic object, PortaChrome creates color textures in less than 4 minutes on average, which is 8 times faster than prior work. We demonstrate PortaChrome with four application examples, including data visualizations on textiles and dynamic designs on wearables. 
", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170620 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT", + "dsl": "" + } + ], + "personId": 170698 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "Department of Electrical Engineering and Computer Sciences" + } + ], + "personId": 170512 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170136 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170340 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 169939 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170651 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170413 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "" + } + ], + "personId": 169695 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170347 + } + ] + }, + { + "id": 170743, + "typeId": 13748, + "title": "ScrapMap: Interactive Color Layout for Scrap Quilting", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676404" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2834", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171020, + 175086 + ], + "eventIds": [], + "abstract": "Scrap quilting is a popular sewing process that involves combining leftover pieces of fabric into traditional patchwork designs. Imagining the possibilities for these leftovers and arranging the fabrics in such a way that achieves visual goals, such as high contrast, can be challenging given the large number of potential fabric assignments within the quilt's design. We formulate the task of designing a scrap quilt as a graph coloring problem with domain-specific coloring and material constraints. Our interactive tool called ScrapMap helps quilters explore these potential designs given their available materials by leveraging the hierarchy of scrap quilt construction (e.g., quilt blocks and motifs) and providing user-directed automatic block coloring suggestions. 
Our user evaluation indicates that quilters find ScrapMap useful for helping them consider new ways to use their scraps and create visually striking quilts.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 169884 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "Computer Science" + } + ], + "personId": 170294 + } + ] + }, + { + "id": 170744, + "typeId": 13744, + "title": "Demonstration of MouthIO: Customizable Oral User Interfaces with Integrated Sensing and Actuation", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686758" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1128", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Oral Interface", + "Wearable Computing", + "Fabrication", + "Flexible Circuits" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "MouthIO is the first customizable intraoral user interface that can be equipped with various sensors and output components. It consists of an SLA-printed brace that houses a flexible PCB within a bite-proof enclosure positioned between the molar teeth and inner cheeks. All parts in contact with the oral cavity are made of bio-compatible materials to ensure safety, while the design takes into account both comfort and portability. We demonstrate MouthIO through three application examples ranging from beverage consumption monitoring, health monitoring, to assistive technology.", + "authors": [ + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "" + } + ], + "personId": 170394 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "" + } + ], + "personId": 169700 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "" + } + ], + "personId": 170127 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Computer Science, Aarhus University", + "dsl": "" + } + ], + "personId": 170491 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170347 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "" + } + ], + "personId": 169695 + } + ] + }, + { + "id": 170745, + "typeId": 13744, + "title": "Eye-Hand Movement of Objects in Near Space Extended Reality ", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1129", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Hand-tracking in Extended Reality (XR) enables moving objects in near space with direct hand gestures, to pick, drag and drop objects in 3D. In this work, we investigate the use of eye-tracking to reduce the effort involved in this interaction. 
As the eyes naturally look ahead to the target for a drag operation, the principal idea is to map the translation of the object in the image plane to gaze, such that the hand only needs to control the depth component of the operation. We have implemented four techniques that explore two factors: the use of gaze only to move objects in X-Y vs. final refinement by hand, and the use of hand input in the Z axis to directly move objects vs. indirectly via a transfer function. We compared all four techniques in a user study (N=24) against baselines of direct and indirect hand input. We detail user performance, effort and experience trade-offs and show that all eye-hand techniques significantly reduce physical effort over direct gestures, pointing toward effortless drag-and-drop for XR environments.", + "authors": [ + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus ", + "institution": "Aarhus University", + "dsl": "Department of Computer Science" + } + ], + "personId": 169971 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170269 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Konstanz", + "institution": "University of Konstanz", + "dsl": "HCI Group" + } + ], + "personId": 169906 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "", + "city": "Lancaster", + "institution": "Lancaster University", + "dsl": "" + }, + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "" + } + ], + "personId": 169997 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "Department of Computer Science" + } + ], + "personId": 169856 + } + ] + }, + { + "id": 170746, + "typeId": 13744, + "title": "MOCHA: Model Optimization through Collaborative Human-AI Alignment", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686760" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1009", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Counterfactual Annotation", + "Variation Theory of Human Learning" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We present MOCHA, a novel interactive system designed to enhance data annotation in natural language processing. MOCHA integrates active learning with counterfactual data augmentation, allowing users to better align model behaviors with their intentions, preferences, and values through annotations. Utilizing principles from Variation Theory and Structural Alignment Theory, MOCHA (1) generates counterfactual examples that reveal key data variations and commonalities for users to annotate; and (2) presents them in a way that highlights shared analogical structures. This design reduces the cognitive load on users, making it easier for them to understand and reflect on the data. 
Consequently, this approach not only improves the clarity and efficiency of annotation but also fosters the creation of high-quality datasets and more effectively trained models.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "Notre Dame", + "institution": "University of Notre Dame", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 170193 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Allston", + "institution": "Harvard University", + "dsl": "SEAS" + } + ], + "personId": 170471 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "Notre Dame", + "institution": "University of Notre Dame", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 169973 + } + ] + }, + { + "id": 170747, + "typeId": 13744, + "title": "Demonstrating Uncertainty-aware Rapid Touch and Text Input for Virtual Reality from Egocentric Vision", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1120", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "While passive surfaces offer numerous benefits for interaction in Virtual Reality, reliably detecting touch input solely from head-mounted cameras has been a long-standing challenge. Camera specifics, hand self-occlusion, and rapid movements of both head and fingers introduce uncertainty about the exact location of touch events. Existing methods have not achieved the performance necessary for robust interaction.\r\n\r\nWe present a real-time pipeline that captures touch input from all ten fingers on any physical surface, purely based on egocentric hand tracking. Our method, TouchInsight, includes a neural network to predict the moment and finger involved in making contact, along with the touch location. 
This location is represented by a bivariate Gaussian distribution to account for the inherent uncertainties in the inputs, which TouchInsight resolves through contextual priors to more accurately infer user input.\r\n\r\nWe demonstrate the effectiveness of our approach through a surface-aligned gaming experience and a core application of dexterous touch input: two-handed text entry.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta Reality Labs", + "dsl": "" + }, + { + "country": "Switzerland", + "state": "", + "city": "Zürich", + "institution": "ETH Zürich", + "dsl": "Department of Computer Science" + } + ], + "personId": 170554 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Facebook Reality Labs", + "dsl": "" + } + ], + "personId": 170158 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Reality Labs Research", + "dsl": "Meta" + } + ], + "personId": 170415 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Facebook", + "dsl": "Facebook Reality Labs" + } + ], + "personId": 170353 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta Reality Labs", + "dsl": "" + } + ], + "personId": 170336 + }, + { + "affiliations": [ + { + "country": "Switzerland", + "state": "", + "city": "Zurich", + "institution": "ETH Zürich", + "dsl": "Department of Computer Science" + } + ], + "personId": 170358 + } + ] + }, + { + "id": 170748, + "typeId": 13744, + "title": "Hands-on, Hands-off: Gaze-Assisted Bimanual 3D Interaction", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1121", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Interactions with objects in 3D frequently require complex manipulations beyond selection, hence gaze and pinch can fall short as a technique. Even simple drag and drop can benefit from further hand tracking, not to mention rotation of objects or bimanual formations to move multiple pieces or attach parts. Interactions of this type map naturally to the use of both hands for symmetric and asymmetric input, where framing - such as a rotation - of the object by the non-dominant hand prepares the spatial reference in which the intended manipulation is performed by the dominant hand. In this work, we build on top of gaze and pinch, and explore gaze support for asymmetric bimanual input. With direct bimanual input as baseline, we consider three alternative conditions, where input by non-dominant, dominant, or both hands is indirect. We conduct a comparative study to evaluate the performance on an abstract rotate & manipulate task, revealing the merits and limitations of each method. 
We then implement our own learned guidelines on a series of demonstrative applications.", + "authors": [ + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "Department of Computer Science" + } + ], + "personId": 169937 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170592 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170367 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170507 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170006 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "", + "city": "Lancaster", + "institution": "Lancaster University", + "dsl": "" + }, + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "" + } + ], + "personId": 169997 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "Department of Computer Science" + } + ], + "personId": 169856 + } + ] + }, + { + "id": 170749, + "typeId": 13744, + "title": "Demonstrating Haptic Source-Effector: Full-Body Haptics via Non-Invasive Brain Stimulation", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686756" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1123", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Transcranial Magnetic Stimulation", + "Electrical Muscle Stimulation", + "Haptics" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We propose a novel concept for haptics in which one centralized on-body actuator renders haptic effects on multiple body parts by stimulating the brain, i.e., the source of the nervous system—we call this a haptic source-effector, as opposed to the traditional wearables’ approach of attaching one actuator per body part (end-effectors). We implement our concept via transcranial-magnetic-stimulation (TMS)—a non-invasive technique from neuroscience/medicine in which electromagnetic pulses safely stimulate brain areas. 
Our approach renders ∼15 touch/force-feedback sensations throughout the body (e.g., hands, arms, legs, feet, and jaw), all by stimulating the user's sensorimotor cortex with a single magnetic coil moved mechanically across the scalp.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170155 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "Human Computer Integration Lab" + } + ], + "personId": 169960 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170068 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170376 + } + ] + }, + { + "id": 170750, + "typeId": 13744, + "title": "Experiencing Thing2Reality: Transforming 2D Content into Conditioned Multiviews and 3D Gaussian Objects for XR Communication", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686740" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1002", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "extended reality, augmented communication, image-to-3D, remote collaboration, spatial referencing, object-focused communication" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "During remote communication, participants share both digital and physical content, such as product designs, digital assets, and environments, to enhance mutual understanding.\r\nRecent advances in augmented communication have facilitated users to swiftly create and share digital 2D copies of physical objects from video feeds into a shared space.\r\nHowever, the conventional 2D representation of digital objects restricts users' ability to spatially reference items in a shared immersive environment. To address these challenges, we propose Thing2Reality, an Extended Reality (XR) communication platform designed to enhance spontaneous discussions regarding both digital and physical items during remote sessions. With Thing2Reality, users can quickly materialize ideas or physical objects in an immersive environment and share them as conditioned multiview renderings or 3D Gaussians. 
Our system enables users to interact with remote objects or discuss concepts in a collaborative manner.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Virginia", + "city": "Charlottesville", + "institution": "University of Virginia", + "dsl": "Department of Computer Science" + }, + { + "country": "United States", + "state": "Virginia", + "city": "Charlottesville", + "institution": "University of Virginia", + "dsl": "Department of Computer Science" + } + ], + "personId": 170643 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Boston", + "institution": "Northeastern University", + "dsl": "Khoury College of Computer Sciences" + }, + { + "country": "United States", + "state": "Massachusetts", + "city": "Boston", + "institution": "Northeastern University", + "dsl": "Khoury College of Computer Sciences" + } + ], + "personId": 170497 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google", + "dsl": "" + }, + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170379 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google Inc.", + "dsl": "" + }, + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google Inc.", + "dsl": "" + } + ], + "personId": 170021 + }, + { + "affiliations": [ + { + "country": "Switzerland", + "state": "", + "city": "Zurich", + "institution": "Google Research", + "dsl": "" + }, + { + "country": "Switzerland", + "state": "", + "city": "Zurich", + "institution": "Google Research", + "dsl": "" + } + ], + "personId": 170704 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Virginia", + "city": "Charlottesville", + "institution": "University of Virginia", + "dsl": "Department of Computer Science" + }, + { + "country": "United States", + "state": "Virginia", + "city": "Charlottesville", + "institution": "University of Virginia", + "dsl": "Department of Computer Science" + } + ], + "personId": 170039 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Google", + "dsl": "" + }, + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170605 + } + ] + }, + { + "id": 170751, + "typeId": 13748, + "title": "Memolet: Reifying the Reuse of User-AI Conversational Memories", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676388" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3263", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175082, + 171046 + ], + "eventIds": [], + "abstract": "As users engage more frequently with AI conversational agents, conversations may exceed their memory capacity, leading to failures in correctly leveraging certain memories for tailored responses. However, in finding past memories that can be reused or referenced, users need to retrieve relevant information in various conversations and articulate to the AI their intention to reuse these memories. To support this process, we introduce Memolet, an interactive object that reifies memory reuse. 
Users can directly manipulate Memolet to specify which memories to reuse and how to use them. We developed a system demonstrating Memolet's interaction across various memory reuse stages, including memory extraction, organization, prompt articulation, and generation refinement. We examine the system's usefulness with an N=12 within-subject study and provide design implications for future systems that support user-AI conversational memory reusing.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Waterloo", + "institution": "University of Waterloo", + "dsl": "David R. Cheriton School of Computer Science" + } + ], + "personId": 170459 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Waterloo", + "institution": "University of Waterloo", + "dsl": "School of Computer Science" + } + ], + "personId": 169723 + } + ] + }, + { + "id": 170752, + "typeId": 13749, + "title": "Dreamcrafter: Immersive Editing of 3D Radiance Fields Through Flexible, Generative Inputs and Outputs", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-6080", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171049, + 175071 + ], + "eventIds": [], + "abstract": "Authoring 3D scenes is a central task for spatial computing applications. Two competing visions for lowering existing barriers are (1) focus on immersive, direct manipulation of 3D content; or (2) leverage AI techniques that capture real scenes (3D Radiance Fields such as. NeRFs, 3D Gaussian Splatting) and modify them at a higher level of abstraction, at the cost of high latency. We unify the complementary strengths of these approaches and investigate how to integrate generative AI advances into real-time, immersive 3D Radiance Field editing.\r\nWe introduce Dreamcrafter, a VR-based 3D scene editing system that: (1) provides a modular architecture to integrate generative AI algorithms; (2) combines different levels of control for creating objects, including natural language and direct manipulation; and (3) introduces proxy representations that support interaction during high-latency operations.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 170641 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "Department of Electrical Engineering and Computer Sciences" + } + ], + "personId": 170512 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 170131 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "Department of Electrical Engineering and Computer Sciences (EECS)" + } + ], + "personId": 169800 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 170677 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Los Angeles", + "institution": "University of California, Los Angeles", + "dsl": "" 
+ } + ], + "personId": 169839 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "UC Berkeley", + "dsl": "EECS" + } + ], + "personId": 169964 + } + ] + }, + { + "id": 170753, + "typeId": 13744, + "title": " Demonstrating Rhapso: Automatically Embedding Fiber Materials into 3D Prints for Enhanced Interactivity", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1124", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We introduce Rhapso, a 3D printing system designed to automatically embed a diverse range of continuous fiber materials within 3D objects during the printing process. This approach enables integrating properties like tensile strength, light transmission, electrical conductivity, and heat generation directly into low-cost thermoplastic 3D prints. These functional objects can have intricate actuation, self-assembly, and sensing capabilities with little to no manual assembly. To achieve this, we modify a low-cost Fused Filament Fabrication (FFF) 3D printer, adding a stepper-motor-controlled fiber spool mechanism on a geared ring above the print bed. In addition to hardware, we provide routing software for precise fiber placement, which generates Gcode for printer operation. To illustrate the versatility of our system, we present a design space, design primitives, and applications that showcase its extensive potential.", + "authors": [ + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Copenhagen", + "institution": "University of Copenhagen", + "dsl": "Department of Computer Science" + } + ], + "personId": 169854 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "Department of Computer Science and Information" + } + ], + "personId": 169811 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "West Midlands", + "city": "Birmingham", + "institution": "University of Birmingham", + "dsl": "School of Computer Science" + } + ], + "personId": 170147 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Copenhagen", + "institution": "University of Copenhagen", + "dsl": "Department of Computer Science" + } + ], + "personId": 170486 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University Of Maryland", + "dsl": "Department of Computer Science" + } + ], + "personId": 170082 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Copenhagen", + "institution": "University of Copenhagen", + "dsl": "Department of Computer Science" + } + ], + "personId": 170149 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "" + } + ], + "personId": 170582 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University of Maryland", + "dsl": "Computer Science" + } + ], + "personId": 170113 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "", + "city": "Birmingham", + "institution": "University of Birmingham", + "dsl": "School of Computer Science" + } + ], + "personId": 170559 + } + ] + }, + { + "id": 170755, + "typeId": 13744, + "title": "Demonstrating 
Speed-Modulated Ironing: High-Resolution Shade and Texture Gradients in Single-Material 3D Printing", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686772" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1004", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "3D printing", + "multi-property printing", + "gradients", + "personal fabrication", + "rapid prototyping", + "temperature-responsive filaments" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We present Speed-Modulated Ironing, a new fabrication method for programming visual and tactile properties in single-material 3D printing. We use one nozzle to 3D print and a second nozzle to reheat printed areas at varying speeds, controlling the material’s temperature-response. The rapid adjustments of speed allow for fine-grained reheating, enabling high-resolution color and texture variations. We implemented our method in a tool that allows users to assign desired properties to 3D models and creates corresponding 3D printing instructions. We demonstrate our method with three temperature-responsive materials: a foaming filament, a filament with wood fibers, and a filament with cork particles. These filaments respond to temperature by changing color, roughness, transparency, and gloss. Our method is able to achieve sufficient resolution and color shade range that allows surface details such as small text, photos, and QR codes on 3D-printed objects. Finally, we provide application examples demonstrating the new design capabilities enabled by Speed-Modulated Ironing.", + "authors": [ + { + "affiliations": [ + { + "country": "Netherlands", + "state": "", + "city": "Delft", + "institution": "Delft University of Technology", + "dsl": "" + } + ], + "personId": 170305 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 169696 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + }, + { + "country": "Turkey", + "state": "", + "city": "Istanbul", + "institution": "Boğaziçi University", + "dsl": "" + } + ], + "personId": 169779 + }, + { + "affiliations": [ + { + "country": "Netherlands", + "state": "", + "city": "Delft", + "institution": "Delft University of Technology", + "dsl": "" + } + ], + "personId": 170318 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170347 + }, + { + "affiliations": [ + { + "country": "Netherlands", + "state": "", + "city": "Delft", + "institution": "Delft University of Technology", + "dsl": "" + } + ], + "personId": 170622 + } + ] + }, + { + "id": 170756, + "typeId": 13744, + "title": "Demonstrating Augmented Breathing via Thermal Feedback in the Nose", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686773" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1125", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Breathing", + "Thermal", + "Trigeminal", + "Perception", + "Respiration" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We demonstrate a novel method to augment the feeling of breathing---enabling interactive applications to let users feel like they are inhaling 
more/less air (perceived nasal airflow). We achieve this effect by cooling or heating the nose in sync with the user’s inhalation. Our illusion builds on the physiology of breathing: we perceive our breath predominantly through the cooling of our nasal cavities during inhalation. This is why breathing in a \"fresh'\" cold environment feels easier than in a \"stuffy\" hot environment, even when the inhaled volume is the same. Following, we engineered a compact device worn across the septum that uses Peltier elements. We illustrate the potential of this augmented breathing in a demonstration with two tracks: a fast track for a quick experience and a slow track for an immersive virtual reality (VR) walkthrough. In the fast track, visitors can quickly experience altered breathing perception using a simple application and our devices. In the slow track, visitors can put on a VR headset and experience an interactive scene where their sense of breathing is altered (e.g., rendering ease of breathing crisp air or difficulty breathing with a deteriorated gas mask).", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 169715 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170388 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170188 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170376 + } + ] + }, + { + "id": 170757, + "typeId": 13744, + "title": "X-Hair: 3D Printing Hair-like Structures with Multi-form, Multi-property and Multi-function", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1126", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "In this paper, we present X-Hair, a method that enables 3D-printed hair with various forms, properties, and functions. We developed a two-step suspend printing strategy to fabricate hair-like structures in different forms (e.g. fluff, bristle, barb) by adjusting parameters including Extrusion Length Ratio and Total Length. Moreover, a design tool is also established for users to customize hair-like structures with various properties (e.g. pointy, stiff, soft) on imported 3D models, which virtually previews the results and generates G-code files for 3D printing. We demonstrate the design space of X-Hair and evaluate the properties of them with different parameters. 
Through a series of applications with hair-like structures, we validate X-hair's practical usage of biomimicry, decoration, heat preservation, adhesion, and haptic interaction.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170034 + } + ] + }, + { + "id": 170758, + "typeId": 13744, + "title": "DualPad: Exploring Non-Dominant Hand Interaction on Dual-Screen Laptop Touchpads", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686751" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1005", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "dual-screen laptop", + "touchpad", + "non-dominant hand" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Dual-touchscreen laptops present opportunities for providing an expansive touchpad on the lower touchscreen. This expanded touchpad offers space for the engagement of both the dominant and non-dominant hands. In this context, it is necessary to redefine the role of the non-dominant hand. Therefore, we propose DualPad for dual-touchscreen laptops, which provides a long touchpad on the lower touchscreen. The non-dominant hand can utilize this DualPad to execute Touch Shortcut / Modifier, analogous to keyboard shortcuts and modifier keys on single-screen laptops. Moreover, we propose Dual Cursor as an example of bimanual interaction. In the demonstration, participants are expected to utilize the custom presentation program to create the given slide using two distinct methods. First, they employ the default layout of the virtual keyboard and virtual touchpad provided on the dual-touchscreen laptop. Then, they utilize DualPad for comparison.", + "authors": [ + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "HCI Lab, School of Computing" + } + ], + "personId": 170088 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 170650 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 169896 + } + ] + }, + { + "id": 170759, + "typeId": 13744, + "title": "LLM-for-X: Application-agnostic integration of Large Language Models to Support Personal Writing Workflows", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686757" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1127", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "In this demonstration, we show LLM-for-X, a system-wide short-cut layer that connects any application to backend LLM support through a lightweight popup dialog. LLM-for-X provides users with quick and easy-to-use LLM assistance without context switching to support writing and reading tasks. We show the use of LLM-for-X across several applications, such as Microsoft Office, VSCode, and Adobe Acrobat, which our tool seamlessly connects to the backends of OpenAI ChatGPT and Google Gemini. 
We also demonstrate the use of our system inside web apps such as Overleaf.", + "authors": [ + { + "affiliations": [ + { + "country": "Switzerland", + "state": "", + "city": "Zurich", + "institution": "ETH Zurich", + "dsl": "" + } + ], + "personId": 170173 + }, + { + "affiliations": [ + { + "country": "Switzerland", + "state": "", + "city": "Zurich", + "institution": "ETH Zurich", + "dsl": "" + } + ], + "personId": 170655 + }, + { + "affiliations": [ + { + "country": "Switzerland", + "state": "", + "city": "Zurich", + "institution": "ETH Zurich", + "dsl": "" + } + ], + "personId": 169913 + }, + { + "affiliations": [ + { + "country": "Switzerland", + "state": "", + "city": "Zurich", + "institution": "ETH Zürich", + "dsl": "Department of Computer Science" + } + ], + "personId": 170091 + }, + { + "affiliations": [ + { + "country": "Switzerland", + "state": "", + "city": "Zurich", + "institution": "ETH Zürich", + "dsl": "Department of Computer Science" + } + ], + "personId": 170358 + } + ] + }, + { + "id": 170760, + "typeId": 13744, + "title": "SealingLid: FDM 3D Printing Technique that Bends Thin Walls to Work as a Lid", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686775" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1006", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "3D printer, FDM, print time, material consumption" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "3D printers inspire user creativity by allowing them to design and create actual 3D objects.\r\nUnfortunately, printers often require long printing time and waste a lot of materials.\r\nVarious techniques have been proposed to alleviate the issues, such as modification of the model and the user's intervention during the printing process.\r\nWe propose another approach, SealingLid, that creates thin walls and bends them by the printer's head to form lids.\r\nIt does not require model modification or user intervention and works with simple unmodified FDM 3D printers.\r\nA test confirms that the technique reduces the materials used for infill and support structures.\r\nSome primitive objects are fabricated to explore the possibilities of the technique.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "Shibaura Institute of Technology", + "dsl": "" + } + ], + "personId": 170198 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "Shibaura Institute of Technology", + "dsl": "" + } + ], + "personId": 169805 + } + ] + }, + { + "id": 170761, + "typeId": 13748, + "title": "HandPad: Make Your Hand an On-the-go Writing Pad via Human Capacitance", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676328" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6885", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175060, + 171050 + ], + "eventIds": [], + "abstract": "The convenient text input system is a pain point for devices such as AR glasses, and it is difficult for existing solutions to balance portability and efficiency. This paper introduces HandPad, the system that turns the hand into an on-the-go touchscreen, which realizes interaction on the hand via human capacitance. 
HandPad achieves keystroke and handwriting inputs for letters, numbers, and Chinese characters, reducing the dependency on capacitive or pressure sensor arrays. Specifically, the system verifies the feasibility of touch point localization on the hand using the human capacitance model and proposes a handwriting recognition system based on Bi-LSTM and ResNet. The transfer learning-based system only needs a small amount of training data to build a handwriting recognition model for the target user. Experiments in real environments verify the feasibility of HandPad for keystroke (accuracy of 100%) and handwriting recognition for letters (accuracy of 99.1%), numbers (accuracy of 97.6%) and Chinese characters (accuracy of 97.9%).", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Shanghai Jiao Tong University", + "dsl": "Department of Computer Science and Engineering" + }, + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Shanghai Jiao Tong University", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 170111 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Shanghai Jiao Tong University", + "dsl": "Department of Computer Science and Engineering" + }, + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Shanghai Jiao Tong University", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 170210 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Shanghai Jiao Tong University", + "dsl": "Department of Computer Science and Engineering" + }, + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Shanghai Jiao Tong University", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 170154 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Shanghai Jiao Tong University", + "dsl": "School of Electronic Information and Electrical Engineering" + }, + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Shanghai Jiao Tong University", + "dsl": "School of Electronic Information and Electrical Engineering" + } + ], + "personId": 169758 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Shanghai Jiao Tong University", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Shanghai Jiao Tong University", + "dsl": "" + } + ], + "personId": 170232 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Hunan", + "city": "Changsha", + "institution": "Central South University", + "dsl": "School of Computer Science and Engineering" + }, + { + "country": "China", + "state": "Hunan", + "city": "Changsha", + "institution": "Central South University", + "dsl": "School of Computer Science and Engineering" + } + ], + "personId": 170671 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Sichuan", + "city": "Chengdu", + "institution": "University of Electronic Science and Technology of China", + "dsl": "School of Computer Science and Engineering" + }, + { + "country": "China", + "state": "Sichuan", + "city": "Chengdu", + "institution": "University of Electronic Science and Technology of China", + "dsl": "School of Computer Science and Engineering" + } + ], + 
"personId": 169761 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Shanghai Jiao Tong University", + "dsl": "Computer Science and Engineering" + }, + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Shanghai Jiao Tong University", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 169932 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Shanghai Jiao Tong University", + "dsl": "Department of Computer Science and Engineering" + }, + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Shanghai Jiao Tong University", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 170151 + } + ] + }, + { + "id": 170762, + "typeId": 13748, + "title": "\"The Data Says Otherwise\" – Towards Automated Fact-checking and Communication of Data Claims", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676359" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5311", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171023, + 175084 + ], + "eventIds": [], + "abstract": "Fact-checking data claims requires data evidence retrieval and analysis, which can become tedious and intractable when done manually. This work presents Aletheia, an automated fact-checking prototype designed to facilitate data claims verification and enhance data evidence communication. For verification, we utilize a pre-trained LLM to parse the semantics for evidence retrieval. To effectively communicate the data evidence, we design representations in two forms: data tables and visualizations, tailored to various data fact types. Additionally, we design interactions that showcase a real-world application of these techniques. We evaluate the performance of two core NLP tasks with a curated dataset comprising 400 data claims and compare the two representation forms regarding viewers’ assessment time, confidence, and preference via a user study with 20 participants. 
The evaluation offers insights into the feasibility and bottlenecks of using LLMs for data fact-checking tasks, potential advantages and disadvantages of using visualizations over data tables, and design recommendations for presenting data evidence.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Georgia", + "city": "Atlanta", + "institution": "Georgia Institute of Technology", + "dsl": "School of Interactive Computing" + } + ], + "personId": 170409 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Jose", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 170112 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 170031 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Jose", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 169698 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Jose", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 170511 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Georgia", + "city": "Atlanta", + "institution": "Georgia Institute of Technology", + "dsl": "School of Interactive Computing" + } + ], + "personId": 170121 + } + ] + }, + { + "id": 170763, + "typeId": 13748, + "title": "Story-Driven: Exploring the Impact of Providing Real-time Context Information on Automated Storytelling", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676372" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3255", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171021, + 175064 + ], + "eventIds": [], + "abstract": "Stories have long captivated the human imagination with narratives that enrich our lives. Traditional storytelling methods are often static and not designed to adapt to the listener’s environment, which is full of dynamic changes. For instance, people often listen to stories in the form of podcasts or audiobooks while traveling in a car. Yet, conventional in-car storytelling systems do not embrace the adaptive potential of this space. The advent of generative AI is the key to creating content that is not just personalized but also responsive to the changing parameters of the environment. We introduce a novel system for interactive, real-time story narration that leverages environment and user context in correspondence with estimated arrival times to adjust the generated story continuously. Through two comprehensive real-world studies with a total of 30 participants in a vehicle, we assess the user experience, level of immersion, and perception of the environment provided by the prototype. Participants' feedback shows a significant improvement over traditional storytelling and highlights the importance of context information for generative storytelling systems. 
", + "authors": [ + { + "affiliations": [ + { + "country": "Germany", + "state": "Baden-Württemberg", + "city": "Stuttgart", + "institution": "Porsche AG", + "dsl": "" + }, + { + "country": "Germany", + "state": "", + "city": "Ulm", + "institution": "Ulm University", + "dsl": "" + } + ], + "personId": 170004 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Stuttgart", + "institution": "Porsche AG", + "dsl": "" + } + ], + "personId": 170711 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Stuttgart", + "institution": "Porsche AG", + "dsl": "" + } + ], + "personId": 169825 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "Baden-Württemberg", + "city": "Stuttgart", + "institution": "Porsche AG", + "dsl": "" + } + ], + "personId": 170267 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Ulm", + "institution": "University of Ulm", + "dsl": "" + } + ], + "personId": 169755 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Stuttgart", + "institution": "Porsche AG", + "dsl": "" + } + ], + "personId": 170300 + } + ] + }, + { + "id": 170764, + "typeId": 13749, + "title": "“SimSnap” Framework: Designing Interaction Methods for Cross-device Applications", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686319" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-7087", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171049, + 175073 + ], + "eventIds": [], + "abstract": "Despite significant developments in cross-device interaction techniques, disengagement interactions- that disconnect devices from the application- remain underexplored. This paper introduces \"SimSnap\", a touch-based cross-device interaction framework for connecting and disconnecting devices. We extended existing connection methods and developed a novel approach to disconnection. 
We identified design considerations for touch-based disconnecting and present our recommendations to address these issues.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "Toronto Metropolitan University", + "dsl": "" + } + ], + "personId": 169943 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "Toronto Metropolitan University", + "dsl": "Synaesthetic Media Lab, RTA School of Media" + } + ], + "personId": 169737 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "Toronto Metropolitan University", + "dsl": "Synaesthetic Media Lab" + } + ], + "personId": 170109 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "Toronto Metropolitan University", + "dsl": "" + } + ], + "personId": 170429 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "Toronto Metropolitan University", + "dsl": "Synaesthetic Media Lab" + } + ], + "personId": 170245 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "Ryerson University", + "dsl": "Synaesthetic Media Lab" + } + ], + "personId": 170461 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "Ryerson University", + "dsl": "Synaesthetic Media Lab" + } + ], + "personId": 170506 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "Toronto Metropolitan University", + "dsl": "Synaesthetic Media Lab" + } + ], + "personId": 169917 + } + ] + }, + { + "id": 170765, + "typeId": 13748, + "title": "Lumina: A Software Tool for Fostering Creativity in Designing Chinese Shadow Puppets", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676426" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3818", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171021, + 175064 + ], + "eventIds": [], + "abstract": "Shadow puppetry, a culturally rich storytelling art, faces challenges transitioning to the digital realm. Creators in the early design phase struggle with crafting intricate patterns, textures, and basic animations while adhering to stylistic conventions - hindering creativity, especially for novices. This paper presents Lumina, a tool to facilitate the early Chinese shadow puppet design stage. Lumina provides contour templates, animations, scene editing tools, and machine-generated traditional puppet patterns. These features liberate creators from tedious tasks, allowing focus on the creative process. Developed based on a formative study with puppet creators, the web-based Lumina enables wide dissemination. 
An evaluation with 18 participants demonstrated Lumina's effectiveness and ease of use, with participants successfully creating designs spanning traditional themes to contemporary and science-fiction concepts.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "Beijing", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 169791 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 169882 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 170077 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 169899 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "The Future Laboratory", + "dsl": "Academy of Arts & Design,Tsinghua University" + } + ], + "personId": 169745 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Burnaby", + "institution": "Simon Fraser University", + "dsl": "School of Computing Science" + } + ], + "personId": 170310 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 170467 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Tongji University", + "dsl": "College of Design and Innovation, Intelligent Big Data Visualization Lab" + } + ], + "personId": 170097 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 170195 + } + ] + }, + { + "id": 170766, + "typeId": 13749, + "title": "Code Shaping: Iterative Code Editing with Free-form Sketching", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686324" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-4456", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059, + 175081 + ], + "eventIds": [], + "abstract": "We present an initial step towards building a system for programmers to edit code using free-form sketch annotations drawn directly onto editor and output windows. Using a working prototype system as a technical probe, an exploratory study (N=6) examines how programmers sketch to annotate Python code to communicate edits for an AI model to perform. 
The results reveal personalized workflow strategies and how similar annotations vary in abstractness and intention across different scenarios and users.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Waterloo", + "institution": "University of Waterloo", + "dsl": "School of Computer Science" + } + ], + "personId": 170459 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Waterloo", + "institution": "University of Waterloo", + "dsl": "School of Computer Science" + } + ], + "personId": 169723 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Waterloo", + "institution": "University of Waterloo", + "dsl": "Cheriton School of Computer Science" + } + ], + "personId": 170390 + } + ] + }, + { + "id": 170767, + "typeId": 13748, + "title": "JetUnit: Rendering Diverse Force Feedback in Virtual Reality Using Water Jets", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676440" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3935", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175059, + 171041 + ], + "eventIds": [], + "abstract": "We propose JetUnit, a water-based VR haptic system designed to produce force feedback with a wide spectrum of intensities and frequencies through water jets. The key challenge in designing this system lies in optimizing parameters to enable the haptic device to generate force feedback that closely replicates the most intense force produced by direct water jets while ensuring the user remains dry. In this paper, we present the key design parameters of the JetUnit wearable device determined through a set of quantitative experiments and a perception study. We further conducted a user study to assess the impact of integrating our haptic solutions into virtual reality experiences. 
The results revealed that, by adhering to the design principles of JetUnit, the water-based haptic system is capable of delivering diverse force feedback sensations, significantly enhancing the immersive experience in virtual reality.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University of Maryland", + "dsl": "Department of Computer Science" + } + ], + "personId": 170517 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University of Maryland", + "dsl": "Department of Computer Science" + } + ], + "personId": 170194 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University Of Maryland", + "dsl": "Department of Computer Science" + } + ], + "personId": 170082 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University of Maryland", + "dsl": "" + } + ], + "personId": 169686 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University of Maryland", + "dsl": "Computer Science" + } + ], + "personId": 170113 + } + ] + }, + { + "id": 170768, + "typeId": 13744, + "title": "SoundModVR: Sound Modifications in Virtual Reality for Sound Accessibility", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686754" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1139", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Accessibility", + "Virtual reality", + "Deaf and hard of hearing", + "Sound", + "Customization" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Previous VR sound accessibility work have substituted sounds with visual or haptic output to increase VR accessibility for deaf and hard of hearing (DHH) people. However, deafness occurs on a spectrum, and many DHH people (e.g., those with partial hearing) can also benefit from manipulating audio (e.g., increasing volume at specific frequencies) instead of substituting it with another modality. In this demo paper, we present a toolkit that allows modifying sounds in VR to support DHH people. We designed and implemented 18 VR sound modification tools spanning four categories, including prioritizing sounds, modifying sound parameters, providing spatial assistance, and adding additional sounds. Evaluation of our tools with 10 DHH users across five diverse VR scenarios reveal that our toolkit can improve DHH users’ VR experience but could be further improved by providing more customization options and decreasing cognitive load. We then compiled a Unity toolkit and conducted a preliminary evaluation with six Unity VR developers. 
Preliminary insights show that our toolkit is easy to use but could be enhanced through modularization.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170075 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170024 + } + ] + }, + { + "id": 170769, + "typeId": 13749, + "title": "KeyFlow: Acoustic Motion Sensing for Cursor Control on Any Keyboard", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-7723", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171049, + 175090 + ], + "eventIds": [], + "abstract": "Despite typing being a critical operation in the digital age, users still need to frequently switch between the mouse and keyboard while typing. We introduce KeyFlow, a tool that integrates mouse functionality into the keyboard through machine learning, allowing users to glide their fingers across the keyboard surface to move the cursor. The whole process does not press the keys down to differentiate from normal typing and avoid false touches. KeyFlow uses any computer-built-in microphones to capture the acoustic features of these gliding gestures, requiring no specialized equipment and can be set up and tested independently within 5 minutes. Our user research indicates that, compared to traditional keyboard and mouse methods, this system reduces hand movement distance by 78.3\\%, making the typing experience more focused.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 170209 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Virginia", + "city": "Blacksburg", + "institution": "Virginia Polytechnic Institute and State University", + "dsl": "" + } + ], + "personId": 170020 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Beijing", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 169791 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 170572 + } + ] + }, + { + "id": 170770, + "typeId": 13749, + "title": "Towards Multimodal Interaction with AI-Infused Shape-Changing Interfaces", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686315" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-9109", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 175087, + 171049 + ], + "eventIds": [], + "abstract": "We present a proof-of-concept system exploring multimodal interaction with AI-infused Shape-Changing Interfaces. Our prototype integrates inFORCE, a 10x5 pin-based shape display, with AI tools for 3D mesh generation and editing. Users can create and modify 3D shapes through speech, gesture, and tangible inputs. We demonstrate potential applications including AI-assisted 3D modeling, adaptive physical controllers, and dynamic furniture. 
Our implementation, which translates text to point clouds for physical rendering, reveals both the potential and challenges of combining AI with shape-changing interfaces. This work explores how AI can enhance tangible interaction with 3D information and opens up new possibilities for multimodal shape-changing UIs.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 169959 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 169803 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "Computer Science" + } + ], + "personId": 170576 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170706 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170244 + } + ] + }, + { + "id": 170771, + "typeId": 13748, + "title": "Eye-Hand Movement of Objects in Near Space Extended Reality", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676446" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3907", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175074, + 171031 + ], + "eventIds": [], + "abstract": "Hand-tracking in Extended Reality (XR) enables moving objects in near space with direct hand gestures, to pick, drag and drop objects in 3D. In this work, we investigate the use of eye-tracking to reduce the effort involved in this interaction. As the eyes naturally look ahead to the target for a drag operation, the principal idea is to map the translation of the object in the image plane to gaze, such that the hand only needs to control the depth component of the operation. We have implemented four techniques that explore two factors: the use of gaze only to move objects in X-Y vs.\\ extra refinement by hand, and the use of hand input in \r\n the Z axis to directly move objects vs.\\ indirectly via a transfer function. We compared all four techniques in a user study (N=24) against baselines of direct and indirect hand input. 
We detail user performance, effort and experience trade-offs and show that all eye-hand techniques significantly reduce physical effort over direct gestures, pointing toward effortless drag-and-drop for XR environments.", + "authors": [ + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus ", + "institution": "Aarhus University", + "dsl": "Department of Computer Science" + } + ], + "personId": 169971 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170269 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Konstanz", + "institution": "University of Konstanz", + "dsl": "HCI Group" + } + ], + "personId": 169906 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "", + "city": "Lancaster", + "institution": "Lancaster University", + "dsl": "" + }, + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "" + } + ], + "personId": 169997 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "Department of Computer Science" + } + ], + "personId": 169856 + } + ] + }, + { + "id": 170772, + "typeId": 13749, + "title": "OmniQuery: Enabling Question Answering on Personal Memory by Augmenting Multimodal Album Data", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686313" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-8816", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 175075, + 171049 + ], + "eventIds": [], + "abstract": "We present OmniQuery, an interactive system that augments users' personal photo albums and enables free-form question answering on users' past memories. \r\nOmniQuery processes multimodal media data in personal albums, aggregates them into related episodic memory databases in different levels, and infers semantic knowledge including personal facts like social relationships, preferences, and experiences. 
OmniQuery then allows users to interact with their database using natural language, giving media that directly matches the query or an exact answer supported by related media as a result.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Los Angeles", + "institution": "UCLA", + "dsl": "HCI Research" + } + ], + "personId": 170235 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Information School" + } + ], + "personId": 169876 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170130 + } + ] + }, + { + "id": 170773, + "typeId": 13744, + "title": "PronounSE: SFX Synthesizer from Language-Independent Vocal Mimic Representation", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686748" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1010", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Sound Synthesis", + "Vocal Mimic" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Sound creators make various sound effects (SFX) depending on auditory events utilizing knowledge, techniques, and experience. These are challenging tasks for inexperienced creators. In this research, we focus on the fact that it is relatively easy for anyone to mimic SFX with utterances, and we propose a novel interactive technique called PronounSE. It can synthesize SFX to reflect subtle sound nuances by the vocal representation with language-independent sound mimicry. PronounSE currently consists of a Transformer that converts a mel-spectrogram of utterance mimic sound into a mel-spectrogram of SFX, and iSTFTNet, as a neural-vocoder, reconstructs a waveform for a synthesized SFX. We built a dataset for PronounSE that especially picked explosion sounds which we could easily represent with many nuances. 
This paper describes the model of PronounSE, the dataset of explosion sounds and their various vocal mimic representations of plural people, and the results of high-quality synthesized SFXs from untrained vocal representations interactively.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Kyoto", + "institution": "Graduate School of Kyoto Sangyo University", + "dsl": "Department of Frontier Informatics" + }, + { + "country": "Japan", + "state": "", + "city": "Kyoto", + "institution": "Graduate School of Kyoto Sangyo University", + "dsl": "Department of Frontier Informatics" + } + ], + "personId": 170099 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "Kyoto", + "city": "Kyoto", + "institution": "Kyoto Sangyo University", + "dsl": "Faculty of Information Science and Engineering" + }, + { + "country": "Japan", + "state": "Kyoto", + "city": "Kyoto", + "institution": "Kyoto Sangyo University", + "dsl": "Faculty of Information Science and Engineering" + } + ], + "personId": 169740 + } + ] + }, + { + "id": 170774, + "typeId": 13744, + "title": "Demonstrating GROMIT: Opportunities and Challenges for Runtime Behavior Generation", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1132", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Many components of games, from texture materials to map layouts, can be generated algorithmically rather than manually in a process known as Procedural Content Generation. The ability for LLMs to translate plaintext requests into game code allows for the creation of systems that can generate game behaviors, fundamentally updating the rules of a game. We explore the implications of this in our accompanying paper through GROMIT, a novel LLM-based runtime behavior generation system for Unity. This demo showcases GROMIT through the Adventure Game demo described in the paper, updated to use the latest version of Chat-GPT. In the demo, players take the role of a wizard exploring a dungeon by fighting enemies and solving simple puzzles. An initial set of spells was created by the authors, and the player can combine existing spells to create new ones. When a novel spell combination occurs, GROMIT is invoked to generate a new resulting spell, the code for which is compiled at runtime. 
This demo serves as a general example of Runtime Behavior Generation, and as a showcase of the potential high impact generated behaviors can have, since some generated spells may indirectly affect the puzzle system of the game.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 170619 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 170673 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 170642 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "UC Berkeley", + "dsl": "EECS" + } + ], + "personId": 170496 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "UC Berkeley", + "dsl": "EECS" + } + ], + "personId": 169964 + } + ] + }, + { + "id": 170775, + "typeId": 13748, + "title": "Power-over-Skin: Full-Body Wearables Powered By Intra-Body RF Energy", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676394" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-7034", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175060, + 171050 + ], + "eventIds": [], + "abstract": "Powerful computing devices are now small enough to be easily worn on the body. However, batteries pose a major design and user experience obstacle, adding weight and volume, and generally requiring periodic device removal and recharging. In response, we developed Power-over-Skin, an approach using the human body itself to deliver power to many distributed, battery-free, worn devices. We demonstrate power delivery from on-body distances as far as from head-to-toe, with sufficient energy to power microcontrollers capable of sensing and wireless communication. We share results from a study campaign that informed our implementation, as well as experiments that validate our final system. 
We conclude with several demonstration devices, ranging from input controllers to longitudinal bio-sensors, which highlight the efficacy and potential of our approach.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "School of Computer Science" + } + ], + "personId": 169806 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 169949 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170374 + } + ] + }, + { + "id": 170776, + "typeId": 13744, + "title": "Demonstrating PopCore: Personal Fabrication of 3D Foamcore Models for Professional High-Quality Applications in Design and Architecture", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686761" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1135", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Personal fabrication", + "Laser cutting", + "Rapid prototyping", + "Manual assembly" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "PopCore is a fabrication technique that laser-cuts 3D models from paper-foam-paper sandwich materials. Its key elements are two laser-cut lever mechanisms that allow users to break off surrounding residue material, thereby “excavating” joints efficiently and with very high precision, which PopCore produces by laser cutting from the top and bottom. This produces flush joints, folded edges that are perfectly straight, and no burn marks—giving models a homogeneous, clean look. This allows applying personal fabrication to new fields, including industrial design, architecture, and packaging design, that require a visual finish beyond what traditional personal fabrication delivers. We demonstrate the algorithms and a software tool that generates PopCore automatically. 
Our user study participants rated PopCore models significantly more visually appealing (7.9/9) than models created using techniques from the related work (4.7/9 and 2.3/9) and suitable for presentation models (11/12 participants), products (10/12 participants) and high-end packaging (10/12 participants).", + "authors": [ + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Potsdam", + "institution": "Hasso Plattner Institute", + "dsl": "" + } + ], + "personId": 170474 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Potsdam", + "institution": "Hasso Plattner Institute", + "dsl": "Human Computer Interaction" + } + ], + "personId": 170693 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Potsdam", + "institution": "Hasso Plattner Institute", + "dsl": "" + } + ], + "personId": 170370 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "Brandenburg", + "city": "Potsdam", + "institution": "Hasso-Plattner-Institute", + "dsl": "Human-Computer-Interaction lab" + } + ], + "personId": 170373 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Potsdam", + "institution": "Hasso Plattner Institute", + "dsl": "" + } + ], + "personId": 170407 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Potsdam", + "institution": "Hasso Plattner Institute", + "dsl": "" + } + ], + "personId": 169978 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Potsdam", + "institution": "Hasso Plattner Institute", + "dsl": "" + } + ], + "personId": 170447 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Potsdam", + "institution": "Hasso Plattner Institute", + "dsl": "" + } + ], + "personId": 170350 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Potsdam", + "institution": "Hasso Plattner Institute", + "dsl": "" + } + ], + "personId": 169809 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Potsdam", + "institution": "Hasso Plattner Institute", + "dsl": "" + } + ], + "personId": 170420 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Potsdam", + "institution": "Hasso Plattner Institute", + "dsl": "" + } + ], + "personId": 170552 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Potsdam", + "institution": "Hasso Plattner Institute", + "dsl": "" + } + ], + "personId": 170282 + } + ] + }, + { + "id": 170777, + "typeId": 13744, + "title": "StegoType: Surface Typing from Egocentric Cameras", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686762" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1136", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Text input is a critical component of any general purpose computing system, yet efficient and natural text input remains a challenge in AR and VR. Headset based hand-tracking has recently become pervasive among consumer VR devices and affords the opportunity to enable touch typing on virtual keyboards. We present an approach for decoding touch typing on uninstrumented flat surfaces using only egocentric camera-based hand-tracking as input. 
While egocentric hand-tracking accuracy is limited by issues like self occlusion and image fidelity, we show that a sufficiently diverse training set of hand motions paired with typed text can enable a deep learning model to extract signal from this noisy input.\r\n\r\nFurthermore, by carefully designing a closed-loop data collection process, we can train an end-to-end text decoder that accounts for natural sloppy typing on virtual keyboards.\r\nWe evaluate our work with a user study (n=18) showing a mean online throughput of 42.4 WPM with an uncorrected error rate (UER) of 7% with our method compared to a physical keyboard baseline of 74.5 WPM at 0.8% UER, showing progress towards unlocking productivity and high throughput use cases in AR/VR.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 170240 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 170415 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 170604 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 169958 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 169759 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 169956 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 170308 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Houghton", + "institution": "Michigan Technological University", + "dsl": "Computer Science" + } + ], + "personId": 170580 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 170353 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 170336 + } + ] + }, + { + "id": 170778, + "typeId": 13744, + "title": "Toyteller: Toy-Playing with Character Symbols for AI-Powered Visual Storytelling", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686781" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1015", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "visual storytelling, toy-playing, generative AI" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We introduce Toyteller, an AI-powered storytelling system that allows users to generate a mix of story texts and visuals by directly manipulating character symbols like they are playing with toys. 
Anthropomorphized motions of character symbols can convey rich and nuanced social interactions between characters; Toyteller leverages these motions as (1) a means for users to steer story text generation and (2) an output format for generated visual accompaniment to user-provided story texts and user-controlled character motions. We enabled motion-steered story text generation and text-steered motion generation by mapping symbol motions and story texts onto a shared semantic vector space so that motion generation models and large language models can use it as a translational layer. We hope this demonstration sheds light on extending the range of modalities supported by generative human-AI co-creation systems.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Midjourney", + "dsl": "" + } + ], + "personId": 169907 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Midjourney", + "dsl": "" + } + ], + "personId": 169783 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Midjourney", + "dsl": "" + } + ], + "personId": 169936 + } + ] + }, + { + "id": 170779, + "typeId": 13744, + "title": "Active Haptic Feedback for a Virtual Wrist-Anchored User Interface", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686765" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1137", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Haptic Devices", + "Gestural Input", + "Human-Computer Interaction", + "Human-Centric Computing", + "Body-Anchored Interfaces" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "The presented system combines a virtual wrist-anchored user interface (UI) with a new low-profile, wrist-worn device that provides salient and expressive haptic feedback such as contact, pressure and broad-bandwidth vibration. This active feedback is used to add tactile cues to interactions with virtual mid-air UI elements that track the user's wrist; we demonstrate a simple menu-interaction task to showcase the utility of haptics for interactions with virtual buttons and sliders. 
Moving forward, we intend to use this platform to develop haptic guidelines for body-anchored interfaces and test multiple haptic devices across the body to create engaging interactions.", + "authors": [ + { + "affiliations": [ + { + "country": "Germany", + "state": "Baden-Württemberg", + "city": "Stuttgart", + "institution": "Max Planck Institute for Intelligent Systems", + "dsl": "Haptic Intelligence Department" + } + ], + "personId": 170542 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "Baden-Württemberg", + "city": "Stuttgart", + "institution": "Max Planck Institute for Intelligent Systems", + "dsl": "Robotic Materials Department" + }, + { + "country": "Germany", + "state": "Baden-Württemberg", + "city": "Stuttgart", + "institution": "Max Planck Institute for Intelligent Systems", + "dsl": "Haptic Intelligence Department" + } + ], + "personId": 169776 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Stuttgart", + "institution": "University of Stuttgart", + "dsl": "VISUS" + } + ], + "personId": 169950 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Stuttgart", + "institution": "Max Planck Institute for Intelligent Systems", + "dsl": "Haptic Intelligence Department" + } + ], + "personId": 169935 + } + ] + }, + { + "id": 170780, + "typeId": 13744, + "title": "TorqueCapsules: Fully-Encapsulated Flywheel Actuation Modules for Designing and Prototyping Movement-Based and Kinesthetic Interaction", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1017", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Flywheels are unique, versatile actuators that store and convert kinetic energy to torque, widely utilized in aerospace, robotics, haptics, and more. However, prototyping interaction using flywheels is not trivial due to safety concerns, unintuitive principles, and complex control.\r\nWe present TorqueCapsules: self-contained, fully-encapsulated flywheel actuation modules that make the flywheel actuators easy to control, safe to interact and prototype with, and quick to reconfigure and customize. By fully encapsulating a wireless microcontroller and a battery, the module can be readily attached, embedded, or stuck to everyday objects, worn to people’s bodies, or combined with other devices. With our custom GUI, both novice and expert users can easily control multiple modules to design and prototype flywheel-based actuation to prototype movements and kinesthetic haptics. We demonstrate various applications, including actuated everyday objects, wearable haptics, and expressive robots. 
We conducted workshops for novices and experts to employ TorqueCapsules to collect qualitative feedback and further application examples.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170126 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170466 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Jose", + "institution": "Independent Researcher", + "dsl": "" + } + ], + "personId": 169946 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170248 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170244 + } + ] + }, + { + "id": 170781, + "typeId": 13748, + "title": "Palmrest+: Expanding Laptop Input Space with Shear Force on Palm-Resting Area", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676371" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6977", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175063, + 171043 + ], + "eventIds": [], + "abstract": "The palmrest area of laptops has the potential as an additional input space, considering its consistent palm contact during keyboard interaction. We propose Palmrest+, leveraging shear force exerted on the palmrest area. We suggest two input techniques: Palmrest Shortcut, for instant shortcut execution, and Palmrest Joystick, for continuous value input. These allow seamless and subtle input amidst keyboard typing. Evaluation of Palmrest Shortcut against conventional keyboard shortcuts revealed faster performance for applying shear force in unimanual and bimanual-manner with a significant reduction in gaze shifting. Additionally, the assessment of Palmrest Joystick against the laptop touchpad demonstrated comparable performance in selecting one- and two- dimensional targets with low-precision pointing, i.e., for short distances and large target sizes. The maximal hand displacement significantly decreased for both Palmrest Shortcut and Palmrest Joystick compared to conventional methods. 
These findings verify the feasibility and effectiveness of leveraging the palmrest area as an additional input space on laptops, offering promising enhanced typing-related user interaction experiences.", + "authors": [ + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "HCI Lab" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 169777 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "" + } + ], + "personId": 170627 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "HCI Lab" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 170017 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "HCI Lab" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 169797 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "HCI Lab" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 169896 + } + ] + }, + { + "id": 170782, + "typeId": 13748, + "title": "WatchLink: Enhancing Smartwatches with Sensor Add-Ons via ECG Interface", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676329" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1047", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171022, + 175073 + ], + "eventIds": [], + "abstract": "We introduce a low-power communication method that lets smartwatches leverage existing electrocardiogram (ECG) hardware as a data communication interface. Our unique approach enables the connection of external, inexpensive, and low-power \"add-on\" sensors to the smartwatch, expanding its functionalities. These sensors cater to specialized user needs beyond those offered by pre-built sensor suites, at a fraction of the cost and power of traditional communication protocols, including Bluetooth Low Energy. To demonstrate the feasibility of our approach, we conduct a series of exploratory and evaluative tests to characterize the ECG interface as a communication channel on commercial smartwatches. We design a simple transmission scheme using commodity components, demonstrating cost and power benefits. Further, we build and test a suite of add-on sensors, including UV light, body temperature, buttons, and breath alcohol, all of which achieved testing objectives at low material cost and power usage. 
This research paves the way for personalized and user-centric wearables by offering a cost-effective solution to expand their functionalities.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School for Computer Science & Engineering" + } + ], + "personId": 169822 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 169910 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science and Engineering" + } + ], + "personId": 169908 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "" + } + ], + "personId": 170608 + } + ] + }, + { + "id": 170783, + "typeId": 13748, + "title": "PortalInk: 2.5D Visual Storytelling with SVG Parallax and Waypoint Transitions", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676376" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5640", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171021, + 175064 + ], + "eventIds": [], + "abstract": "Efforts to expand the authoring of visual stories beyond the 2D canvas have commonly mapped flat imagery to 3D scenes or objects. This translation requires spatial reasoning, as artists must think in two spaces. We propose PortalInk, a tool for artists to craft and export 2.5D graphical stories while remaining in 2D space by using SVG transitions. This is achieved via a parallax effect that generates a sense of depth that can be further explored using pan and zoom interactions. Any canvas position can be saved and linked to in a closed drawn stroke, or \"portal,\" allowing the artist to create spatially discontinuous, or even infinitely looping visual trajectories. 
We provide three case studies and a gallery to demonstrate how artists can naturally incorporate these interactions to craft immersive comics, as well as re-purpose them to support use cases beyond drawing such as animation, slide-based presentations, web design, and digital journalism.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Rhode Island", + "city": "Providence", + "institution": "Brown University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170659 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Rhode Island", + "city": "Providence", + "institution": "Brown University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170009 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "UC Berkeley", + "dsl": "Department of Electrical Engineering and Computer Sciences" + } + ], + "personId": 170575 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Rhode Island", + "city": "Providence", + "institution": "Brown University", + "dsl": "Department of Computer Science" + } + ], + "personId": 169824 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Rhode Island", + "city": "Providence", + "institution": "Brown University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170465 + } + ] + }, + { + "id": 170784, + "typeId": 13748, + "title": "Clarify: Improving Model Robustness With Natural Language Corrections", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676362" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6853", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171023, + 175084 + ], + "eventIds": [], + "abstract": "The standard way to teach models is by feeding them lots of data. However, this approach often teaches models incorrect ideas because they pick up on misleading signals in the data. To prevent such misconceptions, we must necessarily provide additional information beyond the training data. Prior methods incorporate additional instance-level supervision, such as labels for misleading features or additional labels for debiased data. However, such strategies require a large amount of labeler effort. We hypothesize that people are good at providing textual feedback at the concept level, a capability that existing teaching frameworks do not leverage. We propose Clarify, a novel interface and method for interactively correcting model misconceptions. Through Clarify, users need only provide a short text description of a model's consistent failure patterns. Then, in an entirely automated way, we use such descriptions to improve the training process. Clarify is the first end-to-end system for user model correction. Our user studies show that non-expert users can successfully describe model misconceptions via Clarify, leading to increased worst-case performance in two datasets. 
We additionally conduct a case study on a large-scale image dataset, ImageNet, using Clarify to find and rectify 31 novel hard subpopulations.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "Computer Science" + } + ], + "personId": 169848 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "Dept. of Computer Science" + } + ], + "personId": 170520 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "Computer Science" + } + ], + "personId": 169914 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "Computer Science" + } + ], + "personId": 170106 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "Computer Science" + } + ], + "personId": 169798 + } + ] + }, + { + "id": 170785, + "typeId": 13744, + "title": "ScrapMap: Interactive Color Layout for Scrap Quilting", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1140", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "ScrapMap is the artifact built for the UIST24 submission: \"ScrapMap: Interactive Color Layout for Scrap Quilting\". We would like to give UIST participants the opportunity to try out this software design tool during the conference. For our software demo, we envision being at a table with ScrapMap running on our computer. Participants will be able to walk up and interact with the tool.\r\nWe will also showcase a couple of author-sewn scrap quilts that were designed using ScrapMap.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 169884 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "Computer Science" + } + ], + "personId": 170294 + } + ] + }, + { + "id": 170786, + "typeId": 13744, + "title": "Demonstraion of Selfrionette: A Fingertip force to Avatar Motion and Diverse Haptics", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1020", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We propose Selfrionette, a controller that uses fingertip force input to drive avatar movements in virtual reality (VR). \r\nThis system enables users to interact with virtual objects and walk in VR using only fingertip force, overcoming physical and spatial constraints. 
Additionally, by fixing users' fingers, it provides users with counterforces equivalent to the applied force, allowing for diverse and wide dynamic range haptic feedback by adjusting the relationship between force input and virtual movement.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 170428 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Nara", + "institution": "Nara Institute of Science and Technology", + "dsl": "" + } + ], + "personId": 169821 + } + ] + }, + { + "id": 170787, + "typeId": 13748, + "title": "EarHover: Mid-Air Gesture Recognition for Hearables Using Sound Leakage Signals", + "award": "BEST_PAPER", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676367" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2814", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171034, + 175091 + ], + "eventIds": [], + "abstract": "We introduce EarHover, an innovative system that enables mid-air gesture input for hearables. Mid-air gesture input, which eliminates the need to touch the device and thus helps to keep hands and the device clean, has been known to have high demand based on previous surveys. However, existing mid-air gesture input methods for hearables have been limited to adding cameras or infrared sensors. By focusing on the sound leakage phenomenon unique to hearables, we have realized mid-air gesture recognition using a speaker and an external microphone that are highly compatible with hearables. The signal leaked to the outside of the device due to sound leakage can be measured by an external microphone, which detects the differences in reflection characteristics caused by the hand's speed and shape during mid-air gestures.\r\nAmong 27 types of gestures, we determined the seven most suitable gestures for EarHover in terms of signal discrimination and user acceptability. 
We then evaluated the gesture detection and classification performance of two prototype devices (in-ear type/open-ear type) for real-world application scenarios.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Yokohama", + "institution": "Keio University", + "dsl": "" + } + ], + "personId": 170323 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Yokohama", + "institution": "Keio University", + "dsl": "Lifestyle Computing Lab" + } + ], + "personId": 170709 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Sapporo", + "institution": "Hokkaido University", + "dsl": "Information Science and Technology" + } + ], + "personId": 170161 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "Ibaraki", + "city": "Tsukuba", + "institution": "University of Tsukuba", + "dsl": "IPLAB" + } + ], + "personId": 169886 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Yokohama", + "institution": "Keio University", + "dsl": "" + } + ], + "personId": 170595 + } + ] + }, + { + "id": 170788, + "typeId": 13749, + "title": "Exploring the Effects of Fantasy Level of Avatars on User Perception and Behavior", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686355" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-1653", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059, + 175072 + ], + "eventIds": [], + "abstract": "Embodying avatars in virtual reality (VR) has transformed human experiences, such as in medicine and education. However, there is limited information about users’ self-identifications and perceptions of highly fantastical avatars. This pilot study explored the impact of avatar types of low and high fantasy levels on adults’ perceptions and behaviors. Participants (N = 18) engaged in a VR experience with either a human or blue Muppet avatar to complete body movement tasks, a cube-touching game, and free-form exploration. Findings showed that participants in the high fantasy avatar condition reported higher identification with their avatar and more interest in social-emotional activities relative to the low fantasy human avatar condition. Across both conditions, participants stood extremely close to the virtual mirror. Additionally, we report on participants preferences and priorities for their future avatars. 
This offers insights for future research on avatar design, with implications for more engaging VR experience.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Austin", + "institution": "University of Texas at Austin", + "dsl": "School of Information" + } + ], + "personId": 170545 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Austin", + "institution": "University of Texas at Austin", + "dsl": "School of Information " + } + ], + "personId": 170044 + } + ] + }, + { + "id": 170789, + "typeId": 13748, + "title": "VoicePilot: Harnessing LLMs as Speech Interfaces for Assistive Robotics", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676401" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1605", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175067, + 171061 + ], + "eventIds": [], + "abstract": "Physically assistive robots present an opportunity to significantly increase the well-being and independence of individuals with motor impairments or other forms of disability who are unable to complete activities of daily living. Speech interfaces, especially ones that utilize Large Language Models (LLMs), can enable individuals to effectively and naturally communicate high-level commands and nuanced preferences to robots. Frameworks for integrating LLMs as interfaces to robots for high level task planning and code generation have been proposed, but fail to incorporate human-centric considerations which are essential while developing assistive interfaces. In this work, we present a framework for incorporating LLMs as speech interfaces for physically assistive robots, constructed iteratively with 3 stages of testing involving a feeding robot, culminating in an evaluation with 11 older adults at an independent living facility. We use both quantitative and qualitative data from the final study to validate our framework and additionally provide design guidelines for using LLMs as speech interfaces for assistive robots. 
Videos, code, and supporting files are located on our project website\\footnote{\\url{https://sites.google.com/andrew.cmu.edu/voicepilot/}}", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Robotics Institute" + } + ], + "personId": 170499 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 169991 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170439 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon", + "dsl": "Robotics Institute" + } + ], + "personId": 170299 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Mechanical Engineering Department" + } + ], + "personId": 170387 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Robotics Institute" + } + ], + "personId": 170283 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170175 + } + ] + }, + { + "id": 170790, + "typeId": 13748, + "title": "Beyond the Chat: Executable and Verifiable Text-Editing with LLMs", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676419" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2815", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171019, + 175065 + ], + "eventIds": [], + "abstract": "Conversational interfaces powered by Large Language Models (LLMs) have recently become a popular way to obtain feedback during document editing. However, standard chat-based conversational interfaces cannot explicitly surface the editing changes that they suggest. To give the author more control when editing with an LLM, we present InkSync, an editing interface that suggests executable edits directly within the document being edited. 
Because LLMs are known to introduce factual errors, Inksync also supports a 3-stage approach to mitigate this risk: Warn authors when a suggested edit introduces new information, help authors Verify the new information's accuracy through external search, and allow a third party to Audit with a-posteriori verification via a trace of all auto-generated content.\r\nTwo usability studies confirm the effectiveness of InkSync's components when compared to standard LLM-based chat interfaces, leading to more accurate and more efficient editing, and improved user experience.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Salesforce Research", + "dsl": "" + } + ], + "personId": 170568 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Palo Alto", + "institution": "Salesforce Research", + "dsl": "" + } + ], + "personId": 170242 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "UC Berkeley", + "dsl": "" + } + ], + "personId": 170686 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Palo Alto", + "institution": "Salesforce", + "dsl": "Salesforce Research" + } + ], + "personId": 170025 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Palo Alto", + "institution": "Salesforce AI", + "dsl": "" + } + ], + "personId": 170355 + } + ] + }, + { + "id": 170791, + "typeId": 13748, + "title": "VisCourt: In-Situ Guidance for Interactive Tactic Training in Mixed Reality", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676466" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2018", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171051, + 175089 + ], + "eventIds": [], + "abstract": "In team sports like basketball, understanding and executing tactics---coordinated plans of movements among players---are crucial yet complex, requiring extensive practice. These tactics require players to develop a keen sense of spatial and situational awareness. Traditional coaching methods, which mainly rely on basketball tactic boards and video instruction, often fail to bridge the gap between theoretical learning and the real-world application of tactics, due to shifts in view perspectives and a lack of direct experience with tactical scenarios. To address this challenge, we introduce VisCourt, a Mixed Reality (MR) tactic training system, in collaboration with a professional basketball team. To set up the MR training environment, we employed semi-automatic methods to simulate realistic 3D tactical scenarios and iteratively designed visual in-situ guidance. This approach enables full-body engagement in interactive training sessions on an actual basketball court and provides immediate feedback, significantly enhancing the learning experience. 
A user study with athletes and enthusiasts shows the effectiveness and satisfaction with VisCourt in basketball training and offers insights for the design of future SportsXR training systems.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "State Key Lab of CAD&CG" + } + ], + "personId": 170162 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "State Key Lab of CAD&CG" + } + ], + "personId": 169775 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Jiangsu", + "city": "Suzhou", + "institution": "Xi'an Jiaotong-Liverpool University", + "dsl": "School of Advanced Technology" + } + ], + "personId": 170290 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "State Key Lab of CAD&CG" + } + ], + "personId": 170514 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "State Key Lab of CAD&CG" + } + ], + "personId": 170033 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Ningbo", + "institution": "Zhejiang University", + "dsl": "School of Software Technology" + } + ], + "personId": 170138 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang Province", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "Department of Sports Science" + } + ], + "personId": 170419 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "Department of Sports Science" + } + ], + "personId": 170000 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "State Key Lab of CAD&CG" + } + ], + "personId": 170089 + } + ] + }, + { + "id": 170792, + "typeId": 13744, + "title": "Real-Time Word-Level Temporal Segmentation in Streaming Speech Recognition", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686738" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1029", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Rich-text captions are essential in helping communication for Deaf and hard-of-hearing (DHH) people, second language learners, and those with autism spectrum disorder (ASD). They also preserve nuances when converting speech to text, enhancing the realism of presentation scripts and conversation or speech logs. However, current real-time captioning systems lack the capability to alter text decorations at the word level, hindering the accurate conveyance of speaker intent. This paper reviews existing research on real-time captioning and proposes a solution that changes text decorations at the word level in real-time. As a prototype, we developed an application that adjusts word size based on the loudness of each spoken word. Beyond assisting DHH communication, this technology can be applied to multilingual translation conversations and other areas where nuanced text representation of voice input is beneficial. 
Our experimental results show significant improvements in conveying speaker intent, offering a more engaging and accessible captioning experience.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "Tokyo", + "city": "Bunkyo", + "institution": "the University of Tokyo", + "dsl": "Ishiguro Lab" + } + ], + "personId": 170150 + } + ] + }, + { + "id": 170793, + "typeId": 13744, + "title": "Demonstration of CARDinality: Interactive Card-shaped Robots with Locomotion and Haptics using Vibration ", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1021", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "This demo introduces a novel approach to tangible user interfaces (TUIs) by leveraging the form-factor of cards to create thin robots equipped with vibrational capabilities for locomotion and haptic feedback. The system is composed of flat-shaped robots with on-device sensing and wireless control, which offer lightweight portability and scalability. This research introduces a hardware prototype and a training approach which involves a computer vision-based method for omnidirectional locomotion. Applications include augmented card playing, educational tools, and assistive technology, which showcase Cardinality’s versatility in tangible interaction.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "Department of Computer Science" + } + ], + "personId": 169974 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "Department of Computer Science" + } + ], + "personId": 170540 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170271 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + }, + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "" + } + ], + "personId": 169961 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "" + } + ], + "personId": 169947 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170244 + } + ] + }, + { + "id": 170794, + "typeId": 13744, + "title": "Demo of EITPose: Wearable and Practical Electrical Impedance Tomography for Continuous Hand Pose Estimation", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686770" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1142", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Real-time hand pose estimation has a wide range of applications spanning gaming, robotics, and human-computer interaction. 
In this paper, we introduce EITPose, a wrist-worn, continuous 3D hand pose estimation approach that uses eight electrodes positioned around the forearm to model its interior impedance distribution during pose articulation. Unlike wrist-worn systems relying on cameras, EITPose has a slim profile (12 mm thick sensing strap) and is power-efficient (consuming only 0.3 W of power), making it an excellent candidate for integration into consumer electronic devices. In a user study involving 22 participants, EITPose achieves with a within-session mean per joint positional error of 11.06 mm. Its camera-free design prioritizes user privacy, yet it maintains cross-session and cross-user accuracy levels comparable to camera-based wrist-worn systems, thus making EITPose a promising technology for practical hand pose estimation.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 169976 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human Computer Interaction Institute" + } + ], + "personId": 170168 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170136 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "School of Computer Science" + } + ], + "personId": 170564 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Evanston", + "institution": "Northwestern University", + "dsl": "Computer Science" + } + ], + "personId": 169889 + } + ] + }, + { + "id": 170795, + "typeId": 13748, + "title": "Silent Impact: Tracking Tennis Shots from the Passive Arm", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676403" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-4691", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171056, + 175062 + ], + "eventIds": [], + "abstract": "Wearable technology has transformed sports analytics, offering new dimensions in enhancing player experience. Yet, many solutions involve cumbersome setups that inhibit natural motion. In tennis, existing products require sensors on the racket or dominant arm, causing distractions and discomfort. We propose Silent Impact, a novel and user-friendly system that analyzes tennis shots using a sensor placed on the passive arm. Collecting Inertial Measurement Unit sensor data from 20 recreational tennis players, we developed neural networks that exclusively utilize passive arm data to detect and classify six shots, achieving a classification accuracy of 88.2% and a detection F1 score of 86.0%, comparable to the dominant arm. These models were then incorporated into an end-to-end prototype, which records passive arm motion through a smartwatch and displays a summary of shots on a mobile app. User study (N=10) showed that participants felt less burdened physically and mentally using Silent Impact on the passive arm. 
Overall, our research establishes the passive arm as an effective, comfortable alternative for tennis shot analysis, advancing user-friendly sports analytics.", + "authors": [ + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "School of Computing" + } + ], + "personId": 169852 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "" + } + ], + "personId": 170690 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "School of Computing" + } + ], + "personId": 169944 + } + ] + }, + { + "id": 170796, + "typeId": 13744, + "title": "DeMorph: Morphing Devices Functioning via Sequential Degradation", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686737" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1022", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Shape-changing Interface", + "eco-friendly", + "sustainability", + "degradation" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "While it may initially seem counterintuitive to view degradation within an operating system as advantageous, one could argue that, when intentionally designed, the controlled breakdown of materials—whether physical, chemical, or biological—can be leveraged for specific functions. To apply this principle to the development of functional morphing devices, we have introduced the concept of \"Degrade to Function\" (DtF). This concept is aimed at creating eco-friendly and self-contained morphing devices that operate through a series of environmentally-triggered degradations. In this demonstration, we elucidate the DtF design strategy and present five application examples across a range of ecosystems.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 170572 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170041 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "Mechanical Engineering, Morphing Matter Lab" + } + ], + "personId": 170224 + } + ] + }, + { + "id": 170797, + "typeId": 13744, + "title": "Demonstrating Z-Band: Enabling Subtle Hand Interactions with Bio-impedance Sensing on the Wrist", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686766" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1143", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Wearable input devices", + "AR/VR", + "RF sensing", + "Sensor fusion", + "Machine Learning", + "Interaction Techniques" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "The increasing popularity of wearable extended reality (XR) technologies presents unique challenges for user input, as traditional methods like touchscreens or controllers can be cumbersome and less practical. 
As a result, researchers are exploring novel approaches to enable seamless and intuitive interaction within XR environments. In this work, we present Z-Band, a novel interaction device that enables subtle finger input in a wrist-worn form factor. Z-Band utilizes radio frequency (RF) sensing, leveraging the human hand as an antenna to detect subtle changes in hand impedance caused by finger movements. We demonstrate the feasibility and effectiveness of Z-Band by showcasing its capabilities through two applications: a music player controlled by finger gestures and a gesture-based game.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School for Computer Science & Engineering" + } + ], + "personId": 169822 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul Allen School of Computer Science and Engineering" + } + ], + "personId": 170389 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "SEATTLE", + "institution": "University of Washington", + "dsl": "Paul Allen School of Computer Science and Engineering" + } + ], + "personId": 170608 + } + ] + }, + { + "id": 170798, + "typeId": 13748, + "title": "MouthIO: Fabricating Customizable Oral User Interfaces with Integrated Sensing and Actuation", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676443" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1182", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175060, + 171050 + ], + "eventIds": [], + "abstract": "This paper introduces MouthIO, the first customizable intraoral user interface that can be equipped with various sensors and output components. MouthIO consists of an SLA-printed brace that houses a flexible PCB within a bite-proof enclosure positioned between the molar teeth and inner cheeks. Our MouthIO design and fabrication technique enables makers to customize the oral user interfaces in both form and function at low cost. All parts in contact with the oral cavity are made of bio-compatible materials to ensure safety, while the design takes into account both comfort and portability. We demonstrate MouthIO through three application examples ranging from beverage consumption monitoring, health monitoring, to assistive technology. 
Results from our full-day user study indicate high wearability and social acceptance levels, while our technical evaluation demonstrates the device's ability to withstand adult bite forces.", + "authors": [ + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "" + } + ], + "personId": 170394 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "" + } + ], + "personId": 169700 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "" + } + ], + "personId": 170127 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Computer Science, Aarhus University", + "dsl": "" + } + ], + "personId": 170491 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170347 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "" + } + ], + "personId": 169695 + } + ] + }, + { + "id": 170799, + "typeId": 13744, + "title": "Palmrest+: Expanding Laptop Input Space with Shear Force on Palm-Resting Area", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1023", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We propose Palmrest+, an additional input space on laptops that utilizes shear force on the palmrest area. Palmrest+ enables seamless and rapid input in the middle of text entry, leveraging the consistent contact between users' palms and the palmrest areas during keyboard interaction. Our prototype has the size of a conventional laptop, and incorporated sensors to detect shear forces on both palmrest areas. We present two input techniques: Palmrest Shortcut, for instant shortcut execution, and Palmrest Joystick, for continuous value input. Palmrest Shortcut involves applying shear force in one of four directions and can be performed unimanually, bimanually, or with the combinations of a key press. Palmrest Joystick allows for one- or two- dimensional continuous manipulation. Users can use their non-dominant hand for quasi-mode activation and dominant hand for controlling the value in rate-control manner. 
We demonstrate the practicality of these techniques through real-world applications in Office applications including Whiteboard, and Document Editor application.", + "authors": [ + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "HCI Lab" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 169777 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "" + } + ], + "personId": 170627 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "HCI Lab" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 170017 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "HCI Lab" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 169797 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "HCI Lab" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 169896 + } + ] + }, + { + "id": 170801, + "typeId": 13744, + "title": "PointerVol: A Laser Pointer for Swept Volumetric Displays", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1145", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "A laser pointer is a commonly used device that does not require communication with the display system or modifications on the applications, the presenter can just take a pointer and start using it. When a laser pointer is used on a volumetric display, a line rather than a point appears, making it not suitable for pointing at 3D locations. PointerVol is a modified laser pointer that allows users to point to 3D positions inside a swept volumetric display. We propose two PointerVol implementations based on timing and distance measurements, we evaluate the pointing performance using them. Finally, we present other features such as multi-user pointing, line patterns and a multi-finger wearable. 
PointerVol is a simple device that can help to popularize volumetric displays, or at least to make them more usable for presentations with true-3D content.", + "authors": [ + { + "affiliations": [ + { + "country": "Spain", + "state": "", + "city": "Pamplona", + "institution": "Universidad Pública de Navarra", + "dsl": "" + } + ], + "personId": 170606 + }, + { + "affiliations": [ + { + "country": "Spain", + "state": "", + "city": "Pamplona", + "institution": "Universidad Publica de Navarra", + "dsl": "UpnaLab" + } + ], + "personId": 170108 + }, + { + "affiliations": [ + { + "country": "Spain", + "state": "", + "city": "Pamplona", + "institution": "Universidad Pública de Navarra", + "dsl": "" + } + ], + "personId": 170565 + }, + { + "affiliations": [ + { + "country": "Spain", + "state": "", + "city": "Pamplona", + "institution": "Public University of Navarra", + "dsl": "" + } + ], + "personId": 169948 + }, + { + "affiliations": [ + { + "country": "Spain", + "state": "", + "city": "Pamplona", + "institution": "Universidad Pública de Navarra", + "dsl": "" + } + ], + "personId": 169865 + }, + { + "affiliations": [ + { + "country": "Spain", + "state": "", + "city": "Pamplona", + "institution": "Universidad Pública de Navarra", + "dsl": "" + } + ], + "personId": 169744 + }, + { + "affiliations": [ + { + "country": "Spain", + "state": "Navarre", + "city": "Pamplona", + "institution": "Universidad Publica de Navarra", + "dsl": "UpnaLab" + } + ], + "personId": 170633 + } + ] + }, + { + "id": 170802, + "typeId": 13744, + "title": "ScreenConcealer: Privacy-protection System with Obfuscations for Screen Sharing", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686750" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1024", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Privacy", + "Personal data", + "Virtual meetings", + "Screen sharing" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "During screen sharing in virtual meetings, there have been many cases of unintentional disclosure of personal data that can directly identify users (e.g., names or addresses) or private information that directly does not identify users but that they do not want to share (e.g., users' preferences or online activities). We propose a system that protects personal data and private information on web browsers from being viewed by others. The system automatically takes screenshots of the web-browser window and detects personal data and private information from the images. The system then obfuscates the detected area with blocking or blurring, and displays it as a duplicated window. We present an experiment on a model that detects personal data and private information on web browsers to implement our system. 
We also introduce several features to improve user experience.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "Ochanomizu University", + "dsl": "" + } + ], + "personId": 170426 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "LY Corporation", + "dsl": "" + } + ], + "personId": 170189 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "Ochanomizu University", + "dsl": "" + } + ], + "personId": 170417 + } + ] + }, + { + "id": 170803, + "typeId": 13748, + "title": "EVE: Enabling Anyone to Train Robots using Augmented Reality", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676413" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2273", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171051, + 175089 + ], + "eventIds": [], + "abstract": "The increasing affordability of robot hardware is accelerating the integration of robots into everyday activities. However, training a robot to automate a task requires expensive trajectory data where a trained human annotator moves a physical robot to train it. Consequently, only those with access to robots produce demonstrations to train robots. In this work, we remove this restriction with EVE, an iOS app that enables everyday users to train robots using intuitive augmented reality visualizations, without needing a physical robot. With EVE, users can collect demonstrations by specifying waypoints with their hands, visually inspecting the environment for obstacles, modifying existing waypoints, and verifying collected trajectories. In a user study (N=14, D=30) consisting of three common tabletop tasks, EVE outperformed three state-of-the-art interfaces in success rate and was comparable to kinesthetic teaching—physically moving a physical robot—in completion time, usability, motion intent communication, enjoyment, and preference (mean of p=0.30). EVE allows users to train robots for personalized tasks, such as sorting desk supplies, organizing ingredients, or setting up board games. We conclude by enumerating limitations and design considerations for future AR-based demonstration collection systems for robotics.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 169801 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. 
Allen School of Computer Science & Engineering" + } + ], + "personId": 170053 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170345 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Computer Science and Engineering" + }, + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "NVIDIA", + "dsl": "" + } + ], + "personId": 169970 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Computer Science & Engineering" + }, + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Allen Institute for Artificial Intelligence", + "dsl": "" + } + ], + "personId": 170218 + } + ] + }, + { + "id": 170804, + "typeId": 13744, + "title": "Demonstration of Wheeler: A Three-Wheeled Input Device for Usable, Efficient, and Versatile Non-Visual Interaction", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686749" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1025", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Non-visual interaction", + "input device", + "mouse", + "haptics", + "multi-wheel", + "rotational input", + "blind", + "vision impairments" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Navigating multi-level menus with complex hierarchies remains a big challenge for blind and low-vision users, who predominantly use screen readers to interact with computers. To that end, we demonstrate Wheeler, a three-wheeled input device with two side buttons that can speed up complex multi-level hierarchy navigation in common applications. When in operation, the three wheels of Wheeler are each mapped to a different level in the application hierarchy. Each level can be independently traversed using its designated wheel, allowing users to navigate through multiple levels efficiently. Wheeler's three wheels can also be repurposed for other tasks such as 2D cursor manipulation. 
In this demonstration, we describe the different operation modes and usage of Wheeler.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "University Park", + "institution": "Pennsylvania State University", + "dsl": "College of Information Sciences and Technology" + } + ], + "personId": 169922 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New Hampshire", + "city": "Durham", + "institution": "University of New Hampshire", + "dsl": " Cognitive Assistive Robotics Lab" + } + ], + "personId": 169923 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "State College", + "institution": "Pennsylvania State University", + "dsl": "College of Information Sciences and Technology" + } + ], + "personId": 169816 + }, + { + "affiliations": [ + { + "country": "Bangladesh", + "state": "", + "city": "Dhaka", + "institution": "Innovation Garage Limited", + "dsl": "" + } + ], + "personId": 170081 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Fordham University", + "dsl": "Computer and Information Science" + } + ], + "personId": 170493 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "University Park ", + "institution": "Pennsylvania State University", + "dsl": "College of Information Sciences and Technology" + } + ], + "personId": 170647 + } + ] + }, + { + "id": 170805, + "typeId": 13748, + "title": "Pro-Tact: Hierarchical Synthesis of Proprioception and Tactile Exploration for Eyes-Free Ray Pointing on Out-of-View VR Menus", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676324" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-9341", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175074, + 171031 + ], + "eventIds": [], + "abstract": "We introduce Pro-Tact, a novel eyes-free pointing technique for interacting with out-of-view (OoV) VR menus. This technique combines rapid rough pointing using proprioception with fine-grain adjustments through tactile exploration, enabling menu interaction without visual attention. Our user study demonstrated that Pro-Tact allows users to select menu items accurately (95% accuracy for 54 items) in an eyes-free manner, with reduced fatigue and sickness compared to eyes-engaged interaction. Additionally, we observed that participants voluntarily interacted with OoV menus eyes-free when Pro-Tact's tactile feedback was provided in practical VR application usage contexts. 
This research contributes by introducing the novel interaction technique, Pro-Tact, and quantitatively evaluating its benefits in terms of performance, user experience, and user preference in OoV menu interactions.", + "authors": [ + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 169930 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 169777 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "HCI Lab, School of Computing" + } + ], + "personId": 170398 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 170628 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 169896 + } + ] + }, + { + "id": 170806, + "typeId": 13748, + "title": "An Interactive System for Suporting Creative Exploration of Cinematic Composition Designs", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676393" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-9583", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171021, + 175064 + ], + "eventIds": [], + "abstract": "Designing cinematic compositions, which involves moving cameras through a scene, is essential yet challenging in filmmaking. Machinima filmmaking provides real-time virtual environments for exploring different compositions flexibly and efficiently. However, producing high-quality cinematic compositions in such environments still requires significant cinematography skills and creativity. This paper presents Cinemassist, a tool designed to support and enhance this creative process by generating a variety of cinematic composition proposals at both keyframe and scene levels, which users can incorporate into their workflows and achieve more creative results. At the crux of our system is a deep generative model trained on real movie data, which can generate plausible, diverse camera poses conditioned on 3D animations and additional input semantics. Our model enables an interactive cinematic composition design workflow where users can co-design with the model by being inspired by model-generated suggestions while having control over the generation process. 
Our user study and expert rating find Cinemassist can facilitate the design process for users of different backgrounds and enhance the design quality especially for users with animation expertise, demonstrating its potential as an invaluable tool in the context of digital filmmaking.", + "authors": [ + { + "affiliations": [ + { + "country": "Hong Kong", + "state": "", + "city": "Hong Kong", + "institution": "The Hong Kong Polytechnic University", + "dsl": "School of Design" + } + ], + "personId": 170584 + }, + { + "affiliations": [ + { + "country": "Hong Kong", + "state": "", + "city": "Kowloon", + "institution": "The Hong Kong Polytechnic University", + "dsl": "School of Design" + } + ], + "personId": 170225 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Shanghai", + "city": "Shanghai", + "institution": "ShanghaiTech University", + "dsl": "" + } + ], + "personId": 169868 + } + ] + }, + { + "id": 170807, + "typeId": 13744, + "title": "HoloChemie - Sustainable Fabrication of Soft Biochemical Holographic devices for Ubiquitous Sensing", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1026", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Sustainable fabrication approaches and biomaterials are increasingly being used in HCI for fabricating interactive devices. However, thus far majority of the work has focused on integrating electronics. This paper takes a biochemical approach to explore sustainable fabrication approaches for creating biological and environmental sensing devices. \r\nFirstly, we contribute a set of biochemical formulations for biological and environmental sensing which are compatible with bio-sourced and environment-friendly substrate materials. Our formulations are based on a combination of enzymes derived from bacteria and fungi, plant extracts and commercially available chemicals to sense both liquid and gaseous analytes: glucose, lactic acid, pH levels and carbon dioxide. Our novel holographic sensing scheme not only allows for the detection of the presence of analytes but also enables quantitative estimation of the analyte levels. 
We present a set of application scenarios that demonstrate the versatility of our approach and discuss the sustainability aspects, its limitations, and the implications for bio-chemical systems in HCI.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "Department of Electrical and Software Engineering " + } + ], + "personId": 170536 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "Electrical and Software Engineering Department" + } + ], + "personId": 170603 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "Department of Biomedical Engineering " + } + ], + "personId": 170444 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "Biomedical Engineering" + } + ], + "personId": 170217 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "Department of Computer Science" + } + ], + "personId": 170094 + } + ] + }, + { + "id": 170808, + "typeId": 13744, + "title": "Demo of PortaChrome: A Portable Contact Light Source for Integrated Re-Programmable Multi-Color Textures", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686774" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1147", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "digital fabrication", + "programmable textures", + "photochromic dyes" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "In this demo, we present PortaChrome, a portable light source that can be attached to everyday objects to reprogram the color of surfaces in contact with them. When PortaChrome makes contact with objects that were previously coated with photochromic dyes, the UV and RGBs LEDs inside PortaChrome create multi-color textures on the objects. In contrast to prior work, which used projectors for the color-change, PortaChrome has a thin and flexible form factor, which allows the color-change process to be integrated into daily user interactions. Because of the close distance between the light source and the photochromic object, PortaChrome creates color textures in less than 4 minutes on average, which is 8 times faster than prior work. 
We demonstrate PortaChrome with four application examples, including data visualizations on textiles and personalized wearables.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170620 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT", + "dsl": "" + } + ], + "personId": 170698 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "Department of Electrical Engineering and Computer Sciences" + } + ], + "personId": 170512 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170136 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170340 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 169939 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170651 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170413 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "" + } + ], + "personId": 169695 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170347 + } + ] + }, + { + "id": 170809, + "typeId": 13744, + "title": "Demonstration of WorldScribe: Towards Context-Aware Live Visual Descriptions", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1148", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Automated live visual descriptions can aid blind people in understanding their surroundings with autonomy and independence. However, providing descriptions that are rich, contextual, and just-in-time has been a long-standing challenge in accessibility. In this work, we develop WorldScribe, a system that generates automated live real-world visual descriptions that are customizable and adaptive to users’ contexts: (i) WorldScribe’s descriptions are tailored to users’ intents and prioritized based on semantic relevance. (ii) WorldScribe is adaptive to visual contexts, e.g., providing consecutively succinct descriptions for dynamic scenes, while presenting longer and detailed ones for stable settings. (iii) WorldScribe is adaptive to sound contexts, e.g., increasing volume in noisy environments, or pausing when conversations start. Powered by a suite of vision, language, and sound recognition models, WorldScribe introduces a description generation pipeline that balances the tradeoffs between their richness and latency to support real-time use. 
The design of WorldScribe is informed by prior work on providing visual descriptions and a formative study with blind participants. Our user study and subsequent pipeline evaluation show that WorldScribe can provide real-time and fairly accurate visual descriptions to facilitate environment understanding that is adaptive and customized to users’ contexts. Finally, we discuss the implications and further steps toward making live visual descriptions more context-aware and humanized.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170638 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170646 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170298 + } + ] + }, + { + "id": 170810, + "typeId": 13744, + "title": "Interactive Demo: Facilitating the Parametric Definition of Geometric Properties in Programming-Based CAD", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1027", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Creating parametric designs in programming-based CAD applications poses significant challenges due to the need for complex programming and arithmetic expressions to define geometric properties and link object attributes. To address this, we present a solution implemented in the CAD application OpenSCAD that enables the retrieval of parametric expressions directly from visual representations, streamlining the design process. Our approach extends the abstract definition of Constructive Solid Geometry (CSG) by incorporating handles, allowing users to target specific parts of geometries. It also facilitates users in extracting parametric definitions for part positions by interacting with visual elements and reusing them in code to define new elements' dimensions or positions. Specifically, users can extract the position of a handle or the delta vector between two handles, facilitating precise parametric movement and alignment. This proof-of-concept demonstrates the potential of these interactions for extending to other attributes, such as orientation or semantic information, thereby improving interactivity and facilitating the design of parametric models in programming-based CAD applications.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Ottawa", + "institution": "Carleton University", + "dsl": "School of Information Technology" + } + ], + "personId": 169705 + }, + { + "affiliations": [ + { + "country": "France", + "state": "", + "city": "Lille", + "institution": "Université de Lille", + "dsl": "" + } + ], + "personId": 170405 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Ottawa", + "institution": "Carleton University", + "dsl": "" + } + ], + "personId": 170159 + }, + { + "affiliations": [ + { + "country": "France", + "state": "", + "city": "Lille", + "institution": "Univ. 
Lille, CNRS, Inria, Centrale Lille, UMR 9189 CRIStAL", + "dsl": "" + } + ], + "personId": 169810 + } + ] + }, + { + "id": 170811, + "typeId": 13748, + "title": "What's in a cable? Abstracting Knitting Design Elements with Blended Raster/Vector Primitives", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676351" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5652", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171020, + 175086 + ], + "eventIds": [], + "abstract": "In chart-based programming environments for machine knitting, patterns are specified at a low level by placing operations on a grid. This highly manual workflow makes it challenging to iterate on design elements such as cables, colorwork, and texture. While vector-based abstractions for knitting design elements may facilitate higher-level manipulation, they often include interdependencies which require stitch-level reconciliation. To address this, we contribute a new way of specifying knits with blended vector and raster primitives. Our abstraction supports the design of interdependent elements like colorwork and texture. We have implemented our blended raster/vector specification in a direct manipulation design tool where primitives are layered and rasterized, allowing for simulation of the resulting knit structure and generation of machine instructions. Through examples, we show how our approach enables higher-level manipulation of various knitting techniques, including intarsia colorwork, short rows, and cables. Specifically, we show how our tool supports the design of complex patterns including origami pleat patterns and capacitive sensor patches.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Human Centered Design and Engineering" + } + ], + "personId": 170192 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "" + } + ], + "personId": 170216 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Boston", + "institution": "Boston University", + "dsl": "Computer Science" + } + ], + "personId": 170069 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "" + } + ], + "personId": 170689 + } + ] + }, + { + "id": 170812, + "typeId": 13748, + "title": "MyWebstrates: Webstrates as Local-first Software", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676445" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6863", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171045, + 175069 + ], + "eventIds": [], + "abstract": "Webstrates are web substrates, a practical realization of shareable dynamic media under which distributability, shareability, and malleability are fundamental software principles. Webstrates blur the distinction between application and document in a way that enables users to share, repurpose, and refit software across a variety of domains, but its reliance on a central server constrains its use; it is at odds with personal and collective control of data; and limits applications to the web. 
We extend the fundamental principles to include interoperability and sovereignty over data and propose MyWebstrates, an implementation of Webstrates on top of a new, lower-level substrate for synchronization built around local-first software principles. MyWebstrates registers itself in the user’s browser and function as a piece of local software that can selectively synchronise data over sync servers or peer-to-peer connections. We show how MyWebstrates extends Webstrates to enable offline collaborative use, interoperate between Webstrates on non-web technologies such as Unity, and maintain personal and collective sovereignty over data. We demonstrate how this enables new types of applications of Webstrates and discuss limitations of this approach and new challenges that it reveals.", + "authors": [ + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170679 + }, + { + "affiliations": [ + { + "country": "France", + "state": "", + "city": "Paris", + "institution": "Institut Polytechnique de Paris", + "dsl": "LTCI, Télécom Paris" + } + ], + "personId": 169778 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Ink & Switch", + "dsl": "" + } + ], + "personId": 170688 + } + ] + }, + { + "id": 170813, + "typeId": 13744, + "title": "Demonstration of ProgramAlly: Creating Custom Visual Access Programs via Multi-Modal End-User Programming", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1151", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Existing visual assistive technologies are built for simple and common use cases, and have few avenues for blind people to customize their functionalities. Drawing from prior work on DIY assistive technology, this paper investigates end-user programming as a means for users to create and customize visual access programs to meet their unique needs. We introduce ProgramAlly, a system for creating custom filters for visual information, e.g., `find NUMBER on BUS', leveraging three end-user programming approaches: block programming, natural language, and programming by example. To implement ProgramAlly, we designed a representation of visual filtering tasks based on scenarios encountered by blind people, and integrated a set of on-device and cloud models for generating and running these programs. In user studies with 12 blind adults, we found that participants preferred different programming modalities depending on the task, and envisioned using visual access programs to address unique accessibility challenges that are otherwise difficult with existing applications. 
Through ProgramAlly, we present an exploration of how blind end-users can create visual access programs to customize and control their experiences.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170624 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170483 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "School of Information" + } + ], + "personId": 169750 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170298 + } + ] + }, + { + "id": 170814, + "typeId": 13749, + "title": "Flexmock: Fast, easy, stockable smocking method using 3D printed self-shrinkable pattern sheet", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686335" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-3946", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059, + 175086 + ], + "eventIds": [], + "abstract": "In this study, we propose Flexmock, a fabrication method that enables users to easily generate smocking patterns onto cloth by shrinking attachment sheets with geometric patterns. The proposed method offers the following advantages: (1) creating smocking patterns parametrically using design software and simulating the finished product; (2) automatically generating and exporting data for self-shrinking attachments; and (3) quickly applying smocking patterns to fabrics via attached 3D printed pattern sheets.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Miyagi", + "institution": "Miyagi university", + "dsl": "" + } + ], + "personId": 170551 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Miyagi", + "institution": "Miyagi university", + "dsl": "" + } + ], + "personId": 169774 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Miyagi", + "institution": "Miyagi university", + "dsl": "" + } + ], + "personId": 170423 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Miyagi", + "institution": "Miyagi university", + "dsl": "" + } + ], + "personId": 170714 + } + ] + }, + { + "id": 170815, + "typeId": 13744, + "title": "Demonstrating DrawTalking: Building Interactive Worlds by Sketching and Speaking", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1031", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": " We introduce DrawTalking, an approach to building and controlling interactive worlds by sketching and speaking during storytelling. It emphasizes user control and flexibility, and gives programming-like capability without requiring code. 
We demonstrate the prototype and invite guests to interact with it live.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "New York University", + "dsl": "" + } + ], + "personId": 170618 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 169841 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Jose", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 169812 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Diego", + "institution": "University of California, San Diego", + "dsl": "Department of Cognitive Science and Design Lab" + } + ], + "personId": 170393 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "New York University", + "dsl": "Future Reality Lab" + } + ], + "personId": 169849 + } + ] + }, + { + "id": 170816, + "typeId": 13748, + "title": "VisionTasker: Mobile Task Automation Using Vision Based UI Understanding and LLM Task Planning", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676386" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5538", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175076, + 171053 + ], + "eventIds": [], + "abstract": "Mobile task automation is an emerging field that leverages AI to streamline and optimize the execution of routine tasks on mobile devices, thereby enhancing efficiency and productivity. Traditional methods, such as Programming By Demonstration (PBD), are limited due to their dependence on predefined tasks and susceptibility to app updates. Recent advancements have utilized the view hierarchy to collect UI information and employed Large Language Models (LLM) to enhance task automation. However, view hierarchies have accessibility issues and face potential problems like missing object descriptions or misaligned structures. This paper introduces VisionTasker, a two-stage framework combining vision-based UI understanding and LLM task planning, for mobile task automation in a step-by-step manner. VisionTasker firstly converts a UI screenshot into natural language interpretations using a vision-based UI understanding approach, eliminating the need for view hierarchies. Secondly, it adopts a step-by-step task planning method, presenting one interface at a time to the LLM. The LLM then identifies relevant elements within the interface and determines the next action, enhancing accuracy and practicality. Extensive experiments show that VisionTasker outperforms previous methods, providing effective UI representations across four datasets. Additionally, in automating 147 real-world tasks on an Android smartphone, VisionTasker demonstrates advantages over humans in tasks where humans show unfamiliarity and shows significant improvements when integrated with the PBD mechanism. 
VisionTasker is open-source and available at https://github.com/AkimotoAyako/VisionTasker.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Xi'an", + "institution": "Xi'an Jiaotong University", + "dsl": "MOE KLINNS Lab" + } + ], + "personId": 170197 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Xi'an", + "institution": "Xi'an Jiaotong University", + "dsl": "MOE KLINNS Lab" + } + ], + "personId": 169892 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Xi'an", + "institution": "Xi'an Jiaotong University", + "dsl": "" + } + ], + "personId": 170032 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Xi’an", + "institution": "Xi'an Jiaotong University", + "dsl": "" + } + ], + "personId": 170051 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Xi’an ", + "institution": "Xi’an Jiaotong University ", + "dsl": "MOE KLINNS Lab" + } + ], + "personId": 170473 + } + ] + }, + { + "id": 170817, + "typeId": 13748, + "title": "TouchInsight: Uncertainty-aware Rapid Touch and Text Input for Mixed Reality from Egocentric Vision", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676330" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2943", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175063, + 171043 + ], + "eventIds": [], + "abstract": "While passive surfaces offer numerous benefits for interaction in mixed reality, reliably detecting touch input solely from head-mounted cameras has been a long-standing challenge. Camera specifics, hand self-occlusion, and rapid movements of both head and fingers introduce considerable uncertainty about the exact location of touch events. Existing methods have thus not been capable of achieving the performance needed for robust interaction.\r\nIn this paper, we present a real-time pipeline that detects touch input from all ten fingers on any physical surface, purely based on egocentric hand tracking. Our method TouchInsight comprises a neural network to predict the moment of a touch event, the finger making contact, and the touch location. TouchInsight represents locations through a bivariate Gaussian distribution to account for uncertainties due to sensing inaccuracies, which we resolve through contextual priors to accurately infer intended user input.\r\nWe first evaluated our method offline and found that it locates input events with a mean error of 6.3 mm, and accurately detects touch events (F1=0.99) and identifies the finger used (F1=0.96). In an online evaluation, we then demonstrate the effectiveness of our approach for a core application of dexterous touch input: two-handed text entry. 
In our study, participants typed 37.0 words per minute with an uncorrected error rate of 2.9% on average.", + "authors": [ + { + "affiliations": [ + { + "country": "Switzerland", + "state": "", + "city": "Zürich", + "institution": "ETH Zürich", + "dsl": "Department of Computer Science" + } + ], + "personId": 170554 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Facebook Reality Labs", + "dsl": "" + } + ], + "personId": 170158 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Reality Labs Research", + "dsl": "Meta" + } + ], + "personId": 170415 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Facebook", + "dsl": "Facebook Reality Labs" + } + ], + "personId": 170353 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta Reality Labs", + "dsl": "" + } + ], + "personId": 170336 + }, + { + "affiliations": [ + { + "country": "Switzerland", + "state": "", + "city": "Zurich", + "institution": "ETH Zürich", + "dsl": "Department of Computer Science" + } + ], + "personId": 170358 + } + ] + }, + { + "id": 170818, + "typeId": 13749, + "title": "Data Pictorial: Deconstructing Raster Images for Data-Aware Animated Vector Posters", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686353" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-6531", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059, + 175089 + ], + "eventIds": [], + "abstract": "To support data integration into pictorials, we propose Data Pictorial, a pipeline that deconstructs a raster image into SVG objects whose attributes are contextualized in data. This process is achieved by cropping objects of interest using zero-shot detection, converting them into quantized bitmaps, and tracing the results as SVG paths. The technique then provides suggestions for binding the SVG objects and properties with data fields, affording the flexibility to automatically modify and animate the SVG based on the mapping. 
The resultant data-aware vector hypermedia can be potential candidates for real-time data inspection and personalization, all while maintaining the aesthetic of the original pictorial.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Rhode Island", + "city": "Providence", + "institution": "Brown University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170659 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Jose", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 170302 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Jose", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 170112 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 170031 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Jose", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 170414 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Jose", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 169698 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Jose", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 170317 + } + ] + }, + { + "id": 170819, + "typeId": 13748, + "title": "NotePlayer: Engaging Jupyter Notebooks for Dynamic Presentation of Analytical Processes", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676410" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6869", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171055, + 175081 + ], + "eventIds": [], + "abstract": "Diverse presentation formats play a pivotal role in effectively conveying code and analytical processes during data analysis. One increasingly popular format is tutorial videos, particularly those based on Jupyter notebooks, which offer an intuitive interpretation of code and vivid explanations of analytical procedures. However, creating such videos requires a diverse skill set and significant manual effort, posing a barrier for many analysts. To bridge this gap, we introduce an innovative tool called NotePlayer, which connects notebook cells to video segments and incorporates a computational engine with language models to streamline video creation and editing. Our aim is to make the process more accessible and efficient for analysts. To inform the design of NotePlayer, we conducted a formative study and performed content analysis on a corpus of 38 Jupyter tutorial videos. This helped us identify key patterns and challenges encountered in existing tutorial videos, guiding the development of NotePlayer. Through a combination of a usage scenario and a user study, we validated the effectiveness of NotePlayer. 
The results show that the tool streamlines the video creation and facilitates the communication process for data analysts.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "ShanghaiTech University", + "dsl": "School of Information Science and Technology" + }, + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "ShanghaiTech University", + "dsl": "School of Information Science and Technology" + } + ], + "personId": 170079 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hong Kong", + "institution": "The Hong Kong University of Science and Technology", + "dsl": "Department of Computer Science and Engineering" + }, + { + "country": "China", + "state": "", + "city": "Hong Kong", + "institution": "The Hong Kong University of Science and Technology", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 169902 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Microsoft Research Asia", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Microsoft Research Asia", + "dsl": "" + } + ], + "personId": 170468 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Shanghai", + "city": "Shanghai", + "institution": "ShanghaiTech University", + "dsl": "School of Information Science and Technology" + }, + { + "country": "China", + "state": "Shanghai", + "city": "Shanghai", + "institution": "ShanghaiTech University", + "dsl": "School of Information Science and Technology" + } + ], + "personId": 169792 + } + ] + }, + { + "id": 170820, + "typeId": 13748, + "title": "SHAPE-IT: Exploring Text-to-Shape-Display for Generative Shape-Changing Behaviors with LLMs", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676348" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-7837", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175067, + 171061 + ], + "eventIds": [], + "abstract": "This paper introduces text-to-shape-display, a novel approach to generating dynamic shape changes in pin-based shape displays through natural language commands. By leveraging large language models (LLMs) and AI-chaining, our approach allows users to author shape-changing behaviors on demand through text prompts without programming. We describe the foundational aspects necessary for such a system, including the identification of key generative elements (primitive, animation, and interaction) and design requirements to enhance user interaction, based on formative exploration and iterative design processes. Based on these insights, we develop SHAPE-IT, an LLM-based authoring tool for a 24 x 24 shape display, which translates the user's textual command into executable code and allows for quick exploration through a web-based control interface. We evaluate the effectiveness of SHAPE-IT in two ways: 1) performance evaluation and 2) user evaluation (N= 10). The study conclusions highlight the ability to facilitate rapid ideation of a wide range of shape-changing behaviors with AI. 
However, the findings also expose accuracy-related challenges and limitations, prompting further exploration into refining the framework for leveraging AI to better suit the unique requirements of shape-changing systems.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 169803 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 169959 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170271 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "" + } + ], + "personId": 169785 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170244 + } + ] + }, + { + "id": 170821, + "typeId": 13749, + "title": "Large Language Model Agents Enabled Generative Design of Fluidic Computation Interfaces", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686351" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-8518", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 175067, + 171049 + ], + "eventIds": [], + "abstract": "The creation of interactive devices is a major area of interest. However, traditional design tools in this field often require a significant learning curve and may not effectively support creative ideation. This study explores the use of fluidic computation interfaces as a case study to examine the potential of enhancing design tools for physical devices with Large Language Model (LLM) agents. With LLM agents, the Generative Design Tool (GDT) can understand the capabilities and limitations of new devices, suggest diverse, insightful, and practical application scenarios, and recommend designs that are technically and contextually appropriate. 
Additionally, it generates the necessary design parameters for the traditional components of the design tool to visualize results and create files for fabrication.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 170572 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Xiamen", + "institution": "Xiamen University", + "dsl": "" + } + ], + "personId": 169786 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Beijing", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 169791 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 170059 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 169899 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 170195 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "Mechanical Engineering, Morphing Matter Lab" + } + ], + "personId": 170224 + } + ] + }, + { + "id": 170822, + "typeId": 13756, + "title": "EmoPus: Providing Emotional and Tactile Comfort with a AI Desk Companion Octopus", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24d-9612", + "source": "PCS", + "trackId": 13203, + "tags": [], + "keywords": [], + "sessionIds": [ + 171027 + ], + "eventIds": [], + "abstract": "In the desktop scene, people often work and study for a long time, which can lead to mental stress and depression, therefore bringing the need for companion robots. Existing products offer voice dialogue and motion changes but lack direct user interactions. Consequently, we propose EmoPus, an AI companion robot resembling an octopus, designed for multi-dimensional interactions. Users can talk to EmoPus by voice and command it to complete simple tasks like grabbing objects. It also actively interacts based on the user's mental state, soothing the user's arm with its tentacles and offering a comforting tactile perception. User interaction would also change the shape of the EmoPus itself and provide real-time responses. 
In general, EmoPus interacts with users based on their psychological state, bringing fun, mental relief, and Emotional Companion while reducing the psychological burden after long work or study hours.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "Guangdong", + "city": "Guangzhou", + "institution": "Computational Media and Arts Thrust, Information Hub", + "dsl": "Hong Kong University of Science and Technology (Guangzhou)" + }, + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "School of Art and Design", + "dsl": "Beijing Forestry University" + } + ], + "personId": 169828 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Guangdong", + "city": "Guangzhou", + "institution": "Computational Media and Arts Thrust, Information Hub", + "dsl": "Hong Kong University of Science and Technology (Guangzhou)" + }, + { + "country": "China", + "state": "", + "city": "Suzhou", + "institution": "Duke Kunshan University", + "dsl": "" + } + ], + "personId": 169790 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Guangzhou", + "institution": "Hong Kong University of Science and Technology (Guangzhou)", + "dsl": "" + } + ], + "personId": 170052 + } + ] + }, + { + "id": 170823, + "typeId": 13748, + "title": "UICrit: Enhancing Automated Design Evaluation with a UI Critique Dataset", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676381" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6167", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175076, + 171053 + ], + "eventIds": [], + "abstract": "Automated UI evaluation can be beneficial for the design process; for example, to compare different UI designs, or conduct automated heuristic evaluation. LLM-based UI evaluation, in particular, holds the promise of generalizability to a wide variety of UI types and evaluation tasks. However, current LLM-based techniques do not yet match the performance of human evaluators. We hypothesize that automatic evaluation can be improved by collecting a targeted UI feedback dataset and then using this dataset to enhance the performance of general-purpose LLMs. We present a targeted dataset of 3,059 design critiques and quality ratings for 983 mobile UIs, collected from seven designers, each with at least a year of professional design experience. We carried out an in-depth analysis to characterize the dataset's features. We then applied this dataset to achieve a 55\\% performance gain in LLM-generated UI feedback via various few-shot and visual prompting techniques. 
We also discuss future applications of this dataset, including training a reward model for generative UI techniques, and fine-tuning a tool-agnostic multi-modal LLM that automates UI evaluation.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "UC Berkeley", + "dsl": "EECS" + } + ], + "personId": 169717 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google Research", + "dsl": "" + } + ], + "personId": 170649 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google Research", + "dsl": "" + } + ], + "personId": 170546 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "UC Berkeley", + "dsl": "EECS" + } + ], + "personId": 169964 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google Research", + "dsl": "" + } + ], + "personId": 170586 + } + ] + }, + { + "id": 170824, + "typeId": 13748, + "title": "Bluefish: Composing Diagrams with Declarative Relations", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676465" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-7493", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171019, + 175065 + ], + "eventIds": [], + "abstract": "Diagrams are essential tools for problem-solving and communication as they externalize conceptual structures using spatial relationships. But when picking a diagramming framework, users are faced with a dilemma. They can either use a highly expressive but low-level toolkit, whose API does not match their domain-specific concepts, or select a high-level typology, which offers a recognizable vocabulary but supports a limited range of diagrams. To address this gap, we introduce Bluefish: a diagramming framework inspired by component-based user interface (UI) libraries. Bluefish lets users create diagrams using relations: declarative, composable, and extensible diagram fragments that relax the concept of a UI component. Unlike a component, a relation does not have sole ownership over its children nor does it need to fully specify their layout. To render diagrams, Bluefish extends a traditional tree-based scenegraph to a compound graph that captures both hierarchical and adjacent relationships between nodes. To evaluate our system, we construct a diverse example gallery covering many domains including mathematics, physics, computer science, and even cooking. We show that Bluefish's relations are effective declarative primitives for diagrams. 
Bluefish is open source, and we aim to shape it into both a usable tool and a research platform.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "Massachusetts Institute of Technology", + "dsl": "CSAIL" + } + ], + "personId": 170697 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "Massachusetts Institute of Technology", + "dsl": "" + } + ], + "personId": 169727 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "Massachusetts Institute of Technology", + "dsl": "Visualization Group" + } + ], + "personId": 170141 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Ottawa", + "institution": "N/A", + "dsl": "" + } + ], + "personId": 169787 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT", + "dsl": "" + } + ], + "personId": 170678 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT", + "dsl": "CSAIL" + } + ], + "personId": 170171 + } + ] + }, + { + "id": 170825, + "typeId": 13748, + "title": "AccessTeleopKit: A Toolkit for Creating Accessible Web-Based Interfaces for Tele-Operating an Assistive Robot", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676355" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5502", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171024, + 175078 + ], + "eventIds": [], + "abstract": "Mobile manipulator robots, which can move around and physically interact with their environments, can empower people with motor limitations to independently carry out many activities of daily living. While many interfaces have been developed for tele-operating complex robots, most of them are not accessible to people with severe motor limitations. Further, most interfaces are rigid with limited configurations and are not readily available to download and use. To address these barriers, we developed AccessTeleopKit: an open-source toolkit for creating custom and accessible robot tele-operation interfaces based on cursor-and-click input for the Stretch 3 mobile-manipulator. With AccessTeleopKit users can add, remove, and rearrange components such as buttons and camera views, and select between a variety of control modes. We describe the participatory and iterative design process that led to the current implementation of AccessTeleopKit, involving three long-term deployments of the robot in the home of a quadriplegic user. We demonstrate how AccessTeleopKit allowed the user to create different interfaces for different tasks and the diversity of tasks it allowed the user to carry out. 
We also present two studies involving six additional users with severe motor limitations, demonstrating the power of AccessTeleopKit in creating custom interfaces for different user needs and preferences.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "" + } + ], + "personId": 170668 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "" + } + ], + "personId": 170687 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Independent Researcher", + "dsl": "" + } + ], + "personId": 170288 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "" + } + ], + "personId": 170636 + } + ] + }, + { + "id": 170826, + "typeId": 13748, + "title": "Natural Expression of a Machine Learning Model's Uncertainty Through Verbal and Non-Verbal Behavior of Intelligent Virtual Agents", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676454" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1024", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171023, + 175084 + ], + "eventIds": [], + "abstract": "Uncertainty cues are inherent in natural human interaction, as they signal to communication partners how much they can rely on conveyed information. Humans subconsciously provide such signals both verbally (e.g., through expressions such as \"maybe\" or \"I think\") and non-verbally (e.g., by diverting their gaze). In contrast, artificial intelligence (AI)-based services and machine learning (ML) models such as ChatGPT usually do not disclose the reliability of answers to their users.\r\nIn this paper, we explore the potential of combining ML models as powerful information sources with human means of expressing uncertainty to contextualize the information. We present a comprehensive pipeline that comprises (1) the human-centered collection of (non-)verbal uncertainty cues, (2) the transfer of cues to virtual agent videos, (3) the annotation of videos for perceived uncertainty, and (4) the subsequent training of a custom ML model that can generate uncertainty cues in virtual agent behavior. 
In a final step (5), the trained ML model is evaluated in terms of both fidelity and generalizability of the generated (non-)verbal uncertainty behavior.", + "authors": [ + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Hamburg", + "institution": "Universität Hamburg", + "dsl": "" + } + ], + "personId": 170338 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "Hamburg", + "city": "Hamburg", + "institution": "Universität Hamburg", + "dsl": "" + } + ], + "personId": 170408 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Jena", + "institution": "Friedrich-Schiller-University", + "dsl": "" + } + ], + "personId": 169765 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Hamburg", + "institution": "Universität Hamburg", + "dsl": "" + } + ], + "personId": 169720 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Hamburg", + "institution": "Universität Hamburg", + "dsl": "Human-Computer Interaction" + } + ], + "personId": 170359 + } + ] + }, + { + "id": 170827, + "typeId": 13748, + "title": "Who did it? How User Agency is influenced by Visual Properties of Generated Images", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676335" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-4415", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175083, + 171048 + ], + "eventIds": [], + "abstract": "The increasing proliferation of AI and GenAI requires new interfaces tailored to how their specific affordances and human requirements meet. As GenAI is capable of taking over tasks from users on an unprecedented scale, designing the experience of agency -- if and how users experience control over the process and responsibility over the outcome -- is crucial. As an initial step towards design guidelines for shaping agency, we present a study that explores how features of AI-generated images influence users' experience of agency. We use two measures; temporal binding to implicitly estimate pre-reflective agency and magnitude estimation to assess user judgments of agency. We observe that abstract images lead to more temporal binding than images with semantic meaning. In contrast, the closer an image aligns with what a user might expect, the higher the agency judgment. When comparing the experiment results with objective metrics of image differences, we find that temporal binding results correlate with semantic differences, while agency judgments are better explained by local differences between images. 
This work contributes towards a future where agency is considered an important design dimension for GenAI interfaces.", + "authors": [ + { + "affiliations": [ + { + "country": "Ireland", + "state": "", + "city": "Dublin", + "institution": "University College Dublin", + "dsl": "School of Computer Science" + }, + { + "country": "Germany", + "state": "", + "city": "Saarbrücken", + "institution": "Max Planck Institute for Informatics, Saarland Informatics Campus", + "dsl": "Sensorimotor Interaction" + } + ], + "personId": 170448 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Saarbrücken", + "institution": "MPI Informatik", + "dsl": "Computer Graphics" + } + ], + "personId": 170587 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "Saxony", + "city": "Dresden", + "institution": "Dresden University of Applied Sciences", + "dsl": "Faculty of Informatics / Mathematics" + }, + { + "country": "Germany", + "state": "", + "city": "Saarbrücken", + "institution": "Max Planck Institute for Informatics, Saarland Informatics Campus", + "dsl": "Sensorimotor Interaction" + } + ], + "personId": 169912 + }, + { + "affiliations": [ + { + "country": "Ireland", + "state": "", + "city": "Dublin", + "institution": "University College Dublin", + "dsl": "School of Computer Science" + } + ], + "personId": 169986 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Saarbruecken", + "institution": "MPI Informatik", + "dsl": "" + } + ], + "personId": 170238 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Saarbrücken", + "institution": "Max Planck Institute for Informatics, Saarland Informatics Campus", + "dsl": "Sensorimotor Interaction" + } + ], + "personId": 170436 + } + ] + }, + { + "id": 170828, + "typeId": 13748, + "title": "WaitGPT: Monitoring and Steering Conversational LLM Agent in Data Analysis with On-the-Fly Code Visualization", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676374" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8219", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175067, + 171061 + ], + "eventIds": [], + "abstract": "Large language models (LLMs) support data analysis through conversational user interfaces, as exemplified in OpenAI's ChatGPT (formally known as Advanced Data Analysis or Code Interpreter). Essentially, LLMs produce code for accomplishing diverse analysis tasks. However, presenting raw code can obscure the logic and hinder user verification. To empower users with enhanced comprehension and augmented control over analysis conducted by LLMs, we propose a novel approach to transform LLM-generated code into an interactive visual representation. In the approach, users are provided with a clear, step-by-step visualization of the LLM-generated code in real time, allowing them to understand, verify, and modify individual data operations in the analysis. Our design decisions are informed by a formative study (N=8) probing into user practice and challenges. We further developed a prototype named WaitGPT and conducted a user study (N=12) to evaluate its usability and effectiveness. 
The findings from the user study reveal that WaitGPT facilitates monitoring and steering of data analysis performed by LLMs, enabling participants to enhance error detection and increase their overall confidence in the results.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hong Kong", + "institution": "The Hong Kong University of Science and Technology", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 170433 + }, + { + "affiliations": [ + { + "country": "Hong Kong", + "state": "", + "city": "Hong Kong", + "institution": "Hong Kong University of Science and Technology", + "dsl": "" + } + ], + "personId": 169834 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Diego", + "institution": "University of California, San Diego", + "dsl": "Department of Cognitive Science and Design Lab" + } + ], + "personId": 170393 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hong Kong", + "institution": "The Hong Kong University of Science and Technology", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 170577 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Minnesota", + "city": "Minneapolis", + "institution": "University of Minnesota-Twin Cities", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 170548 + } + ] + }, + { + "id": 170829, + "typeId": 13751, + "title": "HRI and UIST: Designing Socially Engaging Robot Interfaces", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686705" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24g-9507", + "source": "PCS", + "trackId": 13208, + "tags": [], + "keywords": [], + "sessionIds": [ + 171060 + ], + "eventIds": [], + "abstract": "Human-Robot Interaction (HRI) is a field of study that focuses on the understanding, design, and evaluation of interactions between humans and robots. This workshop aims to bring together researchers interested in exploring the intersection of UIST and HRI. Our goal is to provide attendees with a deeper understanding of the synergies between the two research communities and to inspire better alignment between technical advancements in UIST and their application to social HRI contexts. 
The workshop will feature interactive demos, prototyping sessions, and discussions to explore key HRI concepts and considerations for designing robot interfaces that facilitate social interactions with humans.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "The Robotics Institute" + } + ], + "personId": 170410 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Wisconsin", + "city": "Madison", + "institution": "University of Wisconsin-Madison", + "dsl": "Department of Computer Sciences" + } + ], + "personId": 169762 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Wisconsin", + "city": "Madison", + "institution": "University of Wisconsin-Madison", + "dsl": "Computer Science, People and Robots Lab" + } + ], + "personId": 169751 + }, + { + "affiliations": [ + { + "country": "Sweden", + "state": "", + "city": "Stockholm", + "institution": "KTH Royal Institute of Technology", + "dsl": "" + } + ], + "personId": 170371 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Wisconsin", + "city": "Madison", + "institution": "University of Wisconsin - Madison", + "dsl": "Department of Computer Sciences" + } + ], + "personId": 169916 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170244 + } + ] + }, + { + "id": 170830, + "typeId": 13748, + "title": "Desk2Desk: Optimization-based Mixed Reality Workspace Integration for Remote Side-by-side Collaboration", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676339" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-9668", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171045, + 175069 + ], + "eventIds": [], + "abstract": "Mixed Reality enables hybrid workspaces where physical and virtual monitors are adaptively created and moved to suit the current environment and needs. However, in shared settings, individual users’ workspaces are rarely aligned and can vary significantly in the number of monitors, available physical space, and workspace layout, creating inconsistencies between workspaces which may cause confusion and reduce collaboration. We present Desk2Desk, an optimization-based approach for remote collaboration in which the hybrid workspaces of two collaborators are fully integrated to enable immersive side-by-side collaboration. The optimization adjusts each user’s workspace in layout and number of shared monitors and creates a mapping between workspaces to handle inconsistencies between workspaces due to physical constraints (e.g. physical monitors). 
We show in a user study how our system adaptively merges dissimilar physical workspaces to enable immersive side-by-side collaboration, and demonstrate how an optimization-based approach can effectively address dissimilar physical layouts.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Department of Computer Science" + } + ], + "personId": 170230 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Department of Computer Science" + } + ], + "personId": 170140 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Dynamic Graphics Project" + } + ], + "personId": 169934 + }, + { + "affiliations": [ + { + "country": "Singapore", + "state": "", + "city": "Singapore", + "institution": "Singapore Management University ", + "dsl": "School of Computing and Information Systems " + } + ], + "personId": 169951 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Department of Computer Science" + } + ], + "personId": 170395 + } + ] + }, + { + "id": 170831, + "typeId": 13748, + "title": "LlamaTouch: A Faithful and Scalable Testbed for Mobile UI Task Automation", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676382" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3200", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171023, + 175084 + ], + "eventIds": [], + "abstract": "The emergent large language/multimodal models facilitate the evolution of mobile agents, especially in mobile UI task automation. However, existing evaluation approaches, which rely on human validation or established datasets to compare agent-predicted actions with predefined action sequences, are unscalable and unfaithful. To overcome these limitations, this paper presents LlamaTouch, a testbed for on-device mobile UI task execution and faithful, scalable task evaluation. By observing that the task execution process only transfers UI states, LlamaTouch employs a novel evaluation approach that only assesses whether an agent traverses all manually annotated, essential application/system states. LlamaTouch comprises three key techniques: (1) On-device task execution that enables mobile agents to interact with realistic mobile environments for task execution. (2) Fine-grained UI component annotation that merges pixel-level screenshots and textual screen hierarchies to explicitly identify and precisely annotate essential UI components with a rich set of designed annotation primitives. (3) A multi-level application state matching algorithm that utilizes exact and fuzzy matching to accurately detect critical information in each screen, even with unpredictable UI layout/content dynamics. LlamaTouch currently incorporates four mobile agents and 496 tasks, encompassing both tasks in the widely-used datasets and our self-constructed ones to cover more diverse mobile applications. Evaluation results demonstrate LlamaTouch’s high faithfulness of evaluation in real-world mobile environments and its better scalability than human validation. LlamaTouch also enables easy task annotation and integration of new mobile agents. 
Code and dataset are publicly available at https://github.com/LlamaTouch/LlamaTouch.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Beijing University of Posts and Telecommunications", + "dsl": "" + } + ], + "personId": 170176 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Beijing University of Posts and Telecommunications", + "dsl": "" + } + ], + "personId": 170692 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Beijing University of Posts and Telecommunications", + "dsl": "" + } + ], + "personId": 170026 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Beijing University of Posts and Telecommunications", + "dsl": "" + } + ], + "personId": 170060 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Beijing University of Posts and Telecommunications", + "dsl": "" + } + ], + "personId": 170519 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Beijing University of Posts and Telecommunications", + "dsl": "" + } + ], + "personId": 170118 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "Institute for AI Industry Research (AIR)" + } + ], + "personId": 169746 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Beijing University of Posts and Telecommunications", + "dsl": "" + } + ], + "personId": 170480 + } + ] + }, + { + "id": 170832, + "typeId": 13749, + "title": "Enabling Advanced Interactions Through Closed-loop Control of Motor Unit Activity After Tetraplegia", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686325" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-3616", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059, + 175078 + ], + "eventIds": [], + "abstract": "Studies have shown that most individuals with motor complete spinal cord injuries (SCIs) can activate motor units (MUs) below the injury level, potentially enabling the use of myoelectric controllers for computer tasks. We present a novel wearable neuromotor interface driven by surface electromyography (sEMG). This wearable sEMG interface detects motor unit action potentials (MUAPs) from an array of sensors on the forearm. The MU event rate is translated (i.e. decoded) into discrete or continuous inputs for emulating button presses or joystick inputs. Our study demonstrates real-time detection and decoding of MUAPs in two individuals with tetraplegia, enabling computer task control via a non-invasive interface. Participants calibrated a spike sorting model through periods of rest and tonic activation, mapping MUAP firing rates to various degrees of freedom (DOF) for cursor or character control in tasks and 2D games. Each MUAP was mapped to a separate DOF axis, which we utilized in a Fitts’ Law target acquisition task to evaluate device throughput rate and other performance metrics such as completion rate, initiation time, time to target acquisition, and dial-in time. Our findings highlight MU firing as an effective control input, enabling gaming and social interaction for individuals with tetraplegia. 
This device's wearability and ease of use offer an innovative human-computer interaction solution that may enable people with SCI to interact freely with computers and other digital devices.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University ", + "dsl": "Mechanical Engineering / NeuroMechatronics Lab" + } + ], + "personId": 170631 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Mechanical Engineering" + } + ], + "personId": 170061 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Mechanical Engineering" + } + ], + "personId": 170574 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Mechanical Engineering" + } + ], + "personId": 170553 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Mechanical Engineering" + } + ], + "personId": 169734 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Mechanical Engineering" + } + ], + "personId": 169881 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York City", + "institution": "Reality Labs, Meta Platforms,Inc.", + "dsl": "" + } + ], + "personId": 170120 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Burlingame", + "institution": "Reality Labs, Meta Platforms,Inc.", + "dsl": "" + } + ], + "personId": 170327 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York City", + "institution": "Reality Labs, Meta Platforms,Inc.", + "dsl": "" + } + ], + "personId": 169954 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York City", + "institution": "Reality Labs, Meta Platforms,Inc.", + "dsl": "" + } + ], + "personId": 170180 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York City", + "institution": "Reality Labs, Meta Platforms,Inc.", + "dsl": "" + } + ], + "personId": 170264 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York City", + "institution": "Reality Labs, Meta Platforms,Inc.", + "dsl": "" + } + ], + "personId": 170332 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York City", + "institution": "Reality Labs, Meta Platforms,Inc.", + "dsl": "" + } + ], + "personId": 170028 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York City", + "institution": "Reality Labs, Meta Platforms,Inc.", + "dsl": "" + } + ], + "personId": 170384 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York City", + "institution": "Reality Labs, Meta Platforms,Inc.", + "dsl": "" + } + ], + "personId": 170148 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + 
"city": "Pittsburgh", + "institution": "University of Pittsburgh", + "dsl": "" + } + ], + "personId": 170164 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170662 + } + ] + }, + { + "id": 170833, + "typeId": 13748, + "title": "Selfrionette: A Fingertip Force-Input Controller for Continuous Full-Body Avatar Manipulation and Diverse Haptic Interactions", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676409" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5862", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175059, + 171041 + ], + "eventIds": [], + "abstract": "We propose Selfrionette, a controller that uses fingertip force input to drive avatar movements in virtual reality (VR). \r\nThis system enables users to interact with virtual objects and walk in VR using only fingertip force, overcoming physical and spatial constraints. Additionally, by fixing users' fingers, it provides users with counterforces equivalent to the applied force, allowing for diverse and wide dynamic range haptic feedback by adjusting the relationship between force input and virtual movement.\r\nTo evaluate the effectiveness of the proposed method, this paper focuses on hand interaction as a first step.\r\nIn User Study 1, we measured usability and embodiment during reaching tasks under Selfrionette, body tracking, and finger tracking conditions.\r\nIn User Study 2, we investigated whether users could perceive haptic properties such as weight, friction, and compliance under the same conditions as User Study 1.\r\nSelfrionette was found to be comparable to body tracking in realism of haptic interaction, enabling embodied avatar experiences even in limited spatial conditions.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 170428 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Nara", + "institution": "Nara Institute of Science and Technology", + "dsl": "" + } + ], + "personId": 169821 + } + ] + }, + { + "id": 170834, + "typeId": 13757, + "title": "Unmake to Remake: Materiality-driven Rapid Prototyping", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24h-1002", + "source": "PCS", + "trackId": 13207, + "tags": [], + "keywords": [], + "sessionIds": [ + 175079, + 171033 + ], + "eventIds": [], + "abstract": "Within the domain of fabrication, the recent strides in Fused Deposition Modeling (FDM) have sparked growing interest in its sustainability. In this work, we analyze the contemporary life cycle of polymers consumed in FDM, a common and accessible fabrication technique. Then we outline the points of design intervention to reduce wasted polymers in fabrication. Specifically, we discuss the design intervention of Filament Wiring, a set of hybrid craft techniques to promote sustainable prototyping and robust applications by highlighting left-over filaments. Our techniques aim to enhance the understanding of filaments as a unique material for hybrid fabrication, fostering creativity. Through our computational design system, end users can generate 3D printable frames, for exploring the possibilities of filament-based fabrication beyond 3D printing. 
We hope to provoke thought about filament as its own form of material, having capabilities to be made, unmade, and remade repeatedly into various artifacts. With this outlook, we discuss future research avenues, and urge makers and practitioners to value material in any form, quantity, or stage of its life cycle.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "College Station", + "institution": "Texas A&M University", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 170320 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "Osaka", + "city": "Ibaraki", + "institution": "Ritsumeikan University", + "dsl": "College of Information Science and Engineering" + } + ], + "personId": 171096 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "College Station", + "institution": "Texas A&M University", + "dsl": "Computer Science & Engineering" + } + ], + "personId": 171097 + } + ] + }, + { + "id": 170835, + "typeId": 13757, + "title": "Towards Automated Accessibility Report Generation for Mobile Apps", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24h-1001", + "source": "PCS", + "trackId": 13207, + "tags": [], + "keywords": [], + "sessionIds": [ + 175082, + 171046 + ], + "eventIds": [], + "abstract": "Many apps have basic accessibility issues, like missing labels or low contrast. To supplement manual testing, automated tools can help developers and QA testers find basic accessibility issues, but they can be laborious to use or require writing dedicated tests. To motivate our work, we interviewed eight accessibility QA professionals at a large technology company. From these interviews, we synthesized three design goals for accessibility report generation systems. Motivated by these goals, we developed a system to generate whole app accessibility reports by combining varied data collection methods (e.g., app crawling, manual recording) with an existing accessibility scanner. Many such scanners are based on single-screen scanning, and a key problem in whole app accessibility reporting is to effectively de-duplicate and summarize issues collected across an app. To this end, we developed a screen grouping model with 96.9% accuracy (88.8% F1-score) and UI element matching heuristics with 97% accuracy (98.2% F1-score). We combine these technologies in a system to report and summarize unique issues across an app, and enable a unique pixel-based ignore feature\r\nto help engineers and testers better manage reported issues across their app’s lifetime. We conducted a user study where 19 accessibility engineers and testers used multiple tools to create lists of prioritized issues in the context of an accessibility audit. 
Our system helped them create lists they were more satisfied with while addressing key limitations of current accessibility scanning tools.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Apple", + "dsl": "" + } + ], + "personId": 170680 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Apple", + "dsl": "" + }, + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170064 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Apple Inc", + "dsl": "" + } + ], + "personId": 170510 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Apple Inc", + "dsl": "" + } + ], + "personId": 170085 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Oregon", + "city": "Portland", + "institution": "Apple Inc", + "dsl": "" + } + ], + "personId": 170502 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Cupertino", + "institution": "Apple Inc", + "dsl": "" + } + ], + "personId": 170274 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Cupertino", + "institution": "Apple Inc", + "dsl": "" + } + ], + "personId": 170637 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Cupertino", + "institution": "Apple Inc", + "dsl": "" + } + ], + "personId": 169829 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Cupertino", + "institution": "Apple Inc", + "dsl": "" + } + ], + "personId": 169967 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Apple", + "dsl": "" + } + ], + "personId": 170699 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Diego", + "institution": "Apple Inc", + "dsl": "" + } + ], + "personId": 170590 + } + ] + }, + { + "id": 170836, + "typeId": 13756, + "title": "IntelliCID: Intelligent Caustics Illumination Device", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686733" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24d-3508", + "source": "PCS", + "trackId": 13203, + "tags": [], + "keywords": [], + "sessionIds": [ + 171027 + ], + "eventIds": [], + "abstract": "Environmental illumination has the power to influence our daily lives to some extent. When combined with the inherent human affinity for nature, the presence of natural illumination, such as caustics and komorebi, can be used to create a relaxing environment. However, if this illumination does not seamlessly adapt to our daily activities, it may become overwhelming and unpleasant. To address this issue, we present IntelliCID, an intelligent and interactive system designed for smart environments. 
This system is capable of sensing the user's state and adjusting the intensity of the dynamic caustics lighting effect accordingly.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Toyonaka-shi", + "institution": "Osaka University", + "dsl": "Graduate School of Engineering Science" + } + ], + "personId": 170236 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Toyonaka-shi", + "institution": "Osaka University", + "dsl": "Graduate School of Engineering Science" + } + ], + "personId": 170134 + } + ] + }, + { + "id": 170837, + "typeId": 13749, + "title": "Pay Attention! Human-Centric Improvements of LLM-based Interfaces for Assisting Software Test Case Development", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-6583", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 175067, + 171049 + ], + "eventIds": [], + "abstract": "Implementing automation testing is difficult and as a consequence there is a growing desire for semi-automated software testing systems with humans in the loop. Leveraging the growth of LLMs, recent research has demonstrated LLMs' potential to improve performance on test generation, reporting, and bug triaging. However, relatively little work has explored the interactivity issues that emerge in semi-automated LLM-assisted software test case development.\r\nTo fill this gap, we present two user studies ($N_1=16, N_2=24$) that investigate productivity, creativity, and user attention in three semi-automated LLM-assisted interaction strategies: (1) pre-emptive prompting; (2) buffered response; and (3) guided input.\r\nWe find that pre-emptively prompting the user significantly enhances branch coverage and task creativity by more than 30\\% while reducing user's off-task idle time by up to 48.7\\%.\r\nWe conclude by suggesting concrete research directions applying mixed-initiative principles for LLM-based interactive systems for semi-automated software testing.", + "authors": [ + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "", + "city": "Cambridge", + "institution": "University of Cambridge", + "dsl": "Department of Engineering" + } + ], + "personId": 170215 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "", + "city": "Cambridge", + "institution": "University of Cambridge", + "dsl": "Department of Engineering" + } + ], + "personId": 169710 + } + ] + }, + { + "id": 170838, + "typeId": 13748, + "title": "ScriptViz: A Visualization Tool to Aid Scriptwriting based on a Large Movie Database", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676402" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-4658", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171019, + 175065 + ], + "eventIds": [], + "abstract": "Scriptwriters usually rely on their mental visualization to create a vivid story by using their imagination to see, feel, and experience the scenes they are writing. Besides mental visualization, they often refer to existing images or scenes in movies and analyze the visual elements to create a certain mood or atmosphere. In this paper, we develop a new tool, ScriptViz, to provide external visualization based on a large movie database for the screenwriting process. It retrieves reference visuals on the fly based on scripts’ text and dialogue from a large movie database. 
The tool provides two types of control on visual elements that enable writers to 1) see exactly what they want with fixed visual elements and 2) see variances in uncertain elements. User evaluation among 15 scriptwriters shows that ScriptViz is able to present scriptwriters with consistent yet diverse visual possibilities, aligning closely with their scripts and helping their creation.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "Computer Science" + } + ], + "personId": 170293 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "" + } + ], + "personId": 170533 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "" + } + ], + "personId": 170187 + } + ] + }, + { + "id": 170839, + "typeId": 13748, + "title": "Modulating Heart Activity and Task Performance using Haptic Heartbeat Feedback: A Study Across Four Body Placements", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676435" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5626", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171030, + 175072 + ], + "eventIds": [], + "abstract": "This paper explores the impact of vibrotactile haptic feedback on heart activity when the feedback is provided at four different body locations (chest, wrist, neck, and ankle) and with two feedback rates (50 bpm and 110 bpm). A user study found that the neck placement resulted in higher heart rates and lower heart rate variability, and higher frequencies correlated with increased heart rates and decreased heart rate variability. The chest was preferred in self-reported metrics, and neck placement was perceived as less satisfying, harmonious, and immersive. This research contributes to understanding the interplay between psychological experiences and physiological responses when using haptic biofeedback resembling real body signals. 
", + "authors": [ + { + "affiliations": [ + { + "country": "New Zealand", + "state": "", + "city": "Auckland", + "institution": "The University of Auckland", + "dsl": "Empathic Computing Laboratory" + } + ], + "personId": 169953 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "Gyeongbuk", + "city": "Pohang", + "institution": "Pohang University of Science and Technology (POSTECH)", + "dsl": "Computer Science and Engineering / Interaction Laboratory" + } + ], + "personId": 169760 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "Gyeongbuk", + "city": "Pohang", + "institution": "Pohang University of Science and Technology (POSTECH)", + "dsl": "Computer Science and Engineering / Interaction Laboratory " + } + ], + "personId": 170454 + }, + { + "affiliations": [ + { + "country": "New Zealand", + "state": "", + "city": "Auckland", + "institution": "The University of Auckland", + "dsl": "Empathic Computing Laboratory" + } + ], + "personId": 170430 + }, + { + "affiliations": [ + { + "country": "Portugal", + "state": "", + "city": "Lisbon", + "institution": "Instituto Superior Técnico, University of Lisbon", + "dsl": "ITI / LARSyS" + } + ], + "personId": 170255 + } + ] + }, + { + "id": 170840, + "typeId": 13757, + "title": "RadarHand: a Wrist-Worn Radar for On-Skin Touch based Proprioceptive Gestures", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24h-1000", + "source": "PCS", + "trackId": 13207, + "tags": [], + "keywords": [], + "sessionIds": [ + 171022, + 175073 + ], + "eventIds": [], + "abstract": "We introduce RadarHand, a wrist-worn wearable with millimetre wave radar that detects on-skin touch-based proprioceptive hand gestures. Radars are robust, private, small, penetrate materials, and require low computation costs. We first evaluated the proprioceptive and tactile perception nature of the back of the hand and found that tapping on the thumb is the least proprioceptive error of all the finger joints, followed by the index finger, middle finger, ring finger, and pinky finger in the eyes-free and high cognitive load situation. Next, we trained deep-learning models for gesture classification. We introduce two types of gestures based on the locations of the back of the hand: generic gestures and discrete gestures. Discrete gestures are gestures that start at specific locations and end at specific locations at the back of the hand, in contrast to generic gestures, which can start anywhere and end anywhere on the back of the hand. Out of 27 gesture group possibilities, we achieved 92% accuracy for a set of seven gestures and 93% accuracy for the set of eight discrete gestures. Finally, we evaluated RadarHand’s performance in real-time under two interaction modes: Active interaction and Reactive interaction. Active interaction is where the user initiates input to achieve the desired output, and reactive interaction is where the device initiates interaction and requires the user to react. We obtained an accuracy of 87% and 74% for active generic and discrete gestures, respectively, as well as 91% and 81.7% for reactive generic and discrete gestures, respectively. 
We discuss the implications of RadarHand for gesture recognition and directions for future works.", + "authors": [ + { + "affiliations": [ + { + "country": "New Zealand", + "state": "Auckland", + "city": "Auckland C", + "institution": "Massey University", + "dsl": "School of Built Environment" + } + ], + "personId": 170357 + }, + { + "affiliations": [ + { + "country": "New Zealand", + "state": "", + "city": "Auckland", + "institution": "The University of Auckland", + "dsl": "Empathic Computing Lab, Auckland Bioengineering Institute" + } + ], + "personId": 170272 + }, + { + "affiliations": [ + { + "country": "New Zealand", + "state": "Auckland", + "city": "Auckland", + "institution": "The University of Auckland", + "dsl": "Empathic Computing Lab" + } + ], + "personId": 170449 + }, + { + "affiliations": [ + { + "country": "New Zealand", + "state": "", + "city": "Auckland", + "institution": "University of Auckland", + "dsl": "School of Computer Science" + } + ], + "personId": 170250 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Menlo Park", + "institution": "Meta", + "dsl": "Reality Labs" + } + ], + "personId": 170227 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco Bay Area", + "institution": "Archetype AI", + "dsl": "" + } + ], + "personId": 170273 + }, + { + "affiliations": [ + { + "country": "New Zealand", + "state": "Auckland", + "city": "Auckland", + "institution": "University of Auckland", + "dsl": "Computer Science" + } + ], + "personId": 169979 + }, + { + "affiliations": [ + { + "country": "Australia", + "state": "", + "city": "Mawson Lakes", + "institution": "University of South Australia", + "dsl": "ITMS" + } + ], + "personId": 170430 + } + ] + }, + { + "id": 170841, + "typeId": 13749, + "title": "ValueSphere: A Portable Widget for Quick and Easy Shading in Digital Drawings", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686350" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-8764", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171049, + 175071 + ], + "eventIds": [], + "abstract": "Marker shading is essential for communicating 3D forms in the early stages of product design. Inspired by a technique of marker shading that is widely used by designers, this study introduces ValueSphere, a novel widget for quick and easy shading in digital drawings. Using ValueSphere, the user can set the light direction, find accurate shading values, and apply them to the sketch through intuitive pen and multi-touch gestures. 
We utilized ValueSphere to shade various design sketches and showcase its usefulness.", + "authors": [ + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "" + } + ], + "personId": 169919 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "" + } + ], + "personId": 170146 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "" + } + ], + "personId": 170170 + } + ] + }, + { + "id": 170842, + "typeId": 13749, + "title": "Improving Interface Design in Interactive Task Learning for Hierarchical Tasks based on a Qualitative Study", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686326" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-1790", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 175076, + 171049 + ], + "eventIds": [], + "abstract": "Interactive Task Learning (ITL) systems acquire task knowledge from human instructions in natural language interaction. The interaction design of ITL agents for hierarchical tasks stays uncharted. This paper studied Verbal Apprentice Learner(VAL) for gaming, as an ITL example, and qualitatively analyzed the user study data to provide design insights on dialogue language types, task instruction strategies, and error handling. We then proposed an interface design: Editable Hierarchy Knowledge (EHK), as a generic probe for ITL systems for hierarchical tasks.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Georgia", + "city": "Atlanta", + "institution": "Georgia Institute of Technology ", + "dsl": "School of interactive computing" + } + ], + "personId": 169795 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Georgia", + "city": "Atlanta", + "institution": "Georgia Institute of Technology", + "dsl": "School of Interactive Computing" + } + ], + "personId": 169757 + } + ] + }, + { + "id": 170843, + "typeId": 13748, + "title": "SpaceBlender: Creating Context-Rich Collaborative Spaces Through Generative 3D Scene Blending", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676361" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-9203", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171045, + 175069 + ], + "eventIds": [], + "abstract": "There is increased interest in using generative AI to create 3D spaces for virtual reality (VR) applications. However, today’s models produce artificial environments, falling short of supporting collaborative tasks that benefit from incorporating the user's physical context. To generate environments that support VR telepresence, we introduce SpaceBlender, a novel pipeline that utilizes generative AI techniques to blend users' physical surroundings into unified virtual spaces. 
This pipeline transforms user-provided 2D images into context-rich 3D environments through an iterative process consisting of depth estimation, mesh alignment, and diffusion-based space completion guided by geometric priors and adaptive text prompts. In a preliminary within-subjects study, where 20 participants performed a collaborative VR affinity diagramming task in pairs, we compared SpaceBlender with a generic virtual environment and a state-of-the-art scene generation framework, evaluating its ability to create virtual spaces suitable for collaboration. Participants appreciated the enhanced familiarity and context provided by SpaceBlender but also noted complexities in the generative environments that could detract from task focus. Drawing on participant feedback, we propose directions for improving the pipeline and discuss the value and design of blended spaces for different scenarios.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Microsoft Research", + "dsl": "" + }, + { + "country": "United Kingdom", + "state": "", + "city": "London", + "institution": "University College London", + "dsl": "Department of Computer Science" + } + ], + "personId": 170708 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Microsoft Research", + "dsl": "" + }, + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "School of Information" + } + ], + "personId": 170445 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Microsoft Research", + "dsl": "" + } + ], + "personId": 169731 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Microsoft Research", + "dsl": "" + } + ], + "personId": 170562 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Microsoft Research", + "dsl": "" + } + ], + "personId": 170070 + } + ] + }, + { + "id": 170844, + "typeId": 13748, + "title": "picoRing: battery-free rings for subtle thumb-to-index input", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676365" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8477", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171022, + 175073 + ], + "eventIds": [], + "abstract": "Smart rings for subtle, reliable finger input offer an attractive path for ubiquitous interaction with wearable computing platforms. \r\nHowever, compared to ordinary rings worn for cultural or fashion reasons, smart rings are much bulkier and less comfortable, largely due to the space required for a battery, which also limits the space available for sensors.\r\nThis paper presents picoRing, a flexible sensing architecture that enables a variety of battery-free smart rings paired with a wristband. \r\nBy inductively connecting a wristband-based sensitive reader coil with a ring-based fully-passive sensor coil, picoRing enables the wristband to stably detect the passive response from the ring via a weak inductive coupling. 
\r\nWe demonstrate four different rings that support thumb-to-finger interactions like pressing, sliding, or scrolling.\r\nWhen users perform these interactions, the corresponding ring converts each input into a unique passive response through a network of passive switches.\r\nCombining the coil-based sensitive readout with the fully-passive ring design enables a tiny ring that weighs as little as 1.5 g and achieves a 13 cm stable readout despite finger bending, and proximity to metal.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Hongo, Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + }, + { + "country": "United States", + "state": "Washington", + "city": "REDMOND", + "institution": "Meta", + "dsl": "Reality Labs Research" + } + ], + "personId": 169721 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "Reality Labs Research" + } + ], + "personId": 170261 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta Reality Labs", + "dsl": "" + } + ], + "personId": 169697 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "Reality Labs Research" + } + ], + "personId": 170096 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Reality Labs", + "dsl": "Meta" + } + ], + "personId": 170585 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta Inc.", + "dsl": "Reality Labs Research" + } + ], + "personId": 170425 + } + ] + }, + { + "id": 170845, + "typeId": 13748, + "title": "ComPeer: A Generative Conversational Agent for Proactive Peer Support", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676430" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2493", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175067, + 171061 + ], + "eventIds": [], + "abstract": "Conversational Agents (CAs) acting as peer supporters have been widely studied and demonstrated beneficial for people's mental health. However, previous peer support CAs either are user-initiated or follow predefined rules to initiate the conversations, which may discourage users to engage and build relationships with the CAs for long-term benefits. In this paper, we develop ComPeer, a generative CA that can proactively offer adaptive peer support to users. ComPeer leverages large language models to detect and reflect significant events in the dialogue, enabling it to strategically plan the timing and content of proactive care. In addition, ComPeer incorporates peer support strategies, conversation history, and its persona into the generative messages. Our one-week between-subjects study (N=24) demonstrates ComPeer's strength in providing peer support over time and boosting users' engagement compared to a baseline user-initiated CA. 
We report users' interaction patterns with ComPeer and discuss implications for designing proactive generative agents to promote people's well-being.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "Guangdong Province", + "city": "Guangzhou", + "institution": "Sun Yat-sen University", + "dsl": "School of Computer and Engineering" + } + ], + "personId": 170080 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Zhuhai", + "institution": "School of Physics and Astronomy", + "dsl": "Sun Yat-sen University" + } + ], + "personId": 170566 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Guangzhou", + "institution": "Sun Yat-sen University", + "dsl": "" + } + ], + "personId": 170634 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Cornell University", + "dsl": "Weill Cornell Medicine" + } + ], + "personId": 170326 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Guangdong Province", + "city": "Zhuhai", + "institution": "Sun Yat-sen University", + "dsl": "School of Artificial Intelligence" + } + ], + "personId": 170092 + } + ] + }, + { + "id": 170846, + "typeId": 13748, + "title": "Don't Mesh Around: Streamlining Manual-Digital Fabrication Workflows with Domain-Specific 3D Scanning", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676385" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-4544", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175068, + 171052 + ], + "eventIds": [], + "abstract": "Software-first digital fabrication workflows are often at odds with material-driven approaches to design. Material-driven design is especially critical in manual ceramics, where the craftsperson shapes the form through hands-on engagement. We present the Craft-Aligned Scanner (CAS), a 3D scanning and clay-3D printing system that enables practitioners to design for digital fabrication through traditional pottery techniques. The CAS augments a pottery wheel that has 3D printing capabilities with a precision distance sensor on a vertically oriented linear axis. By increasing the height of the sensor as the wheel turns, we directly synthesize a 3D spiralized toolpath from the geometry of the object on the wheel, enabling the craftsperson to immediately transition from manual fabrication to 3D printing without leaving the tool. We develop new digital fabrication workflows with CAS to augment scanned forms with functional features and add both procedurally and real-time-generated surface textures. 
CAS demonstrates how 3D printers can support material-first digital fabrication design without foregoing the expressive possibilities of software-based design.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "Massachusetts Institute of Technology", + "dsl": "Department of Mechanical Engineering" + } + ], + "personId": 170101 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Santa Barbara", + "institution": "University of California, Santa Barbara", + "dsl": "Media Arts and Technology" + } + ], + "personId": 170573 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Santa Barbara", + "institution": "University of California, Santa Barbara ", + "dsl": "Media Arts and Technology" + } + ], + "personId": 170435 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Santa Barbara", + "institution": "University of California Santa Barbara", + "dsl": "Media Arts and Technology" + } + ], + "personId": 169813 + } + ] + }, + { + "id": 170847, + "typeId": 13748, + "title": "DisMouse: Disentangling Information from Mouse Movement Data", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676411" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-9319", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171029, + 175090 + ], + "eventIds": [], + "abstract": "Mouse movement data contain rich information about users, performed tasks, and user interfaces, but separating the respective components remains challenging and unexplored. As a first step to address this challenge, we propose DisMouse – the first method to disentangle user-specific and user-independent information and stochastic variations from mouse movement data. At the core of our method is an autoencoder trained in a semi-supervised fashion, consisting of a self-supervised denoising diffusion process and a supervised contrastive user identification module. Through evaluations on three datasets, we show that DisMouse 1) captures complementary information of mouse input, hence providing an interpretable framework for modelling mouse movements, 2) can be used to produce refined features, thus enabling various applications such as personalised and variable mouse data generation, and 3) generalises across different datasets. 
Taken together, our results underline the significant potential of disentangled representation learning for explainable, controllable, and generalised mouse behaviour modelling.", + "authors": [ + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Stuttgart", + "institution": "University of Stuttgart", + "dsl": " Institute for Visualisation and Interactive Systems" + } + ], + "personId": 169897 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Stuttgart", + "institution": "University of Stuttgart", + "dsl": "Institute for Visualisation and Interactive Systems" + }, + { + "country": "Germany", + "state": "", + "city": "Stuttgart", + "institution": "University of Stuttgart", + "dsl": "Institute for Modelling and Simulation of Biomechanical Systems" + } + ], + "personId": 169722 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Stuttgart", + "institution": "University of Stuttgart", + "dsl": "" + } + ], + "personId": 170351 + } + ] + }, + { + "id": 170848, + "typeId": 13748, + "title": "Wheeler: A Three-Wheeled Input Device for Usable, Efficient, and Versatile Non-Visual Interaction", + "award": "HONORABLE_MENTION", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676396" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1034", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171029, + 175090 + ], + "eventIds": [], + "abstract": "Blind users rely on keyboards and assistive technologies like screen readers to interact with user interface (UI) elements. In modern applications with complex UI hierarchies, navigating to different UI elements poses a significant accessibility challenge. Users must listen to screen reader audio descriptions and press relevant keyboard keys one at a time. This paper introduces Wheeler, a novel three-wheeled, mouse-shaped stationary input device, to address this issue. Informed by participatory sessions, Wheeler enables blind users to navigate up to three hierarchical levels in an app independently using three wheels instead of navigating just one level at a time using a keyboard. The three wheels also offer versatility, allowing users to repurpose them for other tasks, such as 2D cursor manipulation. A study with 12 blind users indicates a significant reduction (40%) in navigation time compared to using a keyboard. 
Further, a diary study with our blind co-author highlights Wheeler's additional benefits, such as accessing UI elements with partial metadata and facilitating mixed-ability collaboration.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "University Park", + "institution": "Pennsylvania State University", + "dsl": "College of Information Sciences and Technology" + } + ], + "personId": 169922 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New Hampshire", + "city": "Durham", + "institution": "University of New Hampshire", + "dsl": " Cognitive Assistive Robotics Lab" + } + ], + "personId": 169923 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "State College", + "institution": "Pennsylvania State University", + "dsl": "College of Information Sciences and Technology" + } + ], + "personId": 169816 + }, + { + "affiliations": [ + { + "country": "Bangladesh", + "state": "", + "city": "Dhaka", + "institution": "Innovation Garage Limited", + "dsl": "" + } + ], + "personId": 170081 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Fordham University", + "dsl": "Computer and Information Science" + } + ], + "personId": 170493 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "University Park ", + "institution": "Pennsylvania State University", + "dsl": "College of Information Sciences and Technology" + } + ], + "personId": 170647 + } + ] + }, + { + "id": 170849, + "typeId": 13749, + "title": "NeuroSight: Combining Eye-Tracking and Brain-Computer Interfaces for Context-Aware Hand-Free Camera Interaction", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-9069", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171049, + 175085 + ], + "eventIds": [], + "abstract": "Technology has blurred the boundaries of our work and private lives. Using touch-free technology can lessen the divide between technology and reality and bring us closer to the immersion we once had before. This work explores the combination of eye-tracking glasses and a brain-computer interface to enable hand-free interaction with the camera without holding or touching it. Different camera modes are difficult to implement without the use of eye-tracking. For example, visual search relies on an object, selecting a region in the scene by touching the touchscreen on your phone. Eye-tracking is used instead, and the fixation point is used to select the intended region. In addition, fixations can provide context for the mode the user wants to execute. For instance, fixations on foreign text could indicate translation mode. 
Ultimately, multiple touchless gestures create more fluent transitions between our life experiences and technology.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Oshawa", + "institution": "Ontario Tech University", + "dsl": "" + } + ], + "personId": 170416 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Oshawa", + "institution": "Ontario Tech University", + "dsl": "" + } + ], + "personId": 170598 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Oshawa", + "institution": "Ontario Tech University", + "dsl": "" + } + ], + "personId": 170226 + } + ] + }, + { + "id": 170850, + "typeId": 13755, + "title": "Intelligence as Agency", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3586182.3695612" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24f-3102", + "source": "PCS", + "trackId": 13201, + "tags": [], + "keywords": [], + "sessionIds": [ + 171039 + ], + "eventIds": [], + "abstract": "In his 2009 AAAI article, Jonathan Grudin described AI and HCI as “two fields divided by a common focus”, noting how they “competed for intellectual and economic resources”. HCI is in its strongest position yet — with several senior HCI researchers leading human-centered AI teams, organizations, and institutes at major companies and universities. But, there continues to be the risk that history repeats itself: that HCI finds itself primarily reacting to advances in AI, rather than being a coequal discipline that exerts pressures that drive advances in AI as well. In this talk, I will propose two conceptual shifts that more explicitly center HCI values in an era of rapid progress in AI: (1) redefining intelligence as agency, the capacity to meaningfully act, rather than the capacity to perform a task; and (2) formulating design as the delegation of constrained agency, rather than solely the specification of affordances.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT", + "dsl": "CSAIL" + } + ], + "personId": 170171 + } + ] + }, + { + "id": 170851, + "typeId": 13751, + "title": "Dynamic Abstractions: Building the Next Generation of Cognitive Tools and Interfaces", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686706" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24g-6143", + "source": "PCS", + "trackId": 13208, + "tags": [], + "keywords": [], + "sessionIds": [ + 171057 + ], + "eventIds": [], + "abstract": "This workshop provides a forum to discuss, brainstorm, and prototype the next generation of interfaces that leverage the dynamic experiences enabled by recent advances in AI and the generative capabilities of foundation models. These models simplify complex tasks by generating outputs in various representations (e.g., text, images, videos) through diverse input modalities like natural language, voice, and sketch. They interpret user intent to generate and transform representations, potentially changing how we interact with information and express ideas. Inspired by this potential, technologists, theorists, and researchers are exploring new forms of interaction by building demos and communities dedicated to concretizing and advancing the vision of working with dynamic abstractions. 
This UIST workshop provides a timely space to discuss AI's impact on how we might design and use cognitive tools (e.g., languages, notations, diagrams). We will explore the challenges, critiques, and opportunities of this space by thinking through and prototyping use cases across various domains.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Department of Computer Science" + } + ], + "personId": 170040 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Bayreuth", + "institution": "University of Bayreuth", + "dsl": "HCI+AI" + } + ], + "personId": 170152 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT", + "dsl": "CSAIL" + } + ], + "personId": 170459 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "Massachusetts Institute of Technology", + "dsl": "CSAIL" + } + ], + "personId": 170697 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Quebec", + "city": "Montréal", + "institution": "Université de Montréal", + "dsl": "Montréal HCI" + } + ], + "personId": 170346 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 169841 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "" + } + ], + "personId": 169677 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Claremont", + "institution": "Pomona College", + "dsl": "" + } + ], + "personId": 170544 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Santa Clara", + "institution": "Tero Labs", + "dsl": "" + } + ], + "personId": 170713 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT", + "dsl": "CSAIL" + } + ], + "personId": 170171 + } + ] + }, + { + "id": 170852, + "typeId": 13748, + "title": "Memory Reviver: Supporting Photo-Collection Reminiscence for People with Visual Impairment via a Proactive Chatbot", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676336" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8565", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171024, + 175078 + ], + "eventIds": [], + "abstract": "Reminiscing with photo collections offers significant psychological benefits but poses challenges for people with visual impairment (PVI). Their current reliance on sighted help restricts the flexibility of this activity. In response, we explored using a chatbot in a preliminary study. We identified two primary challenges that hinder effective reminiscence with a chatbot: the scattering of information and a lack of proactive guidance. To address these limitations, we present Memory Reviver, a proactive chatbot that helps PVI reminisce with a photo collection through natural language communication. 
Memory Reviver incorporates two novel features: (1) a Memory Tree, which uses a hierarchical structure to organize the information in a photo collection; and (2) a Proactive Strategy, which actively delivers information to users at proper conversation rounds. Evaluation with twelve PVI demonstrated that Memory Reviver effectively facilitated engaging reminiscence, enhanced understanding of photo collections, and delivered natural conversational experiences. Based on our findings, we distill implications for supporting photo reminiscence and designing chatbots for PVI.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hong Kong", + "institution": "Hong Kong University of Science and Technology", + "dsl": "" + } + ], + "personId": 170073 + }, + { + "affiliations": [ + { + "country": "Hong Kong", + "state": "", + "city": "Hong Kong", + "institution": "The Hong Kong University of Science and Technology", + "dsl": "" + } + ], + "personId": 170596 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hong Kong", + "institution": "Hong Kong University of Science and Technology", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 169833 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Hong Kong", + "city": "Hong Kong SAR", + "institution": "The Hong Kong University of Science and Technology", + "dsl": "IIP(Computational Media and Arts)" + } + ], + "personId": 169716 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hong Kong", + "institution": "The Hong Kong University of Science and Technology", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 170166 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Rochester", + "institution": "University of Rochester", + "dsl": "Department of Computer Science" + } + ], + "personId": 169909 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hong Kong", + "institution": "The Hong Kong University of Science and Technology", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 170577 + } + ] + }, + { + "id": 170853, + "typeId": 13748, + "title": "StegoType: Surface Typing from Egocentric Cameras", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676343" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8680", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175074, + 171031 + ], + "eventIds": [], + "abstract": "Text input is a critical component of any general purpose computing system, yet efficient and natural text input remains a challenge in AR and VR. Headset based hand-tracking has recently become pervasive among consumer VR devices and affords the opportunity to enable touch typing on virtual keyboards. We present an approach for decoding touch typing on uninstrumented flat surfaces using only egocentric camera-based hand-tracking as input. While egocentric hand-tracking accuracy is limited by issues like self occlusion and image fidelity, we show that a sufficiently diverse training set of hand motions paired with typed text can enable a deep learning model to extract signal from this noisy input. \r\nFurthermore, by carefully designing a closed-loop data collection process, we can train an end-to-end text decoder that accounts for natural sloppy typing on virtual keyboards. 
\r\nWe evaluate our work with a user study (n=18) showing a mean online throughput of 42.4 WPM with an uncorrected error rate (UER) of 7% with our method compared to a physical keyboard baseline of 74.5 WPM at 0.8% UER, showing progress towards unlocking productivity and high throughput use cases in AR/VR.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 170240 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 170415 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 170604 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 169759 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 169958 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 169956 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 170308 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Houghton", + "institution": "Michigan Technological University", + "dsl": "Computer Science" + } + ], + "personId": 170580 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 170353 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 170336 + } + ] + }, + { + "id": 170854, + "typeId": 13748, + "title": "BlendScape: Enabling End-User Customization of Video-Conferencing Environments through Generative AI", + "award": "HONORABLE_MENTION", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676326" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2334", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171045, + 175069 + ], + "eventIds": [], + "abstract": "Today’s video-conferencing tools support a rich range of professional and social activities, but their generic meeting environments cannot be dynamically adapted to align with distributed collaborators’ needs. To enable end-user customization, we developed BlendScape, a rendering and composition system for video-conferencing participants to tailor environments to their meeting context by leveraging AI image generation techniques. BlendScape supports flexible representations of task spaces by blending users’ physical or digital backgrounds into unified environments and implements multimodal interaction techniques to steer the generation. Through an exploratory study with 15 end-users, we investigated whether and how they would find value in using generative AI to customize video-conferencing environments. 
Participants envisioned using a system like BlendScape to facilitate collaborative activities in the future, but required further controls to mitigate distracting or unrealistic visual elements. We implemented scenarios to demonstrate BlendScape's expressiveness for supporting environment design strategies from prior work and propose composition techniques to improve the quality of environments.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Microsoft Research", + "dsl": "" + }, + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "School of Information" + } + ], + "personId": 170445 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Microsoft Research", + "dsl": "" + }, + { + "country": "United Kingdom", + "state": "", + "city": "London", + "institution": "University College London", + "dsl": "Department of Computer Science" + } + ], + "personId": 170708 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Microsoft Research", + "dsl": "" + } + ], + "personId": 169731 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Microsoft Research", + "dsl": "" + } + ], + "personId": 170562 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Microsoft Research", + "dsl": "" + } + ], + "personId": 170070 + } + ] + }, + { + "id": 170855, + "typeId": 13749, + "title": "AITentive: A Toolkit to Develop RL-based Attention Management Systems", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-8060", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171049, + 175082 + ], + "eventIds": [], + "abstract": "In today's fast-paced world, multitasking is common and affects productivity, decision-making, and cognition. Understanding its complexities is crucial for improving well-being, efficiency, and task management. Attention management systems optimize notification and interruption timings. This work introduces AITentive, an open-source Unity3D toolkit for multitasking research and developing attention management systems with reinforcement learning. 
The toolkit offers customizable tasks, built-in measurements, and a uniform interface for adding tasks, using Unity ML agents to develop and train attention management systems based on user models.", + "authors": [ + { + "affiliations": [ + { + "country": "Austria", + "state": "", + "city": "Hagenberg", + "institution": "University of Applied Sciences Upper Austria", + "dsl": "Digital Media Department" + } + ], + "personId": 170212 + }, + { + "affiliations": [ + { + "country": "Austria", + "state": "", + "city": "Hagenberg", + "institution": "University of Applied Sciences Upper Austria", + "dsl": "" + } + ], + "personId": 170661 + }, + { + "affiliations": [ + { + "country": "Austria", + "state": "", + "city": "Hagenberg", + "institution": "University of Applied Sciences Upper Austria", + "dsl": "" + }, + { + "country": "Austria", + "state": "", + "city": "Vienna", + "institution": "TU Wien", + "dsl": "" + } + ], + "personId": 170105 + } + ] + }, + { + "id": 170856, + "typeId": 13748, + "title": "SituationAdapt: Contextual UI Optimization in Mixed Reality with Situation Awareness via LLM Reasoning", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676470" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-7224", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171045, + 175069 + ], + "eventIds": [], + "abstract": "Mixed Reality is increasingly used in mobile settings beyond controlled home and office spaces. This mobility introduces the need for user interface layouts that adapt to varying contexts. However, existing adaptive systems are designed only for static environments. In this paper, we introduce SituationAdapt, a system that adjusts Mixed Reality UIs to real-world surroundings by considering environmental and social cues in shared settings. Our system consists of perception, reasoning, and optimization modules for UI adaptation. Our perception module identifies objects and individuals around the user, while our reasoning module leverages a Vision-and-Language Model to assess the placement of interactive UI elements. This ensures that adapted layouts do not obstruct relevant environmental cues or interfere with social norms. Our optimization module then generates Mixed Reality interfaces that account for these considerations as well as temporal constraints The evaluation of SituationAdapt is two-fold: We first validate our reasoning component’s capability in assessing UI contexts comparable to human expert users. In an online user study, we then established our system’s capability of producing context-aware MR layouts, where it outperformed adaptive methods from previous work. 
We further demonstrate the versatility and applicability of SituationAdapt with a set of application scenarios.", + "authors": [ + { + "affiliations": [ + { + "country": "Switzerland", + "state": "", + "city": "Zurich", + "institution": "ETH", + "dsl": "Department of Computer Science" + } + ], + "personId": 169913 + }, + { + "affiliations": [ + { + "country": "Switzerland", + "state": "", + "city": "Zurich", + "institution": "ETH Zurich", + "dsl": "Computer Science" + } + ], + "personId": 169992 + }, + { + "affiliations": [ + { + "country": "Switzerland", + "state": "", + "city": "Zürich", + "institution": "ETH Zürich", + "dsl": "Department of Computer Science" + } + ], + "personId": 170119 + }, + { + "affiliations": [ + { + "country": "Switzerland", + "state": "", + "city": "Zurich", + "institution": "ETH Zürich", + "dsl": "Department of Computer Science" + } + ], + "personId": 169925 + }, + { + "affiliations": [ + { + "country": "Switzerland", + "state": "", + "city": "Zurich", + "institution": "ETH", + "dsl": "Department of Computer Science" + } + ], + "personId": 170554 + }, + { + "affiliations": [ + { + "country": "Switzerland", + "state": "", + "city": "Zurich", + "institution": "ETH Zürich", + "dsl": "Department of Computer Science" + } + ], + "personId": 170358 + } + ] + }, + { + "id": 170857, + "typeId": 13748, + "title": "TorqueCapsules: Fully-Encapsulated Flywheel Actuation Modules for Designing and Prototyping Movement-Based and Kinesthetic Interaction", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676364" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3421", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175088, + 171032 + ], + "eventIds": [], + "abstract": "Flywheels are unique, versatile actuators that store and convert kinetic energy to torque, widely utilized in aerospace, robotics, haptics, and more. However, prototyping interaction using flywheels is not trivial due to safety concerns, unintuitive operation, and implementation challenges. \r\nWe present TorqueCapsules: self-contained, fully-encapsulated flywheel actuation modules that make the flywheel actuators easy to control, safe to interact with, and quick to reconfigure and customize. By fully encapsulating the actuators with a wireless microcontroller, a battery, and other components, the module can be readily attached, embedded, or stuck to everyday objects, worn to people’s bodies, or combined with other devices. With our custom GUI, both novices and expert users can easily control multiple modules to design and prototype movements and kinesthetic haptics unique to flywheel actuation. We demonstrate various applications, including actuated everyday objects, wearable haptics, and expressive robots. 
We conducted workshops for novices and experts to employ TorqueCapsules to collect qualitative feedback and further application examples.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170126 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170466 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Jose", + "institution": "Independent Researcher", + "dsl": "" + } + ], + "personId": 169946 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170248 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170244 + } + ] + }, + { + "id": 170858, + "typeId": 13748, + "title": "Feminist Interaction Techniques: Social Consent Signals to Deter NCIM Screenshots", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676380" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1001", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171029, + 175090 + ], + "eventIds": [], + "abstract": "Non-consensual Intimate Media (NCIM) refers to the distribution of sexual or intimate content without consent. NCIM is common and causes significant emotional, financial, and reputational harm. We developed Hands-Off, an interaction technique for messaging applications that deters non-consensual screenshots. Hands-Off requires recipients to perform a hand gesture in the air, above the device, to unlock media—which makes simultaneous screenshotting difficult. A lab study shows that Hands-Off gestures are easy\r\nto perform and reduce non-consensual screenshots by 67%. 
We conclude by generalizing this approach and introduce the idea of Feminist Interaction Techniques (FIT), interaction techniques that encode feminist values and speak to societal problems, and reflect on FIT’s opportunities and limitations.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "School of Information" + } + ], + "personId": 169888 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "School of Information" + } + ], + "personId": 169972 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "School of Information" + } + ], + "personId": 169975 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "" + } + ], + "personId": 170315 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "School of Information" + } + ], + "personId": 169855 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "School of Information" + } + ], + "personId": 169733 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "School of Information" + } + ], + "personId": 170571 + } + ] + }, + { + "id": 170859, + "typeId": 13749, + "title": "Catch That Butterfly: A Multimodal Approach for Detecting and Simulating Gut Feelings", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686340" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-7893", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059, + 175085 + ], + "eventIds": [], + "abstract": "Gut feelings are an omnipresent sensation underlying many emotional experiences, ranging from positive to negative emotions and even sixth sense intuition. Predicting and stimulating these feelings can enhance and enrich users emotional experiences. Despite the tremendous potential of gut feelings for HCI design, gut-related signals have been largely ignored in HCI research. In this paper, we introduce an ongoing prototype of an artifact that utilizes both bowel sounds and electrogastrography (EGG) signals to predict and simulate gut feelings. Our prototype consists of an EGG sensing module, a bowel sound sensing module, a machine learning model to process and predict gut churning moments, and a feedback mechanism to simulate gut feelings in the abdominal area. We hope this work opens up a new design space for physiological signal-based interaction and offers novel opportunities for enhancing user experiences. 
For future work, we plan to build a robust dataset for gut feeling prediction and evaluate our artifact's effectiveness in inducing gut feelings in real users.", + "authors": [ + { + "affiliations": [ + { + "country": "Singapore", + "state": "", + "city": "Singapore", + "institution": "National University of Singapore", + "dsl": "Augmented Human Lab" + } + ], + "personId": 170200 + }, + { + "affiliations": [ + { + "country": "Singapore", + "state": "", + "city": "Singapore", + "institution": "National University of Singapore", + "dsl": "Augmented Human Lab" + } + ], + "personId": 170526 + }, + { + "affiliations": [ + { + "country": "Singapore", + "state": "", + "city": "Singapore ", + "institution": "National University of Singapore ", + "dsl": "Augmented Human Lab, Department of Information Systems and Analytics" + } + ], + "personId": 170670 + }, + { + "affiliations": [ + { + "country": "Singapore", + "state": "", + "city": "Singapore", + "institution": "Department of Information Systems and Analytics, National University of Singapore", + "dsl": "Augmented Human Lab" + } + ], + "personId": 170132 + } + ] + }, + { + "id": 170860, + "typeId": 13748, + "title": "MagneDot: Integrated Fabrication and Actuation Methods of Dot-Based Magnetic Shape Displays", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676427" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6935", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171026, + 175087 + ], + "eventIds": [], + "abstract": "This paper presents MagneDot, a novel method for making interactive magnetic shape displays through an integrated fabrication process. Magnetic soft materials can potentially create fast, responsive morphing structures for interactions. However, novice users and designers typically do not have access to sophisticated equipment and materials or cannot afford heavy labor to create interactive objects based on this material. Modified from an open-source 3D printer, the fabrication system of MagneDot integrates the processes of mold-making, pneumatic extrusion, magnetization, and actuation, using cost-effective materials only. By providing a design tool, MagneDot allows users to generate G-codes for fabricating and actuating displays of various morphing effects. 
Finally, a series of design examples demonstrate the possibilities of shape displays enabled by MagneDot.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "International Design Institute" + } + ], + "personId": 169901 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "International Design Institute" + } + ], + "personId": 170594 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 169915 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "International Design Institute" + } + ], + "personId": 169694 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "International Design Institute" + } + ], + "personId": 170343 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170182 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Hangzhou City University", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 169747 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Ningbo", + "institution": "College of Science & Technology Ningbo University", + "dsl": "" + } + ], + "personId": 170263 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Hangzhou City University", + "dsl": "" + } + ], + "personId": 169788 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170103 + } + ] + }, + { + "id": 170861, + "typeId": 13748, + "title": "VIME: Visual Interactive Model Explorer for Identifying Capabilities and Limitations of Machine Learning Models for Sequential Decision-Making", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676323" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-7903", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175082, + 171046 + ], + "eventIds": [], + "abstract": "Ensuring that Machine Learning (ML) models make correct and meaningful inferences is necessary for the broader adoption of such models into high-stakes decision-making scenarios. Thus, ML model engineers increasingly use eXplainable AI (XAI) tools to investigate the capabilities and limitations of their ML models before deployment. However, explaining sequential ML models, which make a series of decisions at each timestep, remains challenging. We present Visual Interactive Model Explorer (VIME), an XAI toolbox that enables ML model engineers to explain decisions of sequential models in different ``what-if'' scenarios. 
Our evaluation with 14 ML experts, who investigated two existing sequential ML models using VIME and a baseline XAI toolbox to explore ``what-if'' scenarios, showed that VIME made it easier to identify and explain instances when the models made wrong decisions compared to the baseline. Our work informs the design of future interactive XAI mechanisms for evaluating sequential ML-based decision support systems.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Electrical Engineering and Computer Science" + } + ], + "personId": 170555 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170616 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Los Altos", + "institution": "Toyota Research Institute", + "dsl": "" + } + ], + "personId": 170084 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Los Altos", + "institution": "Toyota Research Institute", + "dsl": "" + } + ], + "personId": 170214 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Electrical Engineering and Computer Science" + } + ], + "personId": 170365 + } + ] + }, + { + "id": 170862, + "typeId": 13749, + "title": "ChipQuest: Gamifying the Semiconductor Manufacturing Process to Inspire Future Workforce", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686318" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-9712", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 175068, + 171059 + ], + "eventIds": [], + "abstract": "Semiconductor manufacturing is crucial for national economies; however, the industry faces significant talent shortages. While extensive research exists on motivating students in STEM learning, there is little work specifically addressing semiconductor education. To fill this gap, we first examined current barriers and motivational factors influencing students' pursuit of careers in semiconductor fields through interviews with 13 participants. Findings reveal that limited recognition of semiconductor companies relative to software engineering poses a barrier, while early exposure to the field and hands-on experience emerge as pivotal factors motivating prospective students. Drawing upon these insights, we introduce ChipQuest, an educational game designed to enhance K-12 students' engagement and interest in semiconductors. ChipQuest integrates gamification elements to simulate the complexities of semiconductor chip manufacturing, featuring a pedagogical agent, interactive tasks, a reward system, and competitive components. 
By incorporating gaming principles into semiconductor education, ChipQuest aims to offer a promising approach to inspire young students as the future workforce in the semiconductor industry.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Florida", + "city": "Gainesville", + "institution": "University of Florida", + "dsl": "" + } + ], + "personId": 169711 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Florida", + "city": "Gainesville", + "institution": "University of Florida", + "dsl": "Digital Worlds Institute" + } + ], + "personId": 170455 + } + ] + }, + { + "id": 170863, + "typeId": 13744, + "title": "CookAR: Affordance Augmentations in Wearable AR to Support Kitchen Tool Interactions for People with Low Vision", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1108", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Cooking is a central activity of daily living, supporting independence and both mental and physical health. However, prior work has highlighted key barriers for people with low vision (LV) to cook, particularly around safely interacting with cooking tools, such as sharp knives or hot pans. Drawing on recent advancements in computer vision (CV) and robotics, we present CookAR, a head-mounted AR system with real-time object affordance augmentations to support safe and efficient interactions with kitchen tools. To design and implement CookAR, we manually collected and annotated the first egocentric dataset of kitchen tool affordances, fine-tuned an affordance segmentation model, and leveraged a stereo camera attached to an AR headset to generate the visual augmentations. To validate CookAR, we conducted a technical performance evaluation and a three-part qualitative lab study with ten LV participants. Our technical evaluation demonstrates that our fine-tuned model outperforms the base model on our class-specific dataset, while our user study indicates a preference for affordance augmentations over the traditional whole object augmentations.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 169998 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 170049 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "" + } + ], + "personId": 170018 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. 
Allen School of Computer Science & Engineering" + } + ], + "personId": 170469 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Seoul", + "institution": "Sungkyunkwan University", + "dsl": "Sungkyunkwan University" + } + ], + "personId": 169830 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Information School" + } + ], + "personId": 169988 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 170537 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Department of Computer Science" + } + ], + "personId": 170475 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Wisconsin", + "city": "Madison", + "institution": "University of Wisconsin-Madison", + "dsl": "Department of Computer Sciences" + } + ], + "personId": 170037 + } + ] + }, + { + "id": 170864, + "typeId": 13744, + "title": "Thermal In Motion: Designing Thermal Flow Illusions with Tactile and Thermal Interaction", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1109", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "This study presents a novel method for creating moving thermal sensations by integrating the thermal referral illusion and tactile motion. Through three experiments conducted on human forearms, we investigated this phenomenon. The first experiment examined how temperature and the spatial placement of the thermal actuator influenced perceived thermal motion. The results showed a distinct perception of thermal motion under both hot and cold conditions, with the clearest perception achieved when the thermal actuator was centrally positioned on the forearm. In the second experiment, we focused on the temporal aspect of our method by determining the upper and lower thresholds of perceived thermal motion speed in hot and cold conditions. The findings revealed a wider speed range of detectable thermal motion in hot conditions (ranging from 1.8 cm/s to 9.5 cm/s) compared to cold conditions (ranging from 2.4 cm/s to 5.0 cm/s). Finally, we integrated our approach into virtual reality (VR) to assess its feasibility through two interaction scenarios. 
Our results shed light on the comprehension of thermal perception and its integration with tactile cues, promising significant advancements in incorporating thermal motion into diverse thermal interfaces for immersive VR experiences.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Erik Jonsson School of Engineering and Computer Science" + } + ], + "personId": 170331 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Department of Computer Science" + } + ], + "personId": 170090 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "" + } + ], + "personId": 169941 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Department of Computer Science" + } + ], + "personId": 170424 + } + ] + }, + { + "id": 170865, + "typeId": 13749, + "title": "Exploring a Software Tool for Biofibers Design", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686317" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-6207", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059, + 175080 + ], + "eventIds": [], + "abstract": "The Biofibers Spinning Machine produces bio-based fibers (biofibers) that are dissolvable and biodegradable. These fibers enable recycling of smart textiles by making it easy to separate electronics from textiles. Currently, prototyping with the machine requires the use of low-level commands, i.e. G-code. To enable more people to participate in the sustainable smart textiles design space and develop new biofiber materials, we need to provide accessible tools and workflows. This work explores a software tool that facilitates material exploration with machine parameters. We describe the interface design and demonstrate using the tool to quantify the relationship between machine parameters and spun gelatin biofibers.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Colorado", + "city": "Boulder", + "institution": "University of Colorado Boulder", + "dsl": "ATLAS Institute" + } + ], + "personId": 170702 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Colorado", + "city": "Boulder", + "institution": "University of Colorado Boulder", + "dsl": "ATLAS Institute and Department of Computer Science" + } + ], + "personId": 169879 + } + ] + }, + { + "id": 170866, + "typeId": 13748, + "title": "SonifyAR: Context-Aware Sound Generation in Augmented Reality", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676406" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8697", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171034, + 175091 + ], + "eventIds": [], + "abstract": "Sound plays a crucial role in enhancing user experience and immersiveness in Augmented Reality (AR). However, current platforms lack support for AR sound authoring due to limited interaction types, challenges in collecting and specifying context information, and difficulty in acquiring matching sound assets. 
We present SonifyAR, an LLM-based AR sound authoring system that generates context-aware sound effects for AR experiences. SonifyAR expands the current design space of AR sound and implements a Programming by Demonstration (PbD) pipeline to automatically collect contextual information of AR events, including virtual-content-semantics and real-world context. This context information is then processed by a large language model to acquire sound effects with Recommendation, Retrieval, Generation, and Transfer methods. To evaluate the usability and performance of our system, we conducted a user study with eight participants and created five example applications, including an AR-based science experiment, and an assistive application for low-vision AR users.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science and Engineering" + } + ], + "personId": 169981 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 170537 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Jose", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 170317 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Jose", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 170414 + } + ] + }, + { + "id": 170867, + "typeId": 13748, + "title": "Chromaticity Gradient Mapping for Interactive Control of Color Contrast in Images and Video", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676340" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-9300", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175061, + 171042 + ], + "eventIds": [], + "abstract": "We present a novel perceptually-motivated interactive tool for using color contrast to enhance details represented in the lightness channel of images and video. Our method lets users adjust the perceived contrast of different details by manipulating local chromaticity while preserving the original lightness of individual pixels. Inspired by the use of similar chromaticity mappings in painting, our tool effectively offers contrast along a user-selected gradient of chromaticities as additional bandwidth for representing and enhancing different details in an image. We provide an interface for our tool that closely resembles the familiar design of tonal contrast curve controls that are available in most professional image editing software. 
We show that our tool is effective for enhancing the perceived contrast of details without altering lightness in an image and present many examples of effects that can be achieved with our method on both images and video.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "New Jersey", + "city": "Princeton", + "institution": "Princeton University", + "dsl": "" + } + ], + "personId": 170074 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University", + "dsl": "" + } + ], + "personId": 169819 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University", + "dsl": "" + } + ], + "personId": 170266 + } + ] + }, + { + "id": 170868, + "typeId": 13744, + "title": "Bluefish: Composing Diagrams with Declarative Relations", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1100", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Diagrams are essential tools for problem-solving and communication as they externalize conceptual structures using spatial relationships. But when picking a diagramming framework, users are faced with a dilemma. They can either use a highly expressive but low-level toolkit, whose API does not match their domain-specific concepts, or select a high-level typology, which offers a recognizable vocabulary but supports a limited range of diagrams. To address this gap, we introduce Bluefish: a diagramming framework inspired by component-based user interface (UI) libraries. Bluefish lets users create diagrams using relations: declarative, composable, and extensible diagram fragments that relax the concept of a UI component. Unlike a component, a relation does not have sole ownership over its children nor does it need to fully specify their layout. To render diagrams, Bluefish extends a traditional tree-based scenegraph to a compound graph that captures both hierarchical and adjacent relationships between nodes. To evaluate our system, we construct a diverse example gallery covering many domains including mathematics, physics, computer science, and even cooking. 
Finally, through collaboration with a professional creative coder, we find that Bluefish's relationship to UI abstractions can ease a user's introduction to the framework, and the expressiveness of relations suggests new patterns of composition.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "Massachusetts Institute of Technology", + "dsl": "CSAIL" + } + ], + "personId": 170697 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "Massachusetts Institute of Technology", + "dsl": "" + } + ], + "personId": 169727 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "Massachusetts Institute of Technology", + "dsl": "Visualization Group" + } + ], + "personId": 170141 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Ottawa", + "institution": "N/A", + "dsl": "" + } + ], + "personId": 169787 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT", + "dsl": "" + } + ], + "personId": 170678 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT", + "dsl": "CSAIL" + } + ], + "personId": 170171 + } + ] + }, + { + "id": 170869, + "typeId": 13744, + "title": "Demonstrating XDTK: Prototyping Multi-Device Interaction and Arbitration in XR", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686784" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1101", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "mixed reality", + "toolkit", + "cross-device", + "multi-device", + "interaction" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "The interaction space of XR head-mounted devices can be extended by leveraging other digital devices, such as phones, tablets, and smartwatches. We present a demonstration of XDTK (Cross-Device Toolkit), an open-sourced prototyping toolkit for multi-device interactions in XR. The toolkit consists of: (1) an Android app that runs on client devices and surfaces pose, touch, and other sensor data to a (2) Unity server that can be added to any Unity-based XR application. For this demo, we specifically apply XDTK toward a few example applications, including multi-device arbitration. By leveraging relative pose data from each device, we can infer which device the user is gazing at so as to seamlessly hand off control and display between multiple devices. 
We also show examples leveraging a tablet sketching and a smartwatch for menu navigation.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170507 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Google", + "dsl": "Google AR" + } + ], + "personId": 169910 + }, + { + "affiliations": [ + { + "country": "India", + "state": "California", + "city": "Mountain View", + "institution": "Google Inc", + "dsl": "" + } + ], + "personId": 169891 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170006 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170270 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Google", + "dsl": "" + }, + { + "country": "United States", + "state": "Illinois", + "city": "Evanston", + "institution": "Northwestern University", + "dsl": "Computer Science" + } + ], + "personId": 169889 + } + ] + }, + { + "id": 170870, + "typeId": 13744, + "title": "Embodied AR Language Learning Through Everyday Object Interactions: A Demonstration of EARLL", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686746" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1103", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "augmented reality", + "embodied language learning", + "computer vision" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Learning a new language is an exciting and important yet often challenging goal. To support foreign language acquisition, we introduce EARLL, an embodied and context-aware language learning application for AR glasses. EARLL leverages real-time computer vision and depth sensing to continuously segment and localize objects in users' surroundings, check for hand-object manipulations, and then subtly trigger foreign vocabulary prompts relevant to that object. In this demo paper, we present our initial EARLL prototype and highlight current challenges and future opportunities with always-available, wearable, embodied AR language learning.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 169998 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Seoul", + "institution": "Seoul National University", + "dsl": "" + } + ], + "personId": 170257 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Seoul", + "institution": "Sungkyunkwan University", + "dsl": "Sungkyunkwan University" + } + ], + "personId": 169830 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. 
Allen School of Computer Science & Engineering" + } + ], + "personId": 169885 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 170537 + } + ] + }, + { + "id": 170871, + "typeId": 13744, + "title": "PyLips: an Open-Source Python Package to Expand Participation in Embodied Interaction", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686747" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1104", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Human-robot Interaction", + "Screen-based Face", + "Personalization", + "Customization" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": " We demonstrate PyLips, a Python package for expanding access to screen-based facial interfaces for text-to-speech. PyLips can be used to rapidly develop social interactions for a wide variety of applications. We designed PyLips to be easy to use for novice users and expressive for experienced interaction designers. We demonstrate key features of PyLips: compatibility across devices, customizable face appearance, and automated lip synching for text inputs. PyLips can be found at https://github.com/interaction-lab/PyLips.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "LOS ANGELES", + "institution": "University of Southern California", + "dsl": "Computer Science Department" + } + ], + "personId": 170681 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Los Angeles", + "institution": "University of Southern California", + "dsl": "Computer Science" + } + ], + "personId": 170249 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University ", + "dsl": "Robotics Institute" + } + ], + "personId": 170237 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Los Angeles", + "institution": "University of Southern California", + "dsl": "" + } + ], + "personId": 170375 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Los Angeles", + "institution": "University of Southern California", + "dsl": "" + } + ], + "personId": 170381 + } + ] + }, + { + "id": 170872, + "typeId": 13744, + "title": "Augmented Physics: Creating Interactive and Embedded Physics Simulations from Static Textbook Diagrams", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1105", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We introduce Augmented Physics, a machine learning-integrated authoring tool designed for creating embedded interactive physics simulations from static textbook diagrams. Leveraging recent advancements in computer vision, such as Segment Anything and Multi-modal LLMs, our web-based system enables users to semi-automatically extract diagrams from physics textbooks and generate interactive simulations based on the extracted content. These interactive diagrams are seamlessly integrated into scanned textbook pages, facilitating interactive and personalized learning experiences across various physics concepts, such as optics, circuits, and kinematics. 
Drawing from an elicitation study with seven physics instructors, we explore four key augmentation strategies: 1) augmented experiments, 2) animated diagrams, 3) bi-directional binding, and 4) parameter visualization. We evaluate our system through technical evaluation, a usability study (N=12), and expert interviews (N=12). Study findings suggest that our system can facilitate more engaging and personalized learning experiences in physics education.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "" + } + ], + "personId": 169924 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "College Station", + "institution": "Texas A&M University", + "dsl": "" + } + ], + "personId": 170022 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "" + } + ], + "personId": 169878 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "" + } + ], + "personId": 170167 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 169841 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "" + } + ], + "personId": 169785 + } + ] + }, + { + "id": 170873, + "typeId": 13748, + "title": "TouchpadAnyWear: Textile-Integrated Tactile Sensors for Multimodal High Spatial-Resolution Touch Inputs with Motion Artifacts Tolerance", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676344" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-4885", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171020, + 175086 + ], + "eventIds": [], + "abstract": "This paper presents TouchpadAnyWear, a novel family of textile-integrated force sensors capable of multi-modal touch input, encompassing micro-gesture detection, two-dimensional (2D) continuous input, and force-sensitive strokes. This thin (\\textless 1.5~mm) and conformal device features high spatial resolution sensing and motion artifact tolerance through its unique capacitive sensor architecture. The sensor consists of a knitted textile compressive core, sandwiched by stretchable silver electrodes, and conductive textile shielding layers on both sides. With a high-density sensor pixel array (25/cm\\textsuperscript{2}), TouchpadAnyWear can detect touch input locations and sizes with millimeter-scale spatial resolution and a wide range of force inputs (0.05~N to 20~N). The incorporation of miniature polymer domes, referred to as ``poly-islands'', onto the knitted textile locally stiffens the sensing areas, thereby reducing motion artifacts during deformation. These poly-islands also provide passive tactile feedback to users, allowing for eyes-free localization of the active sensing pixels. Design choices and sensor performance are evaluated using in-depth mechanical characterization. Demonstrations include an 8-by-8 grid sensor as a miniature high-resolution touchpad and a T-shaped sensor for thumb-to-finger micro-gesture input. 
User evaluations validate the effectiveness and usability of TouchpadAnyWear in daily interaction contexts, such as tapping, forceful pressing, swiping, 2D cursor control, and 2D stroke-based gestures. This paper further discusses potential applications and explorations for TouchpadAnyWear in wearable smart devices, gaming, and augmented reality devices.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Missouri", + "city": "Saint Louis", + "institution": "Washington University in St. Louis", + "dsl": "Department of Electrical & Systems Engineering" + }, + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta Platforms, Inc.", + "dsl": "Meta Reality Labs Research" + } + ], + "personId": 170231 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "Reality Labs Research" + } + ], + "personId": 169952 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta Platforms, Inc.", + "dsl": "Meta Reality Labs Research" + } + ], + "personId": 170654 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 170509 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta Platforms, Inc.", + "dsl": "Meta Reality Labs Research" + } + ], + "personId": 170570 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Facebook", + "dsl": "Facebook Reality Labs" + } + ], + "personId": 169706 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta Platforms, Inc.", + "dsl": "Meta Reality Labs Research" + } + ], + "personId": 170321 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta Reality Labs", + "dsl": "" + } + ], + "personId": 170114 + } + ] + }, + { + "id": 170874, + "typeId": 13748, + "title": "CookAR: Affordance Augmentations in Wearable AR to Support Kitchen Tool Interactions for People with Low Vision", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676449" + } + }, + "recognitionIds": [ + 10094 + ], + "isBreak": false, + "importedId": "uist24a-5611", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171025, + 175075 + ], + "eventIds": [], + "abstract": "Cooking is a central activity of daily living, supporting independence as well as mental and physical health. However, prior work has highlighted key barriers for people with low vision (LV) to cook, particularly around safely interacting with tools, such as sharp knives or hot pans. Drawing on recent advancements in computer vision (CV), we present CookAR, a head-mounted AR system with real-time object affordance augmentations to support safe and efficient interactions with kitchen tools. To design and implement CookAR, we collected and annotated the first egocentric dataset of kitchen tool affordances, fine-tuned an affordance segmentation model, and developed an AR system with a stereo camera to generate visual augmentations. 
To validate CookAR, we conducted a technical evaluation of our fine-tuned model as well as a qualitative lab study with 10 LV participants for suitable augmentation design. Our technical evaluation demonstrates that our model outperforms the baseline on our tool affordance dataset, while our user study indicates a preference for affordance augmentations over the traditional whole object augmentations.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 169998 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 170049 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "" + } + ], + "personId": 170018 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 170469 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Suwon", + "institution": "Sungkyunkwan University", + "dsl": "Sungkyunkwan University" + } + ], + "personId": 169830 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "" + } + ], + "personId": 169988 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 170537 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Department of Computer Science" + } + ], + "personId": 170475 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Wisconsin", + "city": "Madison", + "institution": "University of Wisconsin-Madison", + "dsl": "Department of Computer Sciences" + } + ], + "personId": 170037 + } + ] + }, + { + "id": 170875, + "typeId": 13748, + "title": "EgoTouch: On-Body Touch Input Using AR/VR Headset Cameras", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676455" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3796", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175077, + 171047 + ], + "eventIds": [], + "abstract": "In augmented and virtual reality (AR/VR) experiences, a user’s arms and hands can provide a convenient and tactile surface for touch input. Prior work has shown on-body input to have significant speed, accuracy, and ergonomic benefits over in-air interfaces, which are common today. In this work, we demonstrate high accuracy, bare hands (i.e., no special instrumentation of the user) skin input using just an RGB camera, like those already integrated into all modern XR headsets. Our results show this approach can be accurate, and robust across diverse lighting conditions, skin tones, and body motion (e.g., input while walking). 
Finally, our pipeline also provides rich input metadata including touch force, finger identification, angle of attack, and rotation. We believe these are the requisite technical ingredients to more fully unlock on-skin interfaces that have been well motivated in the HCI literature but have lacked robust and practical methods.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170386 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170374 + } + ] + }, + { + "id": 170876, + "typeId": 13748, + "title": "RFTIRTouch: Touch Sensing Device for Dual-sided Transparent Plane Based on Repropagated Frustrated Total Internal Reflection", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676428" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1015", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171056, + 175062 + ], + "eventIds": [], + "abstract": "Frustrated total internal reflection (FTIR) imaging is widely applied in various touch-sensing systems. However, vision-based touch sensing has structural constraints, and the system size tends to increase. Although a sensing system with reduced thickness has been developed recently using repropagated FTIR (RFTIR), it lacks the property of instant installation anywhere because observation from the side of a transparent medium is required. Therefore, this study proposes an \"RFTIRTouch\" sensing device to capture RFTIR images from the contact surface. RFTIRTouch detects the touch position on a dual-sided plane using a physics-based estimation and can be retrofitted to existing transparent media with simple calibration. Our evaluation experiments confirm that the touch position can be estimated within an error of approximately 2.1 mm under optimal conditions. 
Furthermore, several application examples are implemented to demonstrate the advantages of RFTIRTouch, such as its ability to measure dual sides with a single sensor and waterproof the contact surface.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Hiratsuka", + "institution": "Tokai University", + "dsl": "" + } + ], + "personId": 170683 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Hiratsuka", + "institution": "Tokai University", + "dsl": "" + } + ], + "personId": 170204 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Hiratsuka", + "institution": "Tokai University", + "dsl": "" + } + ], + "personId": 170484 + } + ] + }, + { + "id": 170877, + "typeId": 13748, + "title": "VirtualNexus: Enhancing 360-Degree Video AR/VR Collaboration with Environment Cutouts and Virtual Replicas", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676377" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6821", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175061, + 171042 + ], + "eventIds": [], + "abstract": "Asymmetric AR/VR collaboration systems bring a remote VR user to a local AR user’s physical environment, allowing them to communicate and work within a shared virtual/physical space. Such systems often display the remote environment through 3D reconstructions or 360° videos. While 360° cameras stream an environment in higher quality, they lack spatial information, making them less interactable. We present VirtualNexus, an AR/VR collaboration system that enhances 360° video AR/VR collaboration with environment cutouts and virtual replicas. VR users can define cutouts of the remote environment to interact with as a world-in-miniature, and their interactions are synchronized to the local AR perspective. Furthermore, AR users can rapidly scan and share 3D virtual replicas of physical objects using neural rendering. We demonstrated our system’s utility through 3 example applications and evaluated our system in a dyadic usability test. VirtualNexus extends the interaction space of 360° telepresence systems, offering improved physical presence, versatility, and clarity in interactions. 
", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Department of Computer Science" + } + ], + "personId": 169793 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Department of Computer Science" + } + ], + "personId": 169874 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Department of Computer Science" + } + ], + "personId": 170472 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Department of Computer Science" + } + ], + "personId": 170349 + } + ] + }, + { + "id": 170878, + "typeId": 13748, + "title": "AutoSpark: Supporting Automobile Appearance Design Ideation with Kansei Engineering and Generative AI", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676337" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2347", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171028, + 175071 + ], + "eventIds": [], + "abstract": "Rapid creation of novel product appearance designs that align with consumer emotional requirements poses a significant challenge. Text-to-image models, with their excellent image generation capabilities, have demonstrated potential in providing inspiration to designers. However, designers still encounter issues including aligning emotional needs, expressing design intentions, and comprehending generated outcomes in practical applications. To address these challenges, we introduce AutoSpark, an interactive system that integrates Kansei Engineering and generative AI to provide creativity support for designers in creating automobile appearance designs that meet emotional needs. AutoSpark employs a Kansei Engineering engine powered by generative AI and a semantic network to assist designers in emotional need alignment, design intention expression, and prompt crafting. It also facilitates designers' understanding and iteration of generated results through fine-grained image-image similarity comparisons and text-image relevance assessments. The design-thinking map within its interface aids in managing the design process. 
Our user study indicates that AutoSpark effectively aids designers in producing designs that are more aligned with emotional needs and of higher quality compared to a baseline system, while also enhancing the designers' experience in the human-AI co-creation process.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "College of Computer Science and Technology" + } + ], + "personId": 170030 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "HangZhou", + "institution": "Zhejiang University", + "dsl": "College of Computer Science and Technology" + } + ], + "personId": 169728 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "College of Computer Science and Technology" + } + ], + "personId": 169772 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Ningbo", + "institution": "Zhejiang University", + "dsl": "School of Software Technology" + } + ], + "personId": 170145 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "Geely Holding Group", + "dsl": "Geely Innovation Design Institute" + } + ], + "personId": 170047 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "College of Computer Science and Technology" + } + ], + "personId": 170083 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Ningbo", + "institution": "Zhejiang University", + "dsl": "School of Software Technology" + } + ], + "personId": 170404 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "International Design Institute" + } + ], + "personId": 169901 + } + ] + }, + { + "id": 170879, + "typeId": 13748, + "title": "OptiBasePen: Mobile Base+Pen Input on Passive Surfaces by Sensing Relative Base Motion Plus Close-Range Pen Position", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676467" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2221", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175063, + 171043 + ], + "eventIds": [], + "abstract": "Digital pen input devices based on absolute pen position sensing, such as Wacom Pens, support high-fidelity pen input. However, they require specialized sensing surfaces like drawing tablets, which can have a large desk footprint, constrain the possible input area, and limit mobility. In contrast, digital pens with integrated relative sensing enable mobile use on passive surfaces, but suffer from motion artifacts or require surface contact at all times, deviating from natural pen affordances. We present OptiBasePen, a device for mobile pen input on ordinary surfaces. Our prototype consists of two parts: the \"base\" on which the hand rests and the pen for fine-grained input. The base features a high-precision mouse sensor to sense its own relative motion, and two infrared image sensors to track the absolute pen tip position within the base's frame of reference. This enables pen input on ordinary surfaces without external cameras while also avoiding drift from pen micro-movements. 
In this work, we present our prototype as well as the general base+pen concept, which combines relative and absolute sensing.", + "authors": [ + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Stuttgart", + "institution": "University of Stuttgart", + "dsl": "Department of Computer Science" + } + ], + "personId": 170330 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Essen", + "institution": "University of Duisburg-Essen", + "dsl": "Chair for Integrated Information Systems" + } + ], + "personId": 170543 + } + ] + }, + { + "id": 170880, + "typeId": 13748, + "title": "Fiery Hands: Designing Thermal Glove through Thermal and Tactile Integration for Virtual Object Manipulation", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676457" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6388", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175066, + 171038 + ], + "eventIds": [], + "abstract": "We present a novel approach to render thermal and tactile feedback to the palm and fingertips through thermal and tactile integration. Our approach minimizes the obstruction of the palm and inner side of the fingers and enables virtual object manipulation while providing localized and global thermal feedback. By leveraging thermal actuators positioned strategically on the outer palm and back of the fingers in interplay with tactile actuators, our approach exploits thermal referral and tactile masking phenomena. Through a series of user studies, we validate the perception of localized thermal sensations across the palm and fingers, showcasing the ability to generate diverse thermal patterns. Furthermore, we demonstrate the efficacy of our approach in VR applications, replicating diverse thermal interactions with virtual objects. 
This work represents significant progress in thermal interactions within VR, offering enhanced sensory immersion at an optimal energy cost.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "" + } + ], + "personId": 169941 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Erik Jonsson School of Engineering and Computer Science" + } + ], + "personId": 170331 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Department of Computer Science" + } + ], + "personId": 170058 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Department of Computer Science" + } + ], + "personId": 170424 + } + ] + }, + { + "id": 170881, + "typeId": 13748, + "title": "AniCraft: Crafting Everyday Objects as Physical Proxies for Prototyping 3D Character Animation in Mixed Reality", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676325" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3794", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175088, + 171032 + ], + "eventIds": [], + "abstract": "We introduce AniCraft, a mixed reality system for prototyping 3D character animation using physical proxies crafted from everyday objects. Unlike existing methods that require specialized equipment to support the use of physical proxies, AniCraft only requires affordable markers, webcams, and daily accessible objects and materials. AniCraft allows creators to prototype character animations through three key stages: selection of virtual characters, fabrication of physical proxies, and manipulation of these proxies to animate the characters. This authoring workflow is underpinned by diverse physical proxies, manipulation types, and mapping strategies, which ease the process of posing virtual characters and mapping user interactions with physical proxies to animated movements of virtual characters. We provide a range of cases and potential applications to demonstrate how diverse physical proxies can inspire user creativity. User experiments show that our system can outperform traditional animation methods for rapid prototyping. 
Furthermore, we provide insights into the benefits and usage patterns of different materials, which lead to design implications for future research.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Guangzhou", + "institution": "The Hong Kong University of Science and Technology (Guangzhou)", + "dsl": "Computational Media and Arts" + } + ], + "personId": 170135 + }, + { + "affiliations": [ + { + "country": "Hong Kong", + "state": "", + "city": "Hong Kong", + "institution": "The Hong Kong University of Science and Technology", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170166 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Guangzhou", + "institution": "The Hong Kong University of Science and Technology (Guangzhou)", + "dsl": "Computational Media and Arts" + } + ], + "personId": 170578 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Guangzhou", + "institution": "The Hong Kong University of Science and Technology (Guangzhou)", + "dsl": "Computational Media and Arts" + } + ], + "personId": 170160 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Guangzhou", + "institution": "The Hong Kong University of Science and Technology (Guangzhou)", + "dsl": "Computational Media and Arts" + } + ], + "personId": 170008 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Guangzhou", + "institution": "The Hong Kong University of Science and Technology (Guangzhou)", + "dsl": "Computational Media and Arts" + }, + { + "country": "Hong Kong", + "state": "", + "city": "Hong Kong", + "institution": "The Hong Kong University of Science and Technology", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 169996 + } + ] + }, + { + "id": 170882, + "typeId": 13748, + "title": "Patterns of Hypertext-Augmented Sensemaking", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676338" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1017", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175070, + 171037 + ], + "eventIds": [], + "abstract": "The early days of HCI were marked by bold visions of hypertext as a transformative medium for augmented sensemaking, exemplified in systems like Memex, Xanadu, and NoteCards. Today, however, hypertext is often disconnected from discussions of the future of sensemaking. In this paper, we investigate how the recent resurgence in hypertext ``tools for thought'' might point to new directions for hypertext-augmented sensemaking. Drawing on detailed analyses of guided tours with 23 scholars, we describe hypertext-augmented use patterns for dealing with the core problem of revisiting and reusing existing/past ideas during scholarly sensemaking. 
We then discuss how these use patterns validate and extend existing knowledge of hypertext design patterns for sensemaking, and point to new design opportunities for augmented sensemaking.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University of Maryland College Park", + "dsl": "College of Information Studies" + } + ], + "personId": 169770 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "WebSim", + "dsl": "" + } + ], + "personId": 170588 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Heyday", + "dsl": "" + } + ], + "personId": 169815 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University of Maryland", + "dsl": "College of Information Studies" + } + ], + "personId": 170003 + } + ] + }, + { + "id": 170883, + "typeId": 13748, + "title": "LessonPlanner: Assisting Novice Teachers to Prepare Pedagogy-Driven Lesson Plans with Large Language Models", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676390" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6949", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175070, + 171037 + ], + "eventIds": [], + "abstract": "Preparing a lesson plan, e.g., a detailed road map with strategies and materials for instructing a 90-minute class, is beneficial yet challenging for novice teachers. Large language models (LLMs) can ease this process by generating adaptive content for lesson plans, which would otherwise require teachers to create from scratch or search existing resources. In this work, we first conduct a formative study with six novice teachers to understand their needs for support of preparing lesson plans with LLMs. Then, we develop LessonPlanner that assists users to interactively construct lesson plans with adaptive LLM-generated content based on Gagne's nine events. Our within-subjects study (N=12) shows that compared to the baseline ChatGPT interface, LessonPlanner can significantly improve the quality of outcome lesson plans and ease users' workload in the preparation process. Our expert interviews (N=6) further demonstrate LessonPlanner's usefulness in suggesting effective teaching strategies and meaningful educational resources. 
We discuss concerns on and design considerations for supporting teaching activities with LLMs.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Zhuhai, Guangdong Province", + "institution": "Sun Yat-sen University", + "dsl": "School of Artificial Intelligence" + } + ], + "personId": 169911 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Guangdong Province", + "city": "Zhuhai", + "institution": "School of Artificial Intelligence", + "dsl": "Sun Yat-sen University" + } + ], + "personId": 170648 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Cornell University", + "dsl": "Weill Cornell Medicine" + } + ], + "personId": 170326 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Guangdong Province", + "city": "Zhuhai", + "institution": "Sun Yat-sen University", + "dsl": "School of Artificial Intelligence" + } + ], + "personId": 170092 + } + ] + }, + { + "id": 170884, + "typeId": 13749, + "title": "Seent: Interfacing Gamified Olfactory Training", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686356" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-1440", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 175088, + 171049 + ], + "eventIds": [], + "abstract": "Olfactory dysfunction affects a significant proportion of the global population. Although traditional olfactory training can help with rehabilitation, it often lacks engagement due to its repetitive and dull nature. Advances in digital olfactory technologies have made the interaction between humans and odours possible. We introduce Seent, an interactive interface designed to gamify olfactory training, including hardware to digitalise odours and a corresponding Graphical User Interface. Seent integrates playful gameplay into the training process, allowing participants to engage in a series of odour-based interactive games. This paper concludes by outlining future steps and proposing its potential to enhance olfactory rehabilitation and promote regular olfactory health monitoring.", + "authors": [ + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "", + "city": "London", + "institution": "Imperial College London", + "dsl": "" + }, + { + "country": "United Kingdom", + "state": "", + "city": "London", + "institution": "Royal College of Art", + "dsl": "" + } + ], + "personId": 170547 + } + ] + }, + { + "id": 170885, + "typeId": 13748, + "title": "Flip-Pelt: Motor-Driven Peltier Elements for Rapid Thermal Stimulation and Congruent Pressure Feedback in Virtual Reality", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676363" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1932", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175066, + 171038 + ], + "eventIds": [], + "abstract": "This study introduces \"Flip-Pelt,\" a motor-driven peltier device designed to provide rapid thermal stimulation and congruent pressure feedback in virtual reality (VR) environments. Our system incorporates eight motor-driven peltier elements, allowing for the flipping of preheated or cooled elements to the opposite side. 
In evaluating the Flip-Pelt device, we assess user ability to distinguish between heat/cold sources by their patterns and stiffness, and its impact on enhancing haptic experiences in VR content that involves contact with various thermal sources. Our findings demonstrate that rapid thermal stimulation and congruent pressure feedback provided by Flip-Pelt enhance the recognition accuracy of thermal patterns and the stiffness of virtual objects. These features also improve haptic experiences in VR scenarios through their temporal congruency between tactile and thermal stimuli. Additionally, we discuss the scalability of the Flip-Pelt system to other body parts by proposing design prototypes.", + "authors": [ + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Gwangju", + "institution": "Gwangju Institute of Science and Technology", + "dsl": "Human-Centered Intelligent Systems Lab" + } + ], + "personId": 170479 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Gwangju", + "institution": "Gwangju Institute of Science and Technology", + "dsl": "Human-Centered Intelligent Systems Lab" + } + ], + "personId": 170432 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Gwangju", + "institution": "Gwangju Institute of Science and Technology", + "dsl": "Human-Centered Intelligent Systems Lab" + } + ], + "personId": 170494 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Gwangju", + "institution": "Gwangju Institute of Science and Technology", + "dsl": "Human-Centered Intelligent Systems Lab" + } + ], + "personId": 170191 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Gwangju ", + "institution": "Gwangju Institute of Science and Technology", + "dsl": "Human-Centered Intelligent Systems Lab" + } + ], + "personId": 169763 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Gwangju", + "institution": "Gwangju Institute of Science and Technology", + "dsl": "Human-Centered Intelligent Systems Lab" + } + ], + "personId": 170391 + } + ] + }, + { + "id": 170886, + "typeId": 13748, + "title": "Mul-O: Encouraging Olfactory Innovation in Various Scenarios Through a Task-Oriented Development Platform", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676387" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-9073", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175088, + 171032 + ], + "eventIds": [], + "abstract": "Olfactory interfaces are pivotal in HCI, yet their development is hindered by limited application scenarios, stifling the discovery of new research opportunities. This challenge primarily stems from existing design tools focusing predominantly on odor display devices and the creation of standalone olfactory experiences, rather than enabling rapid adaptation to various contexts and tasks. Addressing this, we introduce Mul-O, a novel task-oriented development platform crafted to aid semi-professionals in navigating the diverse requirements of potential application scenarios and effectively prototyping ideas.\r\nMul-O facilitates the swift association and integration of olfactory experiences into functional designs, system integrations, and concept validations. 
Comprising a web UI for task-oriented development, an API server for seamless third-party integration, and wireless olfactory display hardware, Mul-O significantly enhances the ideation and prototyping process in multisensory tasks. This was verified by a 15-day workshop attended by 30 participants. The workshop produced seven innovative projects, underscoring Mul-O's efficacy in fostering olfactory innovation.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "The Future Laboratory" + } + ], + "personId": 169920 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "The Future Laboratory" + } + ], + "personId": 170500 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University ", + "dsl": "The Future Laboratory" + } + ], + "personId": 170601 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "The Future Laboratory" + } + ], + "personId": 170362 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "The Future Laboratory" + } + ], + "personId": 170307 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "The Future Laboratory" + } + ], + "personId": 170695 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "The Future Laboratory" + } + ], + "personId": 170567 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "The Future Lab" + } + ], + "personId": 170639 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Independent Researcher", + "dsl": "" + } + ], + "personId": 169987 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "The Future Laboratory" + } + ], + "personId": 170653 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 170195 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "The Future Lab" + } + ], + "personId": 170341 + } + ] + }, + { + "id": 170887, + "typeId": 13748, + "title": "Accessible Gesture Typing on Smartphones for People with Low Vision", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676447" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1905", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171024, + 175078 + ], + "eventIds": [], + "abstract": "While gesture typing is widely adopted on touchscreen keyboards, its support for low vision users is limited. 
We have designed and implemented two keyboard prototypes, layout-magnified and key-magnified keyboards, to enable gesture typing for people with low vision. Both keyboards facilitate uninterrupted access to all keys while the screen magnifier is active, allowing people with low vision to input text with one continuous stroke. Furthermore, we have created a kinematics-based decoding algorithm to accommodate the typing behavior of people with low vision. This algorithm can decode the gesture input even if the gesture trace deviates from a pre-defined word template, and the starting position of the gesture is far from the starting letter of the target word. Our user study showed that the key-magnified keyboard achieved 5.28 words per minute, 27.5% faster than a conventional gesture typing keyboard with voice feedback.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York city", + "institution": "Stony Brook University", + "dsl": "Computer Science Department " + } + ], + "personId": 170163 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Stony Brook", + "institution": "Stony Brook University", + "dsl": "Department of Computer Science" + } + ], + "personId": 169890 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Virginia", + "city": "Norfolk", + "institution": "Old Dominion University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170042 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "New York University", + "dsl": "" + } + ], + "personId": 170707 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Stony Brook", + "institution": "Stony Brook University", + "dsl": "Computer Science" + } + ], + "personId": 169842 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Stony Brook", + "institution": "Stony Brook University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170153 + } + ] + }, + { + "id": 170888, + "typeId": 13749, + "title": "Fluxable: A Tool for Making 3D Printable Sensors and Actuators", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686342" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-6812", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171049, + 175080 + ], + "eventIds": [], + "abstract": "We present Fluxable, a tool for making custom sensors and actuators 3D printable with customer-grade Stereolithography (SLA) 3D printers. With this tool, the user converts an arbitrary 3D model into a deformable body with integrated helix-and-lattice structures, which comprise a hollow helical channel in the center, lattice paddings, and a wireframe structure on the surface. The tool allows for the parameterization of the helix for sensing performance and customization of the lattice for actuation. By inserting a conductive shape-memory alloy (SMA) into a printed object through the helical channel, the converted shape becomes a sensor to detect various shape-changing behaviors using inductive sensing or an actuator to trigger movements through temperature control. 
We demonstrated our tool with a series of example sensors and actuators, including an interactive timer, a DJ station, and a caterpillar robot.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "" + } + ], + "personId": 169741 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafeyette", + "institution": "Purdue University", + "dsl": "School of Engineering Technology" + } + ], + "personId": 170057 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "School of Engineering Technology" + } + ], + "personId": 170561 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "Computer Graphics Technology" + } + ], + "personId": 170328 + } + ] + }, + { + "id": 170889, + "typeId": 13749, + "title": "HoloClass: Enhancing VR Classroom with Live Volumetric Video Streaming", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686330" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-4195", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171049, + 175069 + ], + "eventIds": [], + "abstract": "Virtual Reality (VR) enhances education by creating immersive and engaging learning environments. Volumetric video (VV) further improves VR classrooms by offering realistic, 3D representations of instructors and materials without high development costs. This study introduces HoloClass, a live VV streaming system for VR classrooms. We conducted interviews with 18 students to identify key design needs, resulting in features of HoloClass that support real-time awareness, classroom scalability, and note-taking. 
Our contributions include empirical insights into designing educational tools with live VV in VR classrooms and the implementation of features that enhance interaction and learning in a virtual setting.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Los Angeles", + "institution": "University of Southern California", + "dsl": "" + } + ], + "personId": 169840 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Minnesota", + "city": "Minneapolis", + "institution": "University of Minnesota", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 169732 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Los Angeles", + "institution": "University of Southern California", + "dsl": "" + } + ], + "personId": 170434 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Virginia", + "city": "Fairfax", + "institution": "George Mason University", + "dsl": "" + } + ], + "personId": 169933 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Minnesota", + "city": "Minneapolis", + "institution": "University of Minnesota", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 170306 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Los Angeles", + "institution": "University of Southern California", + "dsl": "Department of ECE" + } + ], + "personId": 169872 + } + ] + }, + { + "id": 170890, + "typeId": 13745, + "title": "Enhancing How People Learn Procedural Tasks Through How-to Videos", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686711" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24e-1015", + "source": "PCS", + "trackId": 13202, + "tags": [], + "keywords": [], + "sessionIds": [ + 171036 + ], + "eventIds": [], + "abstract": "Humans learn skills to perform various tasks in their everyday lives. While how-to videos serve as a popular tool for people to learn skills and achieve tasks, there are limitations in learning from videos such as difficulties in accessing information in need or lack of personalized support. My Ph.D. research aims to enhance how people learn procedural tasks through how-to videos by understanding and improving the consumption of video content, application of the content to their own context, and reflection on the experiences. This research presents opportunities and insights into how we could better leverage videos for humans to learn skills and achieve tasks.", + "authors": [ + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "" + } + ], + "personId": 170690 + } + ] + }, + { + "id": 170891, + "typeId": 13745, + "title": "Enabling Safer Augmented Reality Experiences: Usable Privacy Interventions for AR Creators and End-Users", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686708" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24e-1014", + "source": "PCS", + "trackId": 13202, + "tags": [], + "keywords": [], + "sessionIds": [ + 171036 + ], + "eventIds": [], + "abstract": "Augmented reality (AR) is approaching everyday usage, but poses novel privacy concerns for end-users and bystanders due to how AR devices capture users and process physical environments. 
To enable the benefits of AR while balancing privacy goals, my dissertation develops tools and frameworks to guide AR creators and users to address privacy risks that can arise with AR. First, I explore how to enable AR designers to interactively analyze potential risks in their prototypes through implicit threat modeling within AR authoring tools. Next, through elicitation studies with AR and privacy experts, I contribute frameworks to expand AR interaction models with privacy-friendlier alternatives to traditional AR input, output, and interaction techniques. Lastly, I develop a suite of AI-enabled Privacy Assistant techniques to raise users’ awareness of privacy risks and help them adapt AR interfaces accordingly. Ultimately, my dissertation promotes an AR ecosystem with privacy at the forefront by equipping AR creators and users with a privacy mindset.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "School of Information" + } + ], + "personId": 170445 + } + ] + }, + { + "id": 170892, + "typeId": 13744, + "title": "RelieFoam: Rapid Prototyping of 2.5D Texture using Laser Cutter", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686741" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1076", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Rapid prototyping", + "Texture", + "Laser processing", + "Foam" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We propose RelieFoam, a rapid prototyping method that uses a laser cutter to produce a 2.5D textured surface on polystyrene foam. \r\nConventional rapid prototyping methods using a laser cutter can quickly create 3D prototypes, but with the limitation that their surfaces can only be flat and smooth.\r\nOur method enables the rapid creation of objects with finely detailed 2.5D textured surfaces. \r\nBy applying localized high-density energy from a laser cutter to low thermal conductivity polystyrene foam, a laser cutter can selectively remove only the specific parts of the surface of the polystyrene foam being targeted.\r\nWe have built a computational model that calculates the laser parameters required to engrave the polystyrene foam in this way. 
\r\nApplying the laser parameters calculated by our model, we are able to implement applications with haptic textures and visual translucency and thus demonstrate the potential of our method.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "Kanagawa", + "city": "Atsugi", + "institution": "Nippon Telegraph and Telephone Corporation", + "dsl": "NTT Communication Science Laboratories" + } + ], + "personId": 169780 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "Kanagawa", + "city": "Atsugi", + "institution": "Nippon Telegraph and Telephone Corporation", + "dsl": "NTT Communication Science Laboratories" + } + ], + "personId": 170626 + } + ] + }, + { + "id": 170893, + "typeId": 13744, + "title": "FlexEOP: Flexible Shape-changing Actuator using Embedded Electroosmotic Pumps", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686785" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1077", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Shape-changing Actuators", + "Shape-changing Display", + "Haptics", + "Soft robotics", + "Fluidics", + "Electroosmotic pump", + "Programmable Materials" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Shape-changing actuators have been widely explored in the field of human-computer interaction (HCI), enabling various applications of shape-changing interfaces across from haptic feedback devices to robotics.\r\nHowever it is still challenging for existing methods to build shape-changing actuators that are flexible, capable of complex shape-changing behaviors, and highly self-contained at the same time.\r\nIn this paper, we proposed FlexEOP, a method to create flexible electroosmotic pumps that are fully composed of flexible materials, facilitating shape-changing actuators with high flexibility and self-containment.\r\nWe introduced the structure of FlexEOP and then demonstrated the design space of FlexEOP, including shape-changing display on flexible strips, panels, and curved surfaces, and a novel design of soft robotic fiber. 
\r\nBased on FlexEOP, we envision future applications including wearable tactile devices, curved shape-changing displays, and multi-degree-of-freedom self-contained soft robotics.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "The Future Lab" + } + ], + "personId": 170527 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Beihang University", + "dsl": "" + } + ], + "personId": 169749 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 170209 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 170572 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Institute of Software, Chinese Academy of Sciences", + "dsl": "" + } + ], + "personId": 169753 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 170195 + } + ] + }, + { + "id": 170894, + "typeId": 13748, + "title": "avaTTAR: Table Tennis Stroke Training with On-body and Detached Visualization in Augmented Reality", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676400" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6244", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171051, + 175089 + ], + "eventIds": [], + "abstract": "Table tennis stroke training is a critical aspect of player development. We designed a new augmented reality (AR) system, avaTTAR, for table tennis stroke training. The system provides both “on-body” (first-person view) and “detached” (third-person view)\r\nvisual cues, enabling users to visualize target strokes and correct their attempts effectively with this dual perspectives setup. By employing a combination of pose estimation algorithms and IMU sensors, avaTTAR captures and reconstructs the 3D body pose and paddle orientation of users during practice, allowing real-time comparison with expert strokes. 
Through a user study, we affirm avaTTAR’s capacity to amplify player experience and training results.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "" + } + ], + "personId": 170291 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette ", + "institution": "Purdue University", + "dsl": "School of Mechanical Engineering " + } + ], + "personId": 169682 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "Elmore Family School of Electrical and Computer Engineering" + } + ], + "personId": 169699 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "" + } + ], + "personId": 170498 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "Department of Electrical and Computer Engineering " + } + ], + "personId": 169683 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "" + } + ], + "personId": 170456 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "" + } + ], + "personId": 170495 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "School of Mechanical Engineering" + } + ], + "personId": 170253 + } + ] + }, + { + "id": 170895, + "typeId": 13751, + "title": "Bridging disciplines for a new era in Physical AI", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686704" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24g-5198", + "source": "PCS", + "trackId": 13208, + "tags": [], + "keywords": [], + "sessionIds": [ + 171058 + ], + "eventIds": [], + "abstract": "Physical AI is extending models from the digital world into performing tasks in the real world. Robots and autonomous cars are common examples of physical AI agents. In this workshop, we aim to go beyond containing physical AI in those agents and ask what if all objects and materials we interact with were intelligent? We aim to form an understanding of the opportunities and challenges of extending agents to include objects and materials that can adapt to users' needs, i.e., change shape, firmness, color, tactile properties, etc. This broad vision, which is challenging to achieve, is related to many active research areas, e.g., programmable matter, modular robotics, soft robotics, smart materials, shape-changing interfaces, or radical atoms, and has homes in many disciplines, incl. mechanical engineering, robotics, material science, computer science. Many new approaches are being developed in the individual disciplines that together might be the start of a new era for what we like to call extended physical AI.
In this workshop, we bring perspectives from these different disciplines together to exchange new approaches to longstanding challenges (e.g., actuation, computational design, fabrication, control), exchange tacit knowledge, discuss visions for future applications, map the new grand challenges, and inspire the next generation of physical AI research.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170295 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Mechanical Engineering Department" + } + ], + "personId": 170387 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "Mechanical Engineering, Morphing Matter Lab" + } + ], + "personId": 170224 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "University of Pittsburgh", + "dsl": "" + } + ], + "personId": 170623 + } + ] + }, + { + "id": 170896, + "typeId": 13748, + "title": "Thermal In Motion: Designing Thermal Flow Illusions with Tactile and Thermal Interaction", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676460" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8420", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171030, + 175072 + ], + "eventIds": [], + "abstract": "This study presents a novel method for creating moving thermal sensations by integrating the thermal referral illusion with tactile motion. Conducted through three experiments on human forearms, the first experiment examined the impact of temperature and thermal actuator placement on perceived thermal motion, finding the clearest perception with a centrally positioned actuator under both hot and cold conditions. The second experiment identified the speed thresholds of perceived thermal motion, revealing a wider detectable range in hot conditions (1.8 cm/s to 9.5cm/s) compared to cold conditions (2.4cm/s to 5.0cm/s). Finally, we integrated our approach into virtual reality (VR) to assess its feasibility through two interaction scenarios. 
Our results shed light on the comprehension of thermal perception and its integration with tactile cues, promising significant advancements in incorporating thermal motion into diverse thermal interfaces for immersive VR experiences.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Erik Jonsson School of Engineering and Computer Science" + } + ], + "personId": 170331 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Department of Computer Science" + } + ], + "personId": 170090 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "" + } + ], + "personId": 169941 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Richardson", + "institution": "University of Texas at Dallas", + "dsl": "Department of Computer Science" + } + ], + "personId": 170424 + } + ] + }, + { + "id": 170897, + "typeId": 13744, + "title": "Demonstration of JetUnit: Rendering Diverse Force Feedback in Virtual Reality Using Water Jets", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686742" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1079", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "haptics", + "water jets", + "force feedback", + "VR" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "JetUnit is a water-based VR haptic system capable of generating a diverse range of perceived force intensities and frequencies through water jets. A key challenge in designing this system was optimizing parameters to enable the haptic device to produce force feedback that closely mimics the most intense force achievable with direct water jets, while ensuring the user remains dry. 
In this demonstration, we showcase JetUnit by integrating our haptic solutions into various virtual reality interactions, including touch, poking, injection, and recurring pulsing and wave signals.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University of Maryland", + "dsl": "Department of Computer Science" + } + ], + "personId": 170517 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University of Maryland", + "dsl": "Department of Computer Science" + } + ], + "personId": 170194 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University Of Maryland", + "dsl": "Department of Computer Science" + } + ], + "personId": 170082 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University of Maryland", + "dsl": "" + } + ], + "personId": 169686 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University of Maryland", + "dsl": "Computer Science" + } + ], + "personId": 170113 + } + ] + }, + { + "id": 170898, + "typeId": 13748, + "title": "DexteriSync: A Hand Thermal I/O Exoskeleton for Morphing Finger Dexterity Experience", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676422" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8540", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175066, + 171038 + ], + "eventIds": [], + "abstract": "Skin temperature is an important physiological factor for human hand dexterity. Leveraging this feature, we engineered an exoskeleton, called DexteriSync, that can dynamically adjust the user's finger dexterity and induce different thermal perceptions by modulating finger skin temperature. This exoskeleton comprises flexible silicone-copper tube segments, 3D-printed finger sockets, a 3D-printed palm base, a pump system, and a water temperature control with a storage unit. By realising an embodied experience of compromised dexterity, DexteriSync can help product designers understand the lived experience of compromised hand dexterity, such as that of the elderly and/or neurodivergent users, when designing daily necessities for them. We validated DexteriSync via a technical evaluation and two user studies, demonstrating that it can change skin temperature, dexterity, and thermal perception. An exploratory session with design students and an autistic compromised dexterity individual, demonstrated the exoskeleton provided a more realistic experience compared to video education, and allowed them to gain higher confidence in their designs. 
The results advocated for the efficacy of experiencing embodied compromised finger dexterity, which can promote an understanding of the related physical challenges and lead to a more persuasive design for assistive tools.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Yokohama", + "institution": "Keio University Graduate School of Media Design", + "dsl": "" + } + ], + "personId": 169688 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Yokohama", + "institution": "Keio University Graduate School of Media Design", + "dsl": "" + } + ], + "personId": 169796 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Yokohama", + "institution": "Keio University Graduate School of Media Design", + "dsl": "" + } + ], + "personId": 170311 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University of Maryland", + "dsl": "" + } + ], + "personId": 169686 + } + ] + }, + { + "id": 170899, + "typeId": 13756, + "title": "MetaController: Sheet Material Based Flexible Game Controlling System", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686732" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24d-4910", + "source": "PCS", + "trackId": 13203, + "tags": [], + "keywords": [], + "sessionIds": [ + 171027 + ], + "eventIds": [], + "abstract": "We introduce MetaControllers, reconfigurable game-controlling devices that are made of wood sheets. We intend to use this flexible material system to create devices whose configurations can be customized for different games to create context-specific haptic experiences, which cannot otherwise be achieved by traditional game controllers. Our approach exploits cellular-based material structure units consisting of rigid faces and hinges out of one single material. By assembling units together, we can create higher level of mechanical transformations through either embedded actuation or externally applied compression. In this proposal, we show that the different compositions of these units, when coupled with electronics, can enable novel ways of controlling and interacting with the games that provide dynamic physical affordances and guide users with dynamic physical constraints.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170129 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "School of Architecture" + } + ], + "personId": 169929 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Entertainment Technology Center" + } + ], + "personId": 170529 + } + ] + }, + { + "id": 170900, + "typeId": 13744, + "title": "Can a Smartwatch Move Your Fingers? Compact and Practical Electrical Muscle Stimulation in a Smartwatch", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1081", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Smartwatches gained popularity in the mainstream, making them into today’s de-facto wearables. 
Despite advancements in sensing, haptics on smartwatches is still restricted to tactile feedback (e.g., vibration). Most smartwatch-sized actuators cannot render strong force-feedback. Simultaneously, electrical muscle stimulation (EMS) promises compact force-feedback but, to actuate fingers requires users to wear many electrodes on their forearms. While forearm electrodes provide good accuracy, they detract EMS from being a practical force-feedback interface. To address this, we propose moving the electrodes to the wrist—conveniently packing them in the backside of a smartwatch. In our first study, we found that by cross-sectionally stimulating the wrist in 1,728 trials, we can actuate thumb extension, index extension & flexion, middle flexion, pinky flexion, and wrist flexion. Following, we engineered a compact muscle stimulator that integrates directly into a smartwatch’s wristband (with a custom stimulator, electrodes, demultiplexers, and communication). In our second study, we found that participants could calibrate our device by themselves ~50% faster than with conventional EMS. Furthermore, all participants preferred the experience of this device, especially for its social acceptability & practicality. We believe that our approach opens new applications for smartwatch-based interactions, such as haptic assistance during everyday tasks (e.g., playing music or exercising).", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 169989 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170155 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University Of Chicago", + "dsl": "" + } + ], + "personId": 170095 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170334 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 169862 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170376 + } + ] + }, + { + "id": 170901, + "typeId": 13744, + "title": "Demonstration of Sympathetic Orchestra: An Interactive Conducting Education System for Responsive, Tacit Skill Development", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686783" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1083", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Interaction Design", + "Conducting and Music Education" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Students learning musical conducting often practice along to static recordings, which do not provide real-time feedback similar to that of a live orchestra during rehearsals. Novice conductors need better solutions for practicing with feedback that mimics the experience of conducting a live orchestra. 
We can leverage emergent multimodal and spatial interaction technologies to support a “virtual orchestra” practice experience that allows students to develop tacit, live-practice knowledge. Through formative interviews with conducting experts and students, we designed and developed a dynamic, multimodal interaction system that targets key goals held by students developing their orchestral conducting skills, and that traditional practicing methods lack support for. Sympathetic Orchestra is an interactive virtual orchestra system that uses Google AI edge-powered Computer Vision hand and face tracking on webcam data to responsively interact with dynamic audio music playback and develop tacit practicing experiences.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "UC Berkeley", + "dsl": "College of Engineering" + } + ], + "personId": 169864 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California: Berkeley", + "dsl": "Berkeley Institute of Design Lab" + } + ], + "personId": 170344 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 169754 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "Graduate School of Education" + } + ], + "personId": 170063 + } + ] + }, + { + "id": 170902, + "typeId": 13749, + "title": "Collision Prevention in Diminished Reality through the Use of Peripheral Vision", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686346" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-1910", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171049, + 175072 + ], + "eventIds": [], + "abstract": "Diminished reality (DR) removes virtual and real objects from the user's view, decluttering and optimizing what the user sees through augmented/mixed reality. However, removing real objects create a potential safety concern because users may unintentionally bump into the diminished object. To address this issue, we apply characteristics of peripheral vision to DR and diminish objects when they are in the peripheral region and show the object when the user focuses on the object. 
We created 3 different object opacity control functions that use gaze information and evaluated them.", + "authors": [ + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "Graduate School of Culture Technology" + } + ], + "personId": 170446 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "Graduate School of Culture Technology" + } + ], + "personId": 169873 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "Graduate School of Culture Technology" + } + ], + "personId": 170178 + } + ] + }, + { + "id": 170903, + "typeId": 13744, + "title": "MindCube: an Interactive Device for Gauging Emotions", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686771" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1084", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Fidget", + "interactive device", + "emotion study" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "This paper introduces the MindCube, an interactive device designed for studying emotions. Resembling a fidget cube toy commonly used for stress and anxiety relief, the MindCube features a compact cubic shape (3.3 cm x 3.3 cm x 3.3 cm), making it small, easy to hold, and ideal for playful interaction. Like a fidget cube toy, each side of the MindCube is equipped with various interactive inputs, including tactile buttons, a small rolling disk, and a joystick. Additionally, the device is fitted with a 9-DoF IMU (Inertial Measurement Unit) to measure real-time orientation when held by the user. 
Furthermore, the MindCube includes a linear vibration motor to provide haptic feedback to enhance the interactive experience.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT", + "dsl": "Media Lab, Responsive Environments group" + } + ], + "personId": 170593 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT", + "dsl": "Responsive Environments, Media Lab" + } + ], + "personId": 170356 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT Media Lab", + "dsl": "Responsive Environments" + } + ], + "personId": 170610 + } + ] + }, + { + "id": 170904, + "typeId": 13744, + "title": "UIClip: A Data-driven Model for Assessing User Interface Design", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1086", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We developed a machine-learned model, UIClip, for assessing the design quality and visual relevance of a UI given its screenshot and natural language description.\r\nThrough training on both synthetic and human-rated data, UIClip implicitly learned properties of good and bad designs to i) assign a UI quality score and ii) provide design suggestions.\r\nOur demonstration shows example applications of UIClip such as improving UI code generation and generating UI design recommendations.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170064 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170368 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170122 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Apple", + "dsl": "" + } + ], + "personId": 170680 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Apple", + "dsl": "" + } + ], + "personId": 170699 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Diego", + "institution": "Apple Inc", + "dsl": "" + } + ], + "personId": 170590 + } + ] + }, + { + "id": 170905, + "typeId": 13748, + "title": "SolePoser: Real-Time 3D Human Pose Estimation using Insole Pressure Sensors", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676418" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2319", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175077, + 171047 + ], + "eventIds": [], + "abstract": "We propose SolePoser, a real-time 3D pose estimation system that leverages only a single pair of insole sensors.
Unlike conventional methods relying on fixed cameras or bulky wearable sensors, our approach offers minimal and natural setup requirements. The proposed system utilizes pressure and IMU sensors embedded in insoles to capture the body weight's pressure distribution at the feet and its 6 DoF acceleration. This information is used to estimate the 3D full-body joint position by a two-stream transformer network. A novel double-cycle consistency loss and a cross-attention module are further introduced to learn the relationship between 3D foot positions and their pressure distributions.\r\nWe also introduced two different datasets of sports and daily exercises, offering 908k frames across eight different activities. Our experiments show that our method's performance is on par with top-performing approaches, which utilize more IMUs and even outperform third-person-view camera-based methods in certain scenarios. ", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "Tokyo Institute of Technology", + "dsl": "School of Computing" + }, + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Robotics Institute" + } + ], + "personId": 170658 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Robotics Institute" + } + ], + "personId": 169767 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "Tokyo Institute of Technology", + "dsl": "School of Computing" + } + ], + "personId": 170043 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Robotics Institute" + } + ], + "personId": 170427 + } + ] + }, + { + "id": 170906, + "typeId": 13749, + "title": "LingoComics: Co-Authoring Comic Style AI-Empowered Stories for Language Learning Immersion with Story Designer", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-5957", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059, + 175064 + ], + "eventIds": [], + "abstract": "In language learning applications, contextualization and immersion rely on real-life communication and remain challenging despite the recent advancements in artificial intelligence (AI) that have significantly impacted educational and language learning experiences. This paper introduces LingoComics, a web application that embeds AI-empowered stories with narrative and comic-style illustrations to enhance contextualization and personalization in language learning. At the core of LingoComics is the Story Designer module, which allows learners to co-author short narratives using a structured set of parameters within a simple user interface. Leveraging OpenAI’s GPT-4-turbo for text completion and DALLE-3 for image generation, the Story Designer generates contextually relatable stories and comic-style images based on user input. Future work includes user evaluations, activity designs, and additional language learning support features. 
LingoComics aims to increase learners' confidence and motivation by enabling personalized, situational language practice, preparing them for real-life communication.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Oshawa", + "institution": "Ontario Tech University", + "dsl": "" + } + ], + "personId": 169962 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Oshawa", + "institution": "Ontario Tech University", + "dsl": "" + } + ], + "personId": 170226 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Oshawa", + "institution": "Ontario Tech University", + "dsl": "" + } + ], + "personId": 170598 + } + ] + }, + { + "id": 170907, + "typeId": 13748, + "title": "Augmented Physics: Creating Interactive and Embedded Physics Simulations from Static Textbook Diagrams", + "award": "BEST_PAPER", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676392" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3529", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175070, + 171037 + ], + "eventIds": [], + "abstract": "We introduce Augmented Physics, a machine learning-integrated authoring tool designed for creating embedded interactive physics simulations from static textbook diagrams. Leveraging recent advancements in computer vision, such as Segment Anything and Multi-modal LLMs, our web-based system enables users to semi-automatically extract diagrams from physics textbooks and generate interactive simulations based on the extracted content. These interactive diagrams are seamlessly integrated into scanned textbook pages, facilitating interactive and personalized learning experiences across various physics concepts, such as optics, circuits, and kinematics. Drawing from an elicitation study with seven physics instructors, we explore four key augmentation strategies: 1) augmented experiments, 2) animated diagrams, 3) bi-directional binding, and 4) parameter visualization. We evaluate our system through technical evaluation, a usability study (N=12), and expert interviews (N=12). 
Study findings suggest that our system can facilitate more engaging and personalized learning experiences in physics education.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "" + } + ], + "personId": 169924 + }, + { + "affiliations": [ + { + "country": "Hong Kong", + "state": "State/Territory", + "city": "Hong Kong", + "institution": "City University of Hong Kong", + "dsl": "" + } + ], + "personId": 170022 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "" + } + ], + "personId": 169878 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "" + } + ], + "personId": 170167 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 169841 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "" + } + ], + "personId": 169785 + } + ] + }, + { + "id": 170908, + "typeId": 13745, + "title": "Nervous System Interception: A New Paradigm for Haptics", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686715" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24e-1021", + "source": "PCS", + "trackId": 13202, + "tags": [], + "keywords": [], + "sessionIds": [ + 171036 + ], + "eventIds": [], + "abstract": "When outputting information to our senses, almost all wearable interfaces follow the same principle: externally generating stimuli (e.g., lights, sounds, vibrations) and then presenting them via devices placed at the endpoints of our sensory system, such as head-mounted displays facing the eyes and vibration motors on the skin. While this intuitive approach of stimulating the endpoints might be sufficient for audiovisual interfaces, we argue that when it comes to engaging the sense of touch (i.e., haptics), it will be insufficient to support a wide variety of interactive experiences. Even a single haptic device on the hand will obstruct users from touching or grabbing objects in the real world, making it undesirable for mixed reality. Let alone scaling this approach to a larger portion of the body, which would restrict the user's whole body. My research introduces an alternative approach to haptic output: instead of stimulating endpoints with external stimuli, we explore interactive devices that “internally” send electrical signals to the user's nervous system—intercepting the nervous system. Our approach creates haptic sensations beyond the point where the device is worn, establishing a basis for enabling haptic feedback while keeping the user's body free and scaling haptic interfaces to work for the entire body. 
This paper provides an overview of our approach: (1) how intercepting the nerves can provide touch and force feedback without obstructing the user's body with actuators; (2) how it can integrate into practical wearable devices such as a smartwatch; and (3) its potential to eventually generalize to a full-body interface by intercepting the user's brain.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170155 + } + ] + }, + { + "id": 170909, + "typeId": 13745, + "title": "Granting Non-AI Experts Creative Control Over AI Systems", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686714" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24e-1020", + "source": "PCS", + "trackId": 13202, + "tags": [], + "keywords": [], + "sessionIds": [ + 171036 + ], + "eventIds": [], + "abstract": "Many harmful behaviors and problematic deployments of AI stem from the fact that AI experts are not experts in the vast array of settings where AI is applied. Non-AI experts from these domains hold promising potential to contribute their expertise and directly design the AI systems that impact them, but they face substantial technical and effort barriers. Could we redesign AI development tools to match the language of non-technical end users? My research develops novel systems allowing non-AI experts to define AI behavior in terms of interpretable, self-defined concepts. Monolithic, black-box models do not yield such control, so we introduce techniques for users to create many narrow, personalized models that they can better understand and steer. We demonstrate the success of this approach across the AI lifecycle: from designing AI objectives to evaluating AI behavior to authoring end-to-end AI systems. When non-AI experts design AI from start to finish, they notice gaps and build solutions that AI experts could not—such as creating new feed ranking models to mitigate partisan animosity, surfacing underreported issues with content moderation models, and activating unique pockets of LLM behavior to amplify their personal writing style.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "Dept. of Computer Science" + } + ], + "personId": 170520 + } + ] + }, + { + "id": 170910, + "typeId": 13749, + "title": "DataPipettor: Touch-Based Information Transfer Interface Using Proximity Wireless Communication", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686333" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-2204", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 175077, + 171059 + ], + "eventIds": [], + "abstract": "As diverse computing devices, including wearable and embedded systems, become ubiquitous in our living spaces, sharing and transferring data between these computers has become a usual operation. This paper introduces DataPipettor, an information transfer interface that enables intuitive data movement based on touch interactions. DataPipettor utilizes proximity wireless communication (PWC), allowing high-speed real-time communication between proximate channels while simultaneously achieving touch sensing and data transmission/reception. 
The interface can be miniaturized due to the characteristics of PWC, making it suitable for use in wearable devices and small equipment interfaces. Users wearing the interface can intuitively transfer data through touch interactions as if physically passing objects. This paper describes the development and evaluation of the interface, presents a conceptual demonstration, and discusses future prospects.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 170463 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 170303 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 170583 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 170549 + } + ] + }, + { + "id": 170911, + "typeId": 13748, + "title": "Block and Detail: Scaffolding Sketch-to-Image Generation", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676444" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3405", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171051, + 175089 + ], + "eventIds": [], + "abstract": "We introduce a novel sketch-to-image tool that aligns with the iterative refinement process of artists. Our tool lets users sketch blocking strokes to coarsely represent the placement and form of objects and detail strokes to refine their shape and silhouettes. We develop a two-pass algorithm for generating high-fidelity images from such sketches at any point in the iterative process. In the first pass we use a ControlNet to generate an image that strictly follows all the strokes (blocking and detail) and in the second pass we add variation by renoising regions surrounding blocking strokes. We also present a dataset generation scheme that, when used to train a ControlNet architecture, allows regions that do not contain strokes to be interpreted as not-yet-specified regions rather than empty space. We show that this partial-sketch-aware ControlNet can generate coherent elements from partial sketches that only contain a small number of strokes. The high-fidelity images produced by our approach serve as scaffolds that can help the user adjust the shape and proportions of objects or add additional elements to the composition. We demonstrate the effectiveness of our approach with a variety of examples and evaluative comparisons. 
Quantitatively, novice viewers prefer the quality of images from our algorithm over a baseline Scribble ControlNet for 82% of the pairs and found our images had less distortion in 80% of the pairs.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "Computer Science" + } + ], + "personId": 170477 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "" + } + ], + "personId": 170156 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "" + } + ], + "personId": 170110 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "" + }, + { + "country": "United States", + "state": "California", + "city": "San Mateo", + "institution": "Roblox", + "dsl": "" + } + ], + "personId": 170187 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "" + } + ], + "personId": 170396 + } + ] + }, + { + "id": 170912, + "typeId": 13749, + "title": "Piezoelectric Sensing of Mask Surface Waves for Noise-Suppressive Speech Input", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686331" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-6387", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171049, + 175062 + ], + "eventIds": [], + "abstract": "Voice-controlled medical devices in operating rooms face challenges due to face masks and noise, hindering speech recognition accuracy. We present a novel solution: a detachable piezoelectric microphone attached to a face mask, prioritizing hygiene and comfort. By capturing surface vibrations from the user's voice, our system significantly improves scale-invariant signal-to-distortion ratio (Si-SNR) compared to traditional unidirectional microphones, especially for whispered speech, as demonstrated in a simulated noisy environment. 
This real-time, noise-suppressive approach offers a promising avenue for enhancing voice input and conversations in various applications and settings.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "Tokyo", + "city": "Bunkyo", + "institution": "The University of Tokyo", + "dsl": "Graduate School of Interdisciplinary Information Studies" + }, + { + "country": "Japan", + "state": "Chiba", + "city": "Kashiwa", + "institution": "National Institute of Advanced Industrial Science and Technology", + "dsl": "Human Augmentation Research Center" + } + ], + "personId": 170489 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + }, + { + "country": "Japan", + "state": "", + "city": "Kyoto", + "institution": "Sony CSL Kyoto", + "dsl": "" + } + ], + "personId": 169707 + } + ] + }, + { + "id": 170913, + "typeId": 13749, + "title": "Development and Evaluation of Collision Avoidance User Interface for Assistive Vision Impaired Navigation", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686354" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-7114", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059, + 175078 + ], + "eventIds": [], + "abstract": "In this paper, an initial novel user interface prototype is developed and evaluated to assist vision impaired people to avoid collision with moving people while navigating in indoor environments. The user interface performs both pose classification and distance estimation based on RGB images and sends the results to the decision tree classifier model which classifies whether it is safe for vision impaired person to navigate, or should the vision impaired person be cautious or stop to avoid collision with the moving person. Experimentation showed that the user interface with the combined performance of pose classification, distance estimation and decision tree model showed an accuracy of 93.55% on a testing video dataset.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Arlington", + "institution": "The University of Texas at Arlington", + "dsl": "" + } + ], + "personId": 169769 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Arlington", + "institution": "The University of Texas at Arlington", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 169846 + } + ] + }, + { + "id": 170914, + "typeId": 13748, + "title": "PrISM-Observer: Intervention Agent to Help Users Perform Everyday Procedures Sensed using a Smartwatch", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676350" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1229", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171022, + 175073 + ], + "eventIds": [], + "abstract": "We routinely perform procedures (such as cooking) that include a set of atomic steps. Often, inadvertent omission or misordering of a single step can lead to serious consequences, especially for those experiencing cognitive challenges such as dementia. This paper introduces PrISM-Observer, a smartwatch-based, context-aware, real-time intervention system designed to support daily tasks by preventing errors. 
Unlike traditional systems that require users to seek out information, the agent observes user actions and intervenes proactively. This capability is enabled by the agent's ability to continuously update its belief in the user's behavior in real-time through multimodal sensing and forecast optimal intervention moments and methods. We first validated the steps-tracking performance of our framework through evaluations across three datasets with different complexities. Then, we implemented a real-time agent system using a smartwatch and conducted a user study in a cooking task scenario. The system generated helpful interventions, and we gained positive feedback from the participants. The general applicability of PrISM-Observer to daily tasks promises broad applications, for instance, including support for users requiring more involved interventions, such as people with dementia or post-surgical patients.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170701 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tsukuba", + "institution": "University of Tsukuba", + "dsl": "" + } + ], + "personId": 169853 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "School of Computer Science" + } + ], + "personId": 170564 + } + ] + }, + { + "id": 170915, + "typeId": 13749, + "title": "SelfGauge: An Intelligent Tool to Support Student Self-assessment in GenAI-enhanced Project-based Learning", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686338" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-6821", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059 + ], + "eventIds": [], + "abstract": "Project-based learning (PBL) involves students tackling real-world problems and creating artifacts. With the rise of generative AI (GenAI) tools, assessing students in GenAI-enhanced PBL is challenging. To address this, we designed SelfGauge, a tool that supports student self-assessment by analyzing their GenAI usage and project artifacts. 
It helps students define criteria, seek feedback, and reflect on their performance, promoting continuous self-improvement.", + "authors": [ + { + "affiliations": [ + { + "country": "Hong Kong", + "state": "", + "city": "Hong Kong", + "institution": "Hong Kong University of Science and Technology", + "dsl": "" + } + ], + "personId": 169834 + }, + { + "affiliations": [ + { + "country": "Hong Kong", + "state": "", + "city": "New Territories", + "institution": "The Hong Kong University of Science and Technology", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 170277 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hong Kong", + "institution": "The Hong Kong University of Science and Technology", + "dsl": "" + } + ], + "personId": 170599 + }, + { + "affiliations": [ + { + "country": "Hong Kong", + "state": "", + "city": "Hong Kong", + "institution": "Hong Kong University of Science and Technology", + "dsl": "" + } + ], + "personId": 170268 + } + ] + }, + { + "id": 170916, + "typeId": 13745, + "title": "Supporting Control and Alignment in Personal Informatics Tools", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686709" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24e-1003", + "source": "PCS", + "trackId": 13202, + "tags": [], + "keywords": [], + "sessionIds": [ + 171036 + ], + "eventIds": [], + "abstract": "Despite the abundance of diverse personal data and its potential for improving health, individuals struggle to draw value from it. A key challenge is difficulties in controlling the functionality of existing systems and aligning them with evolving needs. These systems commonly restrict what information is recorded and how, lack effective means for sense-making and decision-making, and fall short in supporting the translation of data insights into personalized actions. My research addresses these challenges through building prototype systems, designing interactive techniques, and devising computational algorithms.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Allen School" + } + ], + "personId": 169999 + } + ] + }, + { + "id": 170917, + "typeId": 13744, + "title": "Towards an LLM-Based Speech Interface for Robot-Assisted Feeding", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686759" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1087", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "assistive robotics", + "large language models (LLMs)", + "speech interfaces" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Physically assistive robots present an opportunity to significantly increase the well-being and independence of individuals with motor impairments or other forms of disability who are unable to complete activities of daily living (ADLs). Speech interfaces, especially ones that utilize Large Language Models (LLMs), can enable individuals to effectively and naturally communicate high-level commands and nuanced preferences to robots. In this work, we demonstrate an LLM-based speech interface for a commercially available assistive feeding robot. 
Our system is based on an iteratively designed framework, from the paper \"VoicePilot: Harnessing LLMs as Speech Interfaces for Physically Assistive Robots,\" that incorporates human-centric elements for integrating LLMs as interfaces for robots. It has been evaluated through a user study with 11 older adults at an independent living facility.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 169991 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170439 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Robotics Institute" + } + ], + "personId": 170499 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon", + "dsl": "Robotics Institute" + } + ], + "personId": 170299 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Mechanical Engineering Department" + } + ], + "personId": 170387 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Robotics Institute" + } + ], + "personId": 170283 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170175 + } + ] + }, + { + "id": 170918, + "typeId": 13748, + "title": "Improving Steering and Verification in AI-Assisted Data Analysis with Interactive Task Decomposition", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676345" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6134", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175083, + 171048 + ], + "eventIds": [], + "abstract": "LLM-powered tools like ChatGPT Data Analysis, have the potential to help users tackle the challenging task of data analysis programming, which requires expertise in data processing, programming, and statistics. However, our formative study (n=15) uncovered serious challenges in verifying AI-generated results and steering the AI (i.e., guiding the AI system to produce the desired output). We developed two contrasting approaches to address these challenges. The first (Stepwise) decomposes the problem into step-by-step subgoals with pairs of editable assumptions and code until task completion, while the second (Phasewise) decomposes the entire problem into three editable, logical phases: structured input/output assumptions, execution plan, and code. A controlled, within-subjects experiment (n=18) compared these systems against a conversational baseline. Users reported significantly greater control with the Stepwise and Phasewise systems, and found intervention, correction, and verification easier, compared to the baseline. 
The results suggest design guidelines and trade-offs for AI-assisted data analysis tools.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Computer Science / DGP" + } + ], + "personId": 170071 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "", + "city": "Cambridge", + "institution": "Microsoft Research", + "dsl": "" + } + ], + "personId": 170464 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "", + "city": "Cambridge", + "institution": "Microsoft Research", + "dsl": "" + } + ], + "personId": 170241 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Department of Computer Science" + } + ], + "personId": 170395 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Microsoft", + "dsl": "" + } + ], + "personId": 170621 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "Cambridgeshire", + "city": "Cambridge", + "institution": "Microsoft Research ", + "dsl": "" + } + ], + "personId": 170685 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "", + "city": "Cambridge", + "institution": "Microsoft Research", + "dsl": "" + } + ], + "personId": 170403 + } + ] + }, + { + "id": 170919, + "typeId": 13744, + "title": "Block and Detail: Scaffolding Sketch-to-Image Generation", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1089", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We introduce a novel sketch-to-image tool that aligns with the iterative refinement process of artists. Our tool lets users sketch blocking strokes to coarsely represent the placement and form of objects and detail strokes to refine their shape and silhouettes. We develop a two-pass algorithm for generating high-fidelity images from such sketches at any point in the iterative process. In the first pass we use a ControlNet to generate an image that strictly follows all the strokes (blocking and detail) and in the second pass we add variation by renoising regions surrounding blocking strokes. We also present a dataset generation scheme that, when used to train a ControlNet architecture, allows regions that do not contain strokes to be interpreted as not-yet-specified regions rather than empty space. We show that this partial-sketch-aware ControlNet can generate coherent elements from partial sketches that only contain a small number of strokes. The high-fidelity images produced by our approach serve as scaffolds that can help the user adjust the shape and proportions of objects or add additional elements to the composition. We demonstrate the effectiveness of our approach with a variety of examples and evaluative comparisons. 
Quantitatively, novice viewers prefer the quality of images from our algorithm over a baseline Scribble ControlNet for 82% of the pairs and found our images had less distortion in 80% of the pairs.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "Computer Science" + } + ], + "personId": 170477 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "" + } + ], + "personId": 170156 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Palo Alto", + "institution": "Stanford University", + "dsl": "" + } + ], + "personId": 170110 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "" + }, + { + "country": "United States", + "state": "California", + "city": "San Mateo", + "institution": "Roblox", + "dsl": "" + } + ], + "personId": 170187 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Palo Alto", + "institution": "Stanford University", + "dsl": "" + } + ], + "personId": 170396 + } + ] + }, + { + "id": 170920, + "typeId": 13748, + "title": "GradualReality: Enhancing Physical Object Interaction in Virtual Reality via Interaction State-Aware Blending", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676463" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6372", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175074, + 171031 + ], + "eventIds": [], + "abstract": "We present GradualReality, a novel interface enabling a Cross Reality experience that includes gradual interaction with physical objects in a virtual environment and supports both presence and usability. Daily Cross Reality interaction is challenging as the user's physical object interaction state is continuously changing over time, causing their attention to frequently shift between the virtual and physical worlds. As such, presence in the virtual environment and seamless usability for interacting with physical objects should be maintained at a high level. To address this issue, we present an Interaction State-Aware Blending approach that (i) balances immersion and interaction capability and (ii) provides a fine-grained, gradual transition between virtual and physical worlds. The key idea includes categorizing the flow of physical object interaction into multiple states and designing novel blending methods that offer optimal presence and sufficient physical awareness at each state. 
We performed extensive user studies and interviews with a working prototype and demonstrated that GradualReality provides better Cross Reality experiences compared to baselines.", + "authors": [ + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Seoul", + "institution": "Seoul National University ", + "dsl": "" + } + ], + "personId": 170213 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "", + "city": "Cambridge", + "institution": "Nokia Bell Labs", + "dsl": "" + } + ], + "personId": 170382 + }, + { + "affiliations": [ + { + "country": "Singapore", + "state": "", + "city": "Singapore", + "institution": "Singapore Management University", + "dsl": "School of Information Systems" + } + ], + "personId": 170378 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Seoul", + "institution": "Seoul National University", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170287 + } + ] + }, + { + "id": 170921, + "typeId": 13744, + "title": "Demonstrating MagneDot: Integrated Fabrication and Actuation Methods of Dot-Based Magnetic Shape Displays", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1092", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We present MagneDot, a novel method for making interactive magnetic shape displays through an integrated fabrication process. Magnetic soft materials can potentially create fast, responsive morphing structures for human-computer interaction (HCI). However, novice users and designers typically do not have access to sophisticated equipment and materials or cannot afford heavy labor to create interactive objects based on this material. Modified from an open-source 3D printer, the fabrication system of MagneDot integrates the processes of mold-making, pneumatic extrusion, magnetization, and actuation, using cost-effective materials only. By providing a design tool, MagneDot allows users to generate G-Codes for fabricating and actuating displays of various morphing effects. 
Finally, a series of design examples demonstrate the possibilities of shape displays enabled by MagneDot.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "International Design Institute" + } + ], + "personId": 169901 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "International Design Institute" + } + ], + "personId": 170594 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 169915 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "International Design Institute" + } + ], + "personId": 169694 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "International Design Institute" + } + ], + "personId": 170343 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170182 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Hangzhou City University", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 169747 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Ningbo", + "institution": "College of Science & Technology Ningbo University", + "dsl": "" + } + ], + "personId": 170263 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Hangzhou City University", + "dsl": "" + } + ], + "personId": 170366 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170103 + } + ] + }, + { + "id": 170922, + "typeId": 13748, + "title": "Tyche: Making Sense of Property-Based Testing Effectiveness", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676407" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-4866", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171055, + 175081 + ], + "eventIds": [], + "abstract": "Software developers increasingly rely on automated methods to assess the\r\ncorrectness of their code. One such method is property-based testing\r\n(PBT), wherein a test harness generates hundreds or thousands of inputs\r\nand checks the outputs of the program on those inputs using parametric\r\nproperties. Though powerful, PBT induces a sizable gulf of evaluation:\r\ndevelopers need to put in nontrivial effort to understand how well the\r\ndifferent test inputs exercise the software under test. To bridge this\r\ngulf, we propose Tyche, a user interface that supports sensemaking\r\naround the effectiveness of property-based tests. Guided by a formative\r\ndesign exploration, our design of Tyche supports developers with\r\ninteractive, configurable views of test behavior with tight integrations\r\ninto modern developer testing workflow. These views help developers\r\nexplore global testing behavior and individual test inputs alike. 
To\r\naccelerate the development of powerful, interactive PBT tools, we define\r\na standard for PBT test reporting and integrate it with a widely used\r\nPBT library. A self-guided online usability study revealed that Tyche's\r\nvisualizations help developers to more accurately assess software\r\ntesting effectiveness.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Philadelphia", + "institution": "University of Pennsylvania", + "dsl": "Computer and Information Science" + } + ], + "personId": 170516 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Philadelphia", + "institution": "University of Pennsylvania", + "dsl": "" + } + ], + "personId": 169940 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Anthropic", + "dsl": "" + } + ], + "personId": 170615 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Philadelphia", + "institution": "University of Pennsylvania", + "dsl": "Computer & Information Science" + } + ], + "personId": 170523 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Philadelphia", + "institution": "University of Pennsylvania", + "dsl": "Department of Computer and Information Science" + } + ], + "personId": 170534 + } + ] + }, + { + "id": 170923, + "typeId": 13756, + "title": "TactileNet: Bringing Touch Closer in the Digital World", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686731" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24d-3256", + "source": "PCS", + "trackId": 13203, + "tags": [], + "keywords": [], + "sessionIds": [ + 171027 + ], + "eventIds": [], + "abstract": "Haptic technologies have improved human-computer interaction (HCI) by enabling immersive tactile experiences. This proposal introduces a 2D touch actuator to simulate rubbing sensations on human skin and transmit them over the internet. Using a controllable tilted roller mechanism, the system recreates various lateral touch sensations. Advanced hand gesture recognition algorithms identify and transmit the input user's actions to the output user. 
Utilizing AI-enabled computing chips from the Gen-M kit, the project aims to develop a prototype that accurately simulates natural touch interactions.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Division of Engineering Science" + } + ], + "personId": 170579 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Division of Engineering Science" + }, + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Dynamic Graphics Project lab" + } + ], + "personId": 169994 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Division of Engineering Science" + }, + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Division of Engineering Science" + } + ], + "personId": 169931 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Dynamic Graphics Project Lab" + }, + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Department of Computer Science" + } + ], + "personId": 170102 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Department of Computer Science" + } + ], + "personId": 170207 + } + ] + }, + { + "id": 170924, + "typeId": 13748, + "title": "What's the Game, then? Opportunities and Challenges for Runtime Behavior Generation", + "award": "BEST_PAPER", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676358" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1230", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171028, + 175071 + ], + "eventIds": [], + "abstract": "Procedural content generation (PCG), the process of algorithmically creating game components instead of manually, has been a common tool of game development for decades. Recent advances in large language models (LLMs) enable the generation of game behaviors based on player input at runtime. Such code generation brings with it the possibility of entirely new gameplay interactions that may be difficult to integrate with typical game development workflows. We explore these implications through GROMIT, a novel LLM-based runtime behavior generation system for Unity. When triggered by a player action, GROMIT generates a relevant behavior which is compiled without developer intervention and incorporated into the game. We create three demonstration scenarios with GROMIT to investigate how such a technology might be used in game development. In a system evaluation we find that our implementation is able to produce behaviors that result in significant downstream impacts to gameplay. We then conduct an interview study with n=13 game developers using GROMIT as a probe to elicit their current opinion on runtime behavior generation tools, and enumerate the specific themes curtailing the wider use of such tools. 
We find that the main themes of concern are quality considerations, community expectations, and fit with developer workflows, and that several of the subthemes are unique to runtime behavior generation specifically. We outline a future work agenda to address these concerns, including the need for additional guardrail systems for behavior generation.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 170619 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 170673 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 170642 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "UC Berkeley", + "dsl": "EECS" + } + ], + "personId": 170496 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "UC Berkeley", + "dsl": "EECS" + } + ], + "personId": 169964 + } + ] + }, + { + "id": 170925, + "typeId": 13748, + "title": "EyeFormer: Predicting Personalized Scanpaths with Transformer-Guided Reinforcement Learning", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676436" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-7213", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175076, + 171053 + ], + "eventIds": [], + "abstract": "From a visual-perception perspective, modern graphical user interfaces (GUIs) comprise a complex graphics-rich two-dimensional visuospatial arrangement of text, images, and interactive objects such as buttons and menus. While existing models can accurately predict regions and objects that are likely to attract attention ``on average'', no scanpath model has been capable of predicting scanpaths for an individual. To close this gap, we introduce EyeFormer, which utilizes a Transformer architecture as a policy network to guide a deep reinforcement learning algorithm that predicts gaze locations. Our model offers the unique capability of producing personalized predictions when given a few user scanpath samples. It can predict full scanpath information, including fixation positions and durations, across individuals and various stimulus types. Additionally, we demonstrate applications in GUI layout optimization driven by our model. 
", + "authors": [ + { + "affiliations": [ + { + "country": "Finland", + "state": "", + "city": "Espoo", + "institution": "Aalto University", + "dsl": "" + } + ], + "personId": 170284 + }, + { + "affiliations": [ + { + "country": "Finland", + "state": "", + "city": "Espoo", + "institution": "Aalto University", + "dsl": "" + } + ], + "personId": 170518 + }, + { + "affiliations": [ + { + "country": "Finland", + "state": "", + "city": "Espoo", + "institution": "Nokia Technologies", + "dsl": "" + } + ], + "personId": 169877 + }, + { + "affiliations": [ + { + "country": "Luxembourg", + "state": "", + "city": "Esch-sur-Alzette", + "institution": "University of Luxembourg", + "dsl": "" + } + ], + "personId": 169898 + }, + { + "affiliations": [ + { + "country": "Finland", + "state": "", + "city": "Helsinki", + "institution": "Aalto University", + "dsl": "" + } + ], + "personId": 170669 + } + ] + }, + { + "id": 170926, + "typeId": 13748, + "title": "Gait Gestures: Examining Stride and Foot Strike Variation as an Input Method While Walking", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676342" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3531", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175077, + 171047 + ], + "eventIds": [], + "abstract": "Walking is a cyclic pattern of alternating footstep strikes, with each pair of steps forming a stride, and a series of strides forming a gait. We conduct a systematic examination of different kinds of intentional variations from a normal gait that could be used as input actions without interrupting overall walking progress. A design space of 22 candidate Gait Gestures is generated by adapting previous standing foot input actions and identifying new actions possible in a walking context. A formative study (n=25) examines movement easiness, social acceptability, and walking compatibility with foot movement logging to calculate temporal and spatial characteristics. Using a categorization of these results, 7 gestures are selected for a wizard-of-oz prototype demonstrating an AR interface controlled by Gait Gestures for ordering food and audio playback while walking. As a technical proof-of-concept, a gait gesture recognizer is developed and tested using the formative study data.", + "authors": [ + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "" + } + ], + "personId": 170441 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Waterloo", + "institution": "University of Waterloo", + "dsl": "David R. 
Cheriton School of Computer Science" + } + ], + "personId": 170459 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Waterloo", + "institution": "University of Waterloo", + "dsl": "Cheriton School of Computer Science" + } + ], + "personId": 169993 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Waterloo", + "institution": "University of Waterloo", + "dsl": "Cheriton School of Computer Science" + } + ], + "personId": 170390 + } + ] + }, + { + "id": 170927, + "typeId": 13748, + "title": "SonoHaptics: An Audio-Haptic Cursor for Gaze-Based Object Selection in XR", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676384" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-9050", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171034, + 175091 + ], + "eventIds": [], + "abstract": "We introduce SonoHaptics, an audio-haptic cursor for gaze-based 3D object selection. SonoHaptics addresses challenges around providing accurate visual feedback during gaze-based selection in Extended Reality (XR), e.g., lack of world-locked displays in no- or limited-display smart glasses and visual inconsistencies. To enable users to distinguish objects without visual feedback, SonoHaptics employs the concept of cross-modal correspondence in human perception to map visual features of objects (color, size, position, material) to audio-haptic properties (pitch, amplitude, direction, timbre). We contribute data-driven models for determining cross-modal mappings of visual features to audio and haptic features, and a computational approach to automatically generate audio-haptic feedback for objects in the user's environment. SonoHaptics provides global feedback that is unique to each object in the scene, and local feedback to amplify differences between nearby objects. 
Our comparative evaluation shows that SonoHaptics enables accurate object identification and selection in a cluttered scene without visual feedback.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170478 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Meta", + "dsl": "" + } + ], + "personId": 169748 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "School of Information" + } + ], + "personId": 170019 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "School of Mechanical Engineering" + } + ], + "personId": 170252 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Burlingame ", + "institution": "Meta reality labs", + "dsl": "" + } + ], + "personId": 170276 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Reality Labs Research, Meta Inc.", + "dsl": "" + } + ], + "personId": 169857 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170100 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Meta Inc.", + "dsl": "Reality Labs Research" + } + ], + "personId": 169808 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Reality Labs Research", + "dsl": "" + } + ], + "personId": 169685 + } + ] + }, + { + "id": 170928, + "typeId": 13745, + "title": "Extending the Senses of Ubiquitous Devices", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686712" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24e-1039", + "source": "PCS", + "trackId": 13202, + "tags": [], + "keywords": [], + "sessionIds": [ + 171036 + ], + "eventIds": [], + "abstract": "Modern smart devices are equipped with numerous sensors that can serve new functions beyond their original design when repurposed through side-channel sensing. Side-channel sensing involves leveraging existing sensors in unconventional ways to detect subtle signals and gather information beyond their original intended purpose. This thesis explores the untapped potential of side-channel sensing to enhance the functionality of existing smart devices, particularly in addressing niche user needs. We present two approaches: The first technique involves low-cost, low-power sensor add-ons that users can attach to their devices, enhancing functionality without any internal modifications to the device. We showcase GlucoScreen, a smartphone add-on that leverages the capacitive touchscreen for blood glucose monitoring, and WatchLink, which allows users to connect external sensors to their smartwatches via the ECG interface. The second technique focuses on manufacturers making targeted upgrades to existing hardware to improve functionalities at reduced time and cost. 
We present Z-Ring, a ring wearable that expands the bandwidth of bio-impedance sensing to enable micro-gesture interactions and object identification. Our findings highlight the potential of side-channel sensing to create more personalized and adaptable technology. \r\n", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School for Computer Science & Engineering" + } + ], + "personId": 169822 + } + ] + }, + { + "id": 170929, + "typeId": 13748, + "title": "StyleFactory: Towards Better Style Alignment in Image Creation through Style-Strength-Based Control and Evaluation", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676370" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6584", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171028, + 175071 + ], + "eventIds": [], + "abstract": "Generative AI models have been widely used for image creation. However, generating images that are well-aligned with users' personal styles on aesthetic features (e.g., color and texture) can be challenging due to the poor style expression and interpretation between humans and models. Through a formative study, we observed that participants showed a clear subjective perception of the desired style and variations in its strength, which directly inspired us to develop style-strength-based control and evaluation. Building on this, we present StyleFactory, an interactive system that helps users achieve style alignment. Our interface enables users to rank images based on their strengths in the desired style and visualizes the strength distribution of other images in that style from the model's perspective. In this way, users can evaluate the understanding gap between themselves and the model, and define well-aligned personal styles for image creation through targeted iterations. 
Our technical evaluation and user study demonstrate that StyleFactory accurately generates images in specific styles, effectively facilitates style alignment in image creation workflow, stimulates creativity, and enhances the user experience in human-AI interactions.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "College of Computer Science and Technology" + } + ], + "personId": 170383 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "School of Software Technology" + } + ], + "personId": 170558 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "College of Computer Science and Technology" + } + ], + "personId": 169880 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "College of Media and International Culture" + } + ], + "personId": 170056 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "College of Computer Science and Technology" + } + ], + "personId": 169843 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "College of Computer Science and Technology" + } + ], + "personId": 170260 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "College of Computer Science and Technology" + } + ], + "personId": 170157 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "College of Computer Science and Technology" + } + ], + "personId": 170322 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "College of Computer Science and Technology" + } + ], + "personId": 169957 + } + ] + }, + { + "id": 170930, + "typeId": 13748, + "title": "SkipWriter: LLM-Powered Abbreviated Writing on Tablets", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676423" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6221", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171019, + 175065 + ], + "eventIds": [], + "abstract": "Large Language Models (LLMs) may offer transformative opportunities for text input, especially for physically demanding modalities like handwriting. We studied a form of abbreviated handwriting by designing, developing, and evaluating a prototype, named SkipWriter, that converts handwritten strokes of a variable-length prefix-based abbreviation (e.g. \"ho a y\" as handwritten strokes) into the intended full phrase (e.g., \"how are you\" in the digital format) based on the preceding context. SkipWriter consists of an in-production handwriting recognizer and an LLM fine-tuned on this task. With flexible pen input, SkipWriter allows the user to add and revise prefix strokes when predictions do not match the user's intent. An user evaluation demonstrated a 60% reduction in motor movements with an average speed of 25.78 WPM. 
We also showed that this reduction is close to the ceiling of our model in an offline simulation.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "New Hampshire", + "city": "Hanover", + "institution": "Dartmouth College", + "dsl": "Department of Computer Science" + } + ], + "personId": 169844 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170342 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "La Jolla", + "institution": "UC San Diego", + "dsl": "" + } + ], + "personId": 169955 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google", + "dsl": "" + } + ], + "personId": 169969 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Mountain View", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170254 + } + ] + }, + { + "id": 170931, + "typeId": 13748, + "title": "HoloChemie - Sustainable Fabrication of Soft Biochemical Holographic Devices for Ubiquitous Sensing", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676448" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6582", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175079, + 171033 + ], + "eventIds": [], + "abstract": "Sustainable fabrication approaches and biomaterials are increasingly being used in HCI to fabricate interactive devices. However, the majority of the work has focused on integrating electronics. This paper takes a sustainable approach to exploring the fabrication of biochemical sensing devices. Firstly, we contribute a set of biochemical formulations for biological and environmental sensing with bio-sourced and environment-friendly substrate materials. Our formulations are based on a combination of enzymes derived from bacteria and fungi, plant extracts and commercially available chemicals to sense both liquid and gaseous analytes: glucose, lactic acid, pH levels and carbon dioxide. Our novel holographic sensing scheme allows for detecting the presence of analytes and enables quantitative estimation of the analyte levels. 
We present a set of application scenarios that demonstrate the versatility of our approach and discuss the sustainability aspects, its limitations, and the implications for bio-chemical systems in HCI.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "Electrical and Software Engineering Department" + } + ], + "personId": 170536 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "Department of Electrical and Software Engineering" + } + ], + "personId": 170603 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "Department of Biomedical Engineering " + } + ], + "personId": 170444 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "Biomedical Engineering" + } + ], + "personId": 170217 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "Department of Computer Science" + } + ], + "personId": 170094 + } + ] + }, + { + "id": 170932, + "typeId": 13748, + "title": "Personal Time-Lapse", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676383" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-9970", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175061, + 171042 + ], + "eventIds": [], + "abstract": "Our bodies are constantly in motion—from the bending of arms and legs to the less conscious movement of breathing, our precise shape and location change constantly. This can make subtler developments (e.g., the growth of hair, or the healing of a wound) difficult to observe. Our work focuses on helping users record and visualize this type of subtle, longer-term change. We present a mobile tool that combines custom 3D tracking with interactive visual feedback and computational imaging to capture personal time-lapse, which approximates longer-term video of the subject (typically, part of the capturing user’s body) under a fixed viewpoint, body pose, and lighting condition. These personal time-lapses offer a powerful and detailed way to track visual changes of the subject over time. We begin with a formative study that examines what makes personal time-lapse so difficult to capture. 
Building on our findings, we motivate the design of our capture tool, evaluate this design with users, and demonstrate its effectiveness in a variety of challenging examples.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University", + "dsl": "Computer Science" + } + ], + "personId": 170027 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University", + "dsl": "Computer Science" + } + ], + "personId": 170325 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York City", + "institution": "Cornell University", + "dsl": "Information Science" + } + ], + "personId": 169735 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Cornell Tech, Cornell University", + "dsl": "" + } + ], + "personId": 170266 + } + ] + }, + { + "id": 170933, + "typeId": 13748, + "title": "VRCopilot: Authoring 3D Layouts with Generative AI Models in VR", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676451" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8760", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175083, + 171048 + ], + "eventIds": [], + "abstract": "Immersive authoring provides an intuitive medium for users to create 3D scenes via direct manipulation in Virtual Reality (VR). Recent advances in generative AI have enabled the automatic creation of realistic 3D layouts. However, it is unclear how capabilities of generative AI can be used in immersive authoring to support fluid interactions, user agency, and creativity. We introduce VRCopilot, a mixed-initiative system that integrates pre-trained generative AI models into immersive authoring to facilitate human-AI co-creation in VR. VRCopilot presents multimodal interactions to support rapid prototyping and iterations with AI, and intermediate representations such as wireframes to augment user controllability over the created content. Through a series of user studies, we evaluated the potential and challenges in manual, scaffolded, and automatic creation in immersive authoring. We found that scaffolded creation using wireframes enhanced the user agency compared to automatic creation. 
We also found that manual creation via multimodal specification offers the highest sense of creativity and agency.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "School of Information" + } + ], + "personId": 170481 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 169859 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 169690 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "School of Information" + } + ], + "personId": 170239 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170298 + } + ] + }, + { + "id": 170934, + "typeId": 13748, + "title": "MobiPrint: A Mobile 3D Printer for Environment-Scale Design and Fabrication", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676459" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6583", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175068, + 171052 + ], + "eventIds": [], + "abstract": "3D printing is transforming how we customize and create physical objects in engineering, accessibility, and art. However, this technology is still primarily limited to confined working areas and dedicated print beds thereby detaching design and fabrication from real-world environments and making measuring and scaling objects tedious and labor-intensive. In this paper, we present MobiPrint, a prototype mobile fabrication system that combines elements from robotics, architecture, and Human-Computer Interaction (HCI) to enable environment-scale design and fabrication in ad-hoc indoor environments. MobiPrint provides a multi-stage fabrication pipeline: first, the robotic 3D printer automatically scans and maps an indoor space; second, a custom design tool converts the map into an interactive CAD canvas for editing and placing models in the physical world; finally, the MobiPrint robot prints the object directly on the ground at the defined location. Through a \"proof-by-demonstration\" validation, we highlight our system's potential across different applications, including accessibility, home furnishing, floor signage, and art. We also conduct a technical evaluation to assess MobiPrint’s localization accuracy, ground surface adhesion, payload capacity, and mapping speed. We close with a discussion of open challenges and opportunities for the future of contextualized mobile fabrication.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. 
Allen School of Computer Science" + } + ], + "personId": 169904 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "Computer Graphics Technology" + } + ], + "personId": 170328 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 170537 + } + ] + }, + { + "id": 170935, + "typeId": 13748, + "title": "KODA: Knit-program Optimization by Dependency Analysis", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676405" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-7430", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171020, + 175086 + ], + "eventIds": [], + "abstract": "Digital knitting machines have the capability to reliably manufacture seamless, textured, and multi-material garments, but these capabilities are obscured by limiting CAD tools. Recent innovations in computational knitting build on emerging programming infrastructure that gives full access to the machine's capabilities but requires an extensive understanding of machine operations and execution. In this paper, we contribute a critical missing piece of the knitting-machine programming pipeline--a program optimizer. Program optimization allows programmers to focus on developing novel algorithms that produce desired fabrics while deferring concerns of efficient machine operations to the optimizer. We present KODA, the Knit-program Optimization by Dependency Analysis method. KODA re-orders and reduces machine instructions to reduce knitting time, increase knitting reliability, and manage boilerplate operations that adjust the machine state. The result is a system that enables programmers to write readable and intuitive knitting algorithms while producing efficient and verified programs. ", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Boston", + "institution": "Northeastern University", + "dsl": "Khoury College of Computer Sciences" + } + ], + "personId": 169726 + } + ] + }, + { + "id": 170936, + "typeId": 13748, + "title": "Rhapso: Automatically Embedding Fiber Materials into 3D Prints for Enhanced Interactivity", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676468" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-4159", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171040, + 175080 + ], + "eventIds": [], + "abstract": " We introduce Rhapso, a 3D printing system designed to embed a diverse range of continuous fiber materials within 3D objects during the printing process. This approach enables integrating properties like tensile strength, force storage and transmission, or aesthetic and tactile characteristics, directly into low-cost thermoplastic 3D prints. These functional objects can have intricate actuation, self-assembly, and sensing capabilities with little to no manual intervention. To achieve this, we modify a low-cost Fused Filament Fabrication (FFF) 3D printer, adding a stepper motor-controlled fiber spool mechanism on a gear ring above the print bed. 
In addition to hardware, we provide parsing software for precise fiber placement, which generates Gcode for printer operation. To illustrate the versatility of our system, we present applications that showcase its extensive design potential. Additionally, we offer comprehensive documentation and open designs, empowering others to replicate our system and explore its possibilities.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Copenhagen", + "institution": "University of Copenhagen", + "dsl": "Department of Computer Science" + } + ], + "personId": 169854 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "Department of Computer Science and Information" + } + ], + "personId": 169811 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "West Midlands", + "city": "Birmingham", + "institution": "University of Birmingham", + "dsl": "School of Computer Science" + } + ], + "personId": 170147 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Copenhagen", + "institution": "University of Copenhagen", + "dsl": "Department of Computer Science" + } + ], + "personId": 170486 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University Of Maryland", + "dsl": "Department of Computer Science" + } + ], + "personId": 170082 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Copenhagen", + "institution": "University of Copenhagen", + "dsl": "Department of Computer Science" + } + ], + "personId": 170149 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "" + } + ], + "personId": 170582 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University of Maryland", + "dsl": "Computer Science" + } + ], + "personId": 170113 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "", + "city": "Birmingham", + "institution": "University of Birmingham", + "dsl": "School of Computer Science" + } + ], + "personId": 170559 + } + ] + }, + { + "id": 170937, + "typeId": 13748, + "title": "SERENUS: Alleviating Low-Battery Anxiety Through Real-time, Accurate, and User-Friendly Energy Consumption Prediction of Mobile Applications", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676437" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-4158", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175082, + 171046 + ], + "eventIds": [], + "abstract": "Low-battery anxiety has emerged as a result of growing dependence on mobile devices, where the anxiety arises when the battery level runs low. While battery life can be extended through power-efficient hardware and software optimization techniques, low-battery anxiety will remain a phenomenon as long as mobile devices rely on batteries. In this paper, we investigate how an accurate real-time energy consumption prediction at the application-level can improve the user experience in low-battery situations. We present Serenus, a mobile system framework specifically tailored to predict the energy consumption of each mobile application and present the prediction in a user-friendly manner. 
We conducted user studies using Serenus to verify that highly accurate energy consumption predictions can effectively alleviate low-battery anxiety by assisting users in planning their application usage based on the remaining battery life. We summarize requirements to mitigate users’ anxiety, guiding the design of future mobile system frameworks.", + "authors": [ + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "School of Computing" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "School of Computing" + } + ], + "personId": 170556 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "School of Computing" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "School of Computing" + } + ], + "personId": 170485 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "School of Computing" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "School of Computing" + } + ], + "personId": 169938 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "School of Computing" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "School of Computing" + } + ], + "personId": 169718 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "School of Computing" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "School of Computing" + } + ], + "personId": 170234 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daegu", + "institution": "DGIST", + "dsl": "Electrical Engineering and Computer Science" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daegu", + "institution": "DGIST", + "dsl": "Electrical Engineering and Computer Science" + } + ], + "personId": 169883 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "School of Computing" + }, + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "School of Computing" + } + ], + "personId": 170202 + } + ] + }, + { + "id": 170938, + "typeId": 13748, + "title": "Vision-Based Hand Gesture Customization from a Single Demonstration", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676378" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-4037", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175061, + 171042 + ], + "eventIds": [], + "abstract": "Hand gesture recognition is becoming a more prevalent mode of human-computer interaction, especially as cameras proliferate across everyday devices. Despite continued progress in this field, gesture customization is often underexplored. Customization is crucial since it enables users to define and demonstrate gestures that are more natural, memorable, and accessible. 
However, customization requires efficient usage of user-provided data. We introduce a method that enables users to easily design bespoke gestures with a monocular camera from one demonstration. We employ transformers and meta-learning techniques to address few-shot learning challenges. Unlike prior work, our method supports any combination of one-handed, two-handed, static, and dynamic gestures, including different viewpoints, and the ability to handle irrelevant hand movements. We implement three real-world applications using our customization method, conduct a user study, and achieve up to 94\\% average recognition accuracy from one demonstration. Our work provides a viable path for vision-based gesture customization, laying the foundation for future advancements in this domain. ", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Cupertino", + "institution": "Apple Inc.", + "dsl": "" + } + ], + "personId": 170557 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Cupertino", + "institution": "Apple Inc.", + "dsl": "" + } + ], + "personId": 170386 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Cupertino", + "institution": "Apple Inc.", + "dsl": "" + } + ], + "personId": 169887 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Apple lnc.", + "dsl": "" + } + ], + "personId": 170667 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Cupertino", + "institution": "Apple Inc.", + "dsl": "" + } + ], + "personId": 170462 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Cupertino", + "institution": "Apple Inc.", + "dsl": "" + } + ], + "personId": 170062 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Cupertino", + "institution": "Apple Inc.", + "dsl": "" + } + ], + "personId": 170664 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Cupertino", + "institution": "Apple Inc.", + "dsl": "AIML" + } + ], + "personId": 170016 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Cupertino ", + "institution": "Apple Inc.", + "dsl": "" + } + ], + "personId": 170174 + } + ] + }, + { + "id": 170939, + "typeId": 13748, + "title": "Empower Real-World BCIs with NIRS-X: An Adaptive Learning Framework that Harnesses Unlabeled Brain Signals", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676429" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5489", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171044, + 175085 + ], + "eventIds": [], + "abstract": "Brain-Computer Interfaces (BCIs) using functional near-infrared spectroscopy (fNIRS) hold promise for future interactive user interfaces due to their ease of deployment and declining cost. However, they typically require a separate calibration process for each user and task, which can be burdensome. Machine learning helps, but faces a data scarcity problem. Due to inherent inter-user variations in physiological data, it has been typical to create a new annotated training dataset for every new task and user. 
To reduce dependence on such extensive data collection and labeling, we present an adaptive learning framework, NIRS-X, to harness more easily accessible unlabeled fNIRS data. NIRS-X includes two key components: NIRSiam and NIRSformer. We use the NIRSiam algorithm to extract generalized brain activity representations from unlabeled fNIRS data obtained from previous users and tasks, and then transfer that knowledge to new users and tasks. In conjunction, we design a neural network, NIRSformer, tailored for capturing both local and global, spatial and temporal relationships in multi-channel fNIRS brain input signals. By using unlabeled data from both a previously released fNIRS2MW visual $n$-back dataset and a newly collected fNIRS2MW audio $n$-back dataset, NIRS-X demonstrates its strong adaptation capability to new users and tasks. Results show comparable or superior performance to supervised methods, making NIRS-X promising for real-world fNIRS-based BCIs.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Medford", + "institution": "Tufts University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170279 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Computer Science, University of San Francisco", + "dsl": "" + }, + { + "country": "United States", + "state": "Massachusetts", + "city": "Medford", + "institution": "Tufts University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170128 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Boston", + "institution": "Northeastern University", + "dsl": "" + } + ], + "personId": 170055 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "Google LLC", + "dsl": "" + } + ], + "personId": 170361 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "University of San Francisco", + "dsl": "Computer Science" + } + ], + "personId": 170676 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Medford", + "institution": "Tufts University", + "dsl": "Department of Biomedical Engineering" + } + ], + "personId": 170011 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Medford", + "institution": "Tufts University", + "dsl": "Computer Science" + } + ], + "personId": 169838 + } + ] + }, + { + "id": 170940, + "typeId": 13748, + "title": "WorldScribe: Towards Context-Aware Live Visual Descriptions", + "award": "BEST_PAPER", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676375" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5126", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171025, + 175075 + ], + "eventIds": [], + "abstract": "Automated live visual descriptions can aid blind people in understanding their surroundings with autonomy and independence. However, providing descriptions that are rich, contextual, and just-in-time has been a long-standing challenge in accessibility. 
In this work, we develop WorldScribe, a system that generates automated live real-world visual descriptions that are customizable and adaptive to users' contexts: (i) WorldScribe's descriptions are tailored to users' intents and prioritized based on semantic relevance. (ii) WorldScribe is adaptive to visual contexts, e.g., providing consecutively succinct descriptions for dynamic scenes, while presenting longer and detailed ones for stable settings. (iii) WorldScribe is adaptive to sound contexts, e.g., increasing volume in noisy environments, or pausing when conversations start. Powered by a suite of vision, language, and sound recognition models, WorldScribe introduces a description generation pipeline that balances the tradeoffs between their richness and latency to support real-time use. The design of WorldScribe is informed by prior work on providing visual descriptions and a formative study with blind participants. Our user study and subsequent pipeline evaluation show that WorldScribe can provide real-time and fairly accurate visual descriptions to facilitate environment understanding that is adaptive and customized to users' contexts. Finally, we discuss the implications and further steps toward making live visual descriptions more context-aware and humanized.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170638 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170646 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170298 + } + ] + }, + { + "id": 170941, + "typeId": 13748, + "title": "Understanding the Effects of Restraining Finger Coactivation in Mid-Air Typing: from a Neuromechanical Perspective", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676441" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8511", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171044, + 175085 + ], + "eventIds": [], + "abstract": "Typing in mid-air is often perceived as intuitive yet presents challenges due to finger coactivation, a neuromechanical phenomenon that involves involuntary finger movements stemming from the lack of physical constraints. Previous studies were used to examine and address the impacts of finger coactivation using algorithmic approaches. Alternatively, this paper explores the neuromechanical effects of finger coactivation on mid-air typing, aiming to deepen our understanding and provide valuable insights to improve these interactions. We utilized a wearable device that restrains finger coactivation as a prop to conduct two mid-air studies, including a rapid finger-tapping task and a ten-finger typing task. The results revealed that restraining coactivation not only reduced mispresses, which is a classic coactivated error always considered as harm caused by coactivation. 
Unexpectedly, a reduction of motor control errors and spelling errors, which are considered non-coactivated errors, was also observed.\r\nAdditionally, the study evaluated the neural resources involved in motor execution using functional Near Infrared Spectroscopy (fNIRS), which tracked cortical arousal during mid-air typing. The findings demonstrated decreased activation in the primary motor cortex of the left hemisphere when coactivation was restrained, suggesting a diminished motor execution load. This reduction suggests that a portion of neural resources is conserved, which also potentially aligns with perceived lower mental workload and decreased frustration levels.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "China/Beijing", + "city": "Beijing", + "institution": "University of Chinese Academy of Sciences", + "dsl": "" + } + ], + "personId": 170550 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Xi'an", + "institution": "Xi'an jiaotong university", + "dsl": "" + } + ], + "personId": 169905 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Shanghai", + "institution": "East China Normal University", + "dsl": "" + } + ], + "personId": 169773 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Institute of Software, Chinese Academy of Sciences", + "dsl": "" + } + ], + "personId": 170319 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Institute of software, Chinese Academy of Sciences", + "dsl": "" + } + ], + "personId": 170324 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Shaanxi", + "city": "Xi'an", + "institution": "School of mechanical engineering", + "dsl": "Xi'an Jiaotong University" + } + ], + "personId": 170247 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Peking", + "institution": "Peking University", + "dsl": "" + } + ], + "personId": 169678 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Beijing", + "city": "Beijing", + "institution": "Institute of Software, Chinese Academy of Sciences", + "dsl": "" + } + ], + "personId": 170597 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Institute of Software, Chinese Academy of Sciences", + "dsl": "" + } + ], + "personId": 169753 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Institute of software, Chinese Academy of Sciences", + "dsl": "" + } + ], + "personId": 170289 + } + ] + }, + { + "id": 170942, + "typeId": 13749, + "title": "Stretchy Embroidered Circuits", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686343" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-5617", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059, + 175086 + ], + "eventIds": [], + "abstract": "We introduce a preliminary system which generates machine embroidery patterns for electronic circuits on stretchy fabrics.
Our system incorporates parametric stretch-optimized embroidery paths, and we offer tips for e-textile embroidery on stretch fabrics.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon Univeristy", + "dsl": "" + } + ], + "personId": 170581 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170400 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170093 + } + ] + }, + { + "id": 170943, + "typeId": 13749, + "title": "MicroCode: Live, Portable Programming for Children via Robotics", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-3437", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171049, + 175081 + ], + "eventIds": [], + "abstract": "The BBC micro:bit is a popular tool in education for teaching coding, but typically requires a host computer and internet access, limiting its accessibility. MicroCode addresses this by enabling portable programming directly on the micro:bit using a battery-powered accessory with an OLED screen and navigation buttons. This system utilises a simple, handheld graphical tile-based programming paradigm, yet supports complex programs with features including conditional execution and variables, providing immediate feedback through live programming. This paper illustrates how early studies have received a positive reception from children and educators, especially when paired with robotics as an application domain. 
Plans for future work aim to extend the reach of MicroCode by providing more tangible digital learning opportunities to pre-literate children and communities around the world where access to mains power and the internet is scarce.", + "authors": [ + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "Lancashire", + "city": "Lancaster", + "institution": "Lancaster University", + "dsl": "School of Computing and Communications" + } + ], + "personId": 170316 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Microsoft", + "dsl": "" + } + ], + "personId": 169764 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "", + "city": "Lancaster", + "institution": "Lancaster University", + "dsl": "School of Computing and Communications" + } + ], + "personId": 170508 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "", + "city": "Lancaster", + "institution": "Lancaster University", + "dsl": "School of Computing and Communication" + } + ], + "personId": 169781 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Redmond", + "institution": "Microsoft Research", + "dsl": "" + } + ], + "personId": 169736 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "Lancashire", + "city": "Lancaster", + "institution": "Lancaster University", + "dsl": "School of Computing and Communications" + } + ], + "personId": 170229 + } + ] + }, + { + "id": 170944, + "typeId": 13748, + "title": "Understanding and Supporting Debugging Workflows in CAD", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676353" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5809", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171040, + 175080 + ], + "eventIds": [], + "abstract": "One of the core promises of parametric Computer-Aided Design (CAD) is that users can easily edit their model at any point in time.\r\nHowever, due to the ambiguity of changing references to intermediate, updated geometry, parametric edits can lead to reference errors which are difficult to fix in practice.\r\nWe claim that debugging reference errors remains challenging because CAD systems do not provide users with tools to understand where the error happened and how to fix it.\r\nTo address these challenges, we prototype a graphical debugging tool, DeCAD, which helps compare CAD model states both across operations and across edits.\r\nIn a qualitative lab study, we use DeCAD as a probe to understand specific challenges that users face and what workflows they employ to overcome them.\r\nWe conclude with design implications for future debugging tool developers.
\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "CSE" + } + ], + "personId": 170613 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G Allen School of Computer Science and Engineering" + } + ], + "personId": 170228 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G Allen School of Computer Science and Engineering " + } + ], + "personId": 170560 + } + ] + }, + { + "id": 170945, + "typeId": 13749, + "title": "FisheyeVR: Extending the Field of View by Dynamic Zooming in Virtual Reality", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-6483", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 175074, + 171059 + ], + "eventIds": [], + "abstract": "We propose FisheyeVR, a zooming interface in VR dynamically providing users a larger software FOV by zooming out to a shorter virtual focal length, trading in an acceptable visual distortion for more context.\r\nWe conduct studies to (1) understand the visual distortion of zoom-out FOVs, (2) test 4 triggering methods with common VR scenarios and (3) evaluate the integrated FisheyeVR system.\r\nOur findings demonstrate that FisheyeVR not only significantly reduces users' physical effort and oculomotor simulator sickness but also maintains performance levels, accompanied by positive feedback.", + "authors": [ + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "" + } + ], + "personId": 169771 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "" + } + ], + "personId": 170582 + } + ] + }, + { + "id": 170946, + "typeId": 13749, + "title": "Digital Phenotyping based on a Mobile App Identifies Distinct and Overlapping Features in Children Diagnosed with Autism versus ADHD", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686323" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-1132", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059, + 175061 + ], + "eventIds": [], + "abstract": "The high prevalence of autism calls for accessible and scalable technology-assisted screening tools. This will aid in early detection allowing timely access to services and supports. SenseToKnow, a mobile digital phenotyping app, showed potential in eliciting autism-related behaviors that can be automatically captured via computer vision analysis (CVA) in toddlers. 
Here, we present the capability of SenseToKnow in characterizing autism in school age children and showcase the robustness of the CVA features in interpreting distinct and overlapping behaviors with attention-deficit/hyperactive disorder (ADHD).", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "North Carolina", + "city": "Durham", + "institution": "Duke University", + "dsl": "" + } + ], + "personId": 170421 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "North Carolina", + "city": "Durham", + "institution": "Duke University", + "dsl": "Electrical and Computer Engineering" + } + ], + "personId": 169977 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "North Carolina", + "city": "Durham", + "institution": "Duke University", + "dsl": "Department of Psychiatry and Behavioral Sciences" + }, + { + "country": "United States", + "state": "North Carolina", + "city": "Durham", + "institution": "Duke University", + "dsl": "Duke Center for Autism and Brain Development" + } + ], + "personId": 170406 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "North Carolina", + "city": "Durham", + "institution": "Pratt School of Engineering, Duke University", + "dsl": "Department of Electrical and Computer Science Engineering" + }, + { + "country": "Uruguay", + "state": "", + "city": "Montevideo", + "institution": "Universidad Catolica del Uruguay", + "dsl": "" + } + ], + "personId": 169861 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "North Carolina", + "city": "Durham", + "institution": "Duke University", + "dsl": "Office of Information Technology" + } + ], + "personId": 169942 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "North Carolina", + "city": "Durham", + "institution": "Duke University", + "dsl": "Department of Psychiatry and Behavioral Sciences" + }, + { + "country": "United States", + "state": "North Carolina", + "city": "Durham", + "institution": "Duke University", + "dsl": "Duke Center for Autism and Brain Development" + } + ], + "personId": 170538 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "North Carolina", + "city": "Durham", + "institution": "Duke University", + "dsl": "Department of Psychiatry and Behavioral Sciences" + }, + { + "country": "United States", + "state": "North Carolina", + "city": "Durham", + "institution": "Duke Global Health Institute", + "dsl": "" + } + ], + "personId": 169945 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "North Carolina", + "city": "Durham", + "institution": "Duke University", + "dsl": "Department of Psychiatry and Behavioral Sciences" + } + ], + "personId": 170460 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "North Carolina", + "city": "Durham", + "institution": "Duke University", + "dsl": "Department of Psychiatry and Behavioral Sciences, Duke University" + }, + { + "country": "United States", + "state": "North Carolina", + "city": "Durham", + "institution": "Duke University", + "dsl": "Duke Center for Autism and Brain Development" + } + ], + "personId": 170258 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "North Carolina", + "city": "Durham", + "institution": "Duke University", + "dsl": "Pratt School of Engineering" + }, + { + "country": "United States", + "state": "North Carolina", + "city": "Durham", + "institution": "Duke University", + "dsl": "Departments of Biomedical 
Engineering, Mathematics, and Computer Sciences" + } + ], + "personId": 170515 + } + ] + }, + { + "id": 170947, + "typeId": 13749, + "title": "Transforming Procedural Instructions into In-Situ Augmented Reality Guides with InstructAR", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-8543", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 175070, + 171049 + ], + "eventIds": [], + "abstract": "Following manual procedural instructions can often be frustrating due to the mental gap between the instructions and their real-world application. Variations in settings can further compound this confusion for those attempting to follow them. To address this problem, we propose InstructAR, a system under development that bridges textual instructions with the real world through Augmented Reality (AR) guides. With this tool, users can easily follow clear and actionable step-by-step instructions, as related objects are highlighted and AR guides indicate necessary actions. InstructAR also provides feedback upon task completion. This proof-of-concept leverages Natural Language Processing and Computer Vision techniques. Our work aims to reduce cognitive load and errors, making it easier for users to follow manual procedural instructions accurately.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Virginia", + "city": "Blacksburg", + "institution": "Virginia Tech", + "dsl": "" + } + ], + "personId": 170023 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "" + } + ], + "personId": 169785 + } + ] + }, + { + "id": 170948, + "typeId": 13745, + "title": "Physical and Social Adaptation for Assistive Robot Interactions", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686713" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24e-1028", + "source": "PCS", + "trackId": 13202, + "tags": [], + "keywords": [], + "sessionIds": [ + 171036 + ], + "eventIds": [], + "abstract": "Robots have the potential to provide users with limited mobility additional ways of interacting with the world around them. However, each user has preferences for how they interact with these physical interfaces. My dissertation research develops tools and algorithms to allow robot interactions to adapt to the individual needs of users. In particular, I develop ways to adapt a robot's design, physical movements, and social behaviors. 
By adapting robots to users I hope to develop systems that more holistically aid users with limited mobility.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "LOS ANGELES", + "institution": "University of Southern California", + "dsl": "Computer Science Department" + } + ], + "personId": 170681 + } + ] + }, + { + "id": 170949, + "typeId": 13749, + "title": "Game Jam with CARDinality: A Case Study of Exploring Play-based Interactive Applications", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-6925", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059, + 175064 + ], + "eventIds": [], + "abstract": "This paper delves into a Game Jam, a workshop-based investigation widespread in the game design community, to explore the potential and applications of CARDinality, a card-shaped robotic device, giving insights into the material implications and prototyping potential of this novel platform. During the Game Jam, 9 participants were informed of basic game design principles and techniques from a game design researcher. They then interacted with our hardware devices to brainstorm future card games supported by CARDinality, revealing a breadth of unique applications anchored in 'play.'", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "Department of Computer Science" + } + ], + "personId": 170540 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "Department of Computer Science" + } + ], + "personId": 169974 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170271 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 169703 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170244 + } + ] + }, + { + "id": 170950, + "typeId": 13748, + "title": "UIClip: A Data-driven Model for Assessing User Interface Design", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676408" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8532", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175076, + 171053 + ], + "eventIds": [], + "abstract": "User interface (UI) design is a difficult yet important task for ensuring the usability, accessibility, and aesthetic qualities of applications. In our paper, we develop a machine-learned model, UIClip, for assessing the design quality and visual relevance of a UI given its screenshot and natural language description. To train UIClip, we used a combination of automated crawling, synthetic augmentation, and human ratings to construct a large-scale dataset of UIs, collated by description and ranked by design quality. Through training on the dataset, UIClip implicitly learns properties of good and bad designs by (i) assigning a numerical score that represents a UI design's relevance and quality and (ii) providing design suggestions. 
In an evaluation that compared the outputs of UIClip and other baselines to UIs rated by 12 human designers, we found that UIClip achieved the highest agreement with ground-truth rankings. Finally, we present three example applications that demonstrate how UIClip can facilitate downstream applications that rely on instantaneous assessment of UI design quality: (i) UI code generation, (ii) UI design tips generation, and (iii) quality-aware UI example search.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170064 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170368 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170122 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Apple", + "dsl": "" + } + ], + "personId": 170680 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Apple", + "dsl": "" + } + ], + "personId": 170699 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Diego", + "institution": "Apple Inc", + "dsl": "" + } + ], + "personId": 170590 + } + ] + }, + { + "id": 170951, + "typeId": 13748, + "title": "SQLucid: Grounding Natural Language Database Queries with Interactive Explanations", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676368" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-4055", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171055, + 175081 + ], + "eventIds": [], + "abstract": "Though recent advances in machine learning have led to significant improvements in natural language interfaces for databases, the accuracy and reliability of these systems remain limited, especially in high-stakes domains. This paper introduces SQLucid, a novel user interface that bridges the gap between non-expert users and complex database querying processes. SQLucid addresses existing limitations by integrating visual correspondence, intermediate query results, and editable step-by-step SQL explanations in natural language to facilitate user understanding and engagement. This unique blend of features empowers users to understand and refine SQL queries easily and precisely. Two user studies and one quantitative experiment were conducted to validate SQLucid’s effectiveness, showing significant improvement in task completion accuracy and user confidence compared to existing interfaces. 
Our code is available at https://github.com/magic-YuanTian/SQLucid.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue university", + "dsl": "Computer Science department" + } + ], + "personId": 170335 + }, + { + "affiliations": [ + { + "country": "Australia", + "state": "NSW", + "city": "Sydney", + "institution": "The University of Sydney", + "dsl": "School of Computer Science" + } + ], + "personId": 169782 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "Notre Dame", + "institution": "University of Notre Dame", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 169973 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "Computer Science" + } + ], + "personId": 170186 + } + ] + }, + { + "id": 170952, + "typeId": 13748, + "title": "Auptimize: Optimal Placement of Spatial Audio Cues for Extended Reality", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676424" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8098", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171034, + 175091 + ], + "eventIds": [], + "abstract": "Spatial audio in Extended Reality (XR) provides users with better awareness of where virtual elements are placed, and efficiently guides them to events such as notifications, system alerts from different windows, or approaching avatars. Humans, however, are inaccurate in localizing sound cues, especially with multiple sources due to limitations in human auditory perception such as angular discrimination error and front-back confusion. This decreases the efficiency of XR interfaces because users misidentify from which XR element a sound is coming. To address this, we propose Auptimize, a novel computational approach for placing XR sound sources, which mitigates such localization errors by utilizing the ventriloquist effect. Auptimize disentangles the sound source locations from the visual elements and relocates the sound sources to optimal positions for unambiguous identification of sound cues, avoiding errors due to inter-source proximity and front-back confusion. Our evaluation shows that Auptimize decreases spatial audio-based source identification errors compared to playing sound cues at the paired visual-sound locations. 
We demonstrate the applicability of Auptimize for diverse spatial audio-based interactive XR scenarios.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170478 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 169752 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170609 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170243 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Rochester", + "institution": "University of Rochester", + "dsl": "Department of Computer Science" + } + ], + "personId": 169909 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170100 + } + ] + }, + { + "id": 170953, + "typeId": 13748, + "title": "DesignChecker: Visual Design Support for Blind and Low Vision Web Developers", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676369" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3752", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171025, + 175075 + ], + "eventIds": [], + "abstract": "Blind and low vision (BLV) developers create websites to share knowledge and showcase their work. A well-designed website can engage audiences and deliver information effectively, yet it remains challenging for BLV developers to review their web designs. We conducted interviews with BLV developers (N=9) and analyzed 20 websites created by BLV developers. BLV developers created highly accessible websites but wanted to assess the usability of their websites for sighted users and follow the design standards of other websites. They also encountered challenges using screen readers to identify illegible text, misaligned elements, and inharmonious colors. We present DesignChecker, a browser extension that helps BLV developers improve their web designs. With DesignChecker, users can assess their current design by comparing it to visual design guidelines, a reference website of their choice, or a set of similar websites. DesignChecker also identifies the specific HTML elements that violate design guidelines and suggests CSS changes for improvements. 
Our user study participants (N=8) recognized more visual design errors than using their typical workflow and expressed enthusiasm about using DesignChecker in the future.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Austin", + "institution": "University of Texas, Austin", + "dsl": "Department of Computer Science" + } + ], + "personId": 169963 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Texas", + "city": "Austin", + "institution": "University of Texas, Austin", + "dsl": "Department of Computer Science" + } + ], + "personId": 170458 + } + ] + }, + { + "id": 170954, + "typeId": 13748, + "title": "Who Validates the Validators? Aligning LLM-Assisted Evaluation of LLM Outputs with Human Preferences", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676450" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2544", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171023, + 175084 + ], + "eventIds": [], + "abstract": "Due to the cumbersome nature of human evaluation and limitations of code-based evaluation, Large Language Models (LLMs) are increasingly being used to assist humans in evaluating LLM outputs. Yet LLM-generated evaluators simply inherit all the problems of the LLMs they evaluate, requiring further human validation. We present a mixed-initiative approach to “validate the validators”— aligning LLM-generated evaluation functions (be it prompts or code) with human requirements. Our interface, EvalGen, provides automated assistance to users in generating evaluation criteria and implementing assertions. While generating candidate implementations (Python functions, LLM grader prompts), EvalGen asks humans to grade a subset of LLM outputs; this feedback is used to select implementations that better align with user grades. A qualitative study finds overall support for EvalGen but underscores the subjectivity and iterative nature of alignment. In particular, we identify a phenomenon we dub criteria drift: users need criteria to grade outputs, but grading outputs helps users define criteria. What is more, some criteria appear dependent on the specific LLM outputs observed (rather than independent and definable a priori), raising serious questions for approaches that assume the independence of evaluation from observation of model outputs. 
We present our interface and implementation details, a comparison of our algorithm with a baseline approach, and implications for the design of future LLM evaluation assistants.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "EPIC Lab" + } + ], + "personId": 170010 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "UC Berkeley", + "dsl": "Computer Science" + } + ], + "personId": 170563 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "UC Berkeley", + "dsl": "EECS" + } + ], + "personId": 169964 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "UC Berkeley", + "dsl": "" + } + ], + "personId": 170401 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Quebec", + "city": "Montréal", + "institution": "Université de Montréal", + "dsl": "Montréal HCI" + } + ], + "personId": 170346 + } + ] + }, + { + "id": 170955, + "typeId": 13748, + "title": "Towards Music-Aware Virtual Assistants", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676416" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2423", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171034, + 175091 + ], + "eventIds": [], + "abstract": "We propose a system for modifying spoken notifications in a manner that is sensitive to the music a user is listening to. Spoken notifications provide convenient access to rich information without the need for a screen. Virtual assistants see prevalent use in hands-free settings such as driving or exercising, activities where users also regularly enjoy listening to music. In such settings, virtual assistants will temporarily mute a user's music to improve intelligibility. However, users may perceive these interruptions as intrusive, negatively impacting their music-listening experience. To address this challenge, we propose the concept of music-aware virtual assistants, where speech notifications are modified to resemble a voice singing in harmony with the user's music. We contribute a system that processes user music and notification text to produce a blended mix, replacing original song lyrics with the notification content. 
In a user study comparing musical assistants to standard virtual assistants, participants expressed that musical assistants fit better with music, reduced intrusiveness, and provided a more delightful listening experience overall.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 169752 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170100 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Computer Science Department" + } + ], + "personId": 170442 + } + ] + }, + { + "id": 170956, + "typeId": 13748, + "title": "Touchscreen-based Hand Tracking for Remote Whiteboard Interaction", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676412" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-4964", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175077, + 171047 + ], + "eventIds": [], + "abstract": "In whiteboard-based remote communication, the seamless integration of drawn content and hand-screen interactions is essential for an immersive user experience. Previous methods either require bulky device setups for capturing hand gestures or fail to accurately track the hand poses from capacitive images. In this paper, we present a real-time method for precise tracking 3D poses of both hands from capacitive video frames. To this end, we develop a deep neural network to identify hands and infer hand joint positions from capacitive frames, and then recover 3D hand poses from the hand-joint positions via a constrained inverse kinematic solver. Additionally, we design a device setup for capturing high-quality hand-screen interaction data and obtained a more accurate synchronized capacitive video and hand pose dataset. Our method improves the accuracy and stability of 3D hand tracking for capacitive frames while maintaining a compact device setup for remote communication. We validate our scheme design and its superior performance on 3D hand pose tracking and demonstrate the effectiveness of our method in whiteboard-based remote communication. 
", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Diego", + "institution": "University of California, San Diego", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Microsoft Research Asia", + "dsl": "" + } + ], + "personId": 170521 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Microsoft Research Asia", + "dsl": "Microsoft" + } + ], + "personId": 170072 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Microsoft Research Asia", + "dsl": "Internet Graphics Group" + } + ], + "personId": 170452 + } + ] + }, + { + "id": 170957, + "typeId": 13748, + "title": "Effects of Computer Mouse Lift-off Distance Settings in Mouse Lifting Action", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676442" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5013", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171029, + 175090 + ], + "eventIds": [], + "abstract": "This study investigates the effect of Lift-off Distance (LoD) on a computer mouse, which refers to the height at which a mouse sensor stops tracking when lifted off the surface. Although a low LoD is generally preferred to avoid unintentional cursor movement in mouse lifting (=clutching), especially in first-person shooter games, it may reduce tracking stability. \r\nWe conducted a psychophysical experiment to measure the perceptible differences between LoD levels and quantitatively measured the unintentional cursor movement error and tracking stability at four levels of LoD while users performed mouse lifting. The results showed a trade-off between movement error and tracking stability at varying levels of LoD. Our findings offer valuable information on optimal LoD settings, which could serve as a guide for choosing a proper mouse device for enthusiastic gamers. ", + "authors": [ + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daegu", + "institution": "Daegu Gyeongbuk Institute of Science & Technology (DGIST)", + "dsl": "" + } + ], + "personId": 170185 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daegu", + "institution": "Daegu Gyeongbuk Institute of Science and Technology (DGIST)", + "dsl": "" + } + ], + "personId": 170431 + } + ] + }, + { + "id": 170958, + "typeId": 13745, + "title": "Sustainable in-house PCB prototyping", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686710" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24e-1032", + "source": "PCS", + "trackId": 13202, + "tags": [], + "keywords": [], + "sessionIds": [ + 171036 + ], + "eventIds": [], + "abstract": "Printed circuit boards (PCBs) are now ubiquitous in everyday objects. Despite advancements in mass production and the widespread availability of circuit design software, PCB manufacturing continues to present significant sustainability challenges due to the continual material sourcing and e-waste generation.\r\n\r\nIn this paper, I investigate the sustainability challenges and potential solutions in PCB production, utilizing in-house prototyping as a focal point. This approach is both accessible for investigation and allows the generalization of findings to broader contexts. 
I propose novel digital fabrication techniques to enhance material circulation between PCB projects and design iterations, thereby mitigating the environmental impact of electronics manufacturing. I introduce three research archetypes: SolderlessPCB, Fibercuit, and PCB Renewal, each addressing sustainability challenges at different stages of PCB prototyping. These solutions possess the potential to be extrapolated to solve large-scale PCB manufacturing sustainability challenges, heralding a more sustainable future for electronics production.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Maryland", + "city": "College Park", + "institution": "University Of Maryland", + "dsl": "Department of Computer Science" + } + ], + "personId": 170082 + } + ] + }, + { + "id": 170959, + "typeId": 13748, + "title": "Degrade to Function: Towards Eco-friendly Morphing Devices that Function Through Programmed Sequential Degradation", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676464" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1336", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175079, + 171033 + ], + "eventIds": [], + "abstract": "While it seems counterintuitive to think of degradation within an operating device as beneficial, one may argue that when rationally designed, the controlled breakdown of materials—physical, chemical, or biological—can be harnessed for specific functions. To apply this principle to the design of morphing devices, we introduce the concept of \"Degrade to Function\" (DtF). This concept aims to create eco-friendly and self-contained morphing devices that operate through a sequence of environmentally-triggered degradations. We explore its design considerations and implementation techniques by identifying environmental conditions and degradation types that can be exploited, evaluating potential materials capable of controlled degradation, suggesting designs for structures that can leverage degradation to achieve various transformations and functions, and developing sequential control approaches that integrate degradation triggers. 
To demonstrate the viability and versatility of this design strategy, we showcase several application examples across a range of environmental conditions.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 170572 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170041 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170522 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Morphing Matter Lab" + }, + { + "country": "United States", + "state": "California", + "city": "Los Angeles", + "institution": "University of California, Los Angeles", + "dsl": "" + } + ], + "personId": 169863 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + } + ], + "personId": 169984 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170059 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170129 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "Mechanical Engineering, Morphing Matter Lab" + } + ], + "personId": 170224 + } + ] + }, + { + "id": 170960, + "typeId": 13748, + "title": "ProgramAlly: Creating Custom Visual Access Programs via Multi-Modal End-User Programming", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676391" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2669", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171024, + 175078 + ], + "eventIds": [], + "abstract": "Existing visual assistive technologies are built for simple and common use cases, and have few avenues for blind people to customize their functionalities. Drawing from prior work on DIY assistive technology, this paper investigates end-user programming as a means for users to create and customize visual access programs to meet their unique needs. We introduce ProgramAlly, a system for creating custom filters for visual information, e.g., 'find NUMBER on BUS', leveraging three end-user programming approaches: block programming, natural language, and programming by example. To implement ProgramAlly, we designed a representation of visual filtering tasks based on scenarios encountered by blind people, and integrated a set of on-device and cloud models for generating and running these programs. 
In user studies with 12 blind adults, we found that participants preferred different programming modalities depending on the task, and envisioned using visual access programs to address unique accessibility challenges that are otherwise difficult with existing applications. Through ProgramAlly, we present an exploration of how blind end-users can create visual access programs to customize and control their experiences.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170624 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170483 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "School of Information" + } + ], + "personId": 169750 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170298 + } + ] + }, + { + "id": 170961, + "typeId": 13749, + "title": "Micro-Gesture Recognition of Tongue via Bone Conduction Sound", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686336" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-6256", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059, + 175060 + ], + "eventIds": [], + "abstract": "We propose a hands-free and less perceptible gesture sensing method of the tongue by capturing the bone conduction sound generated when the tongue rubs the teeth. The sound is captured by the bone conduction microphones attached behind the ears. In this work, we show that tongue slide, snap, and teeth click gestures can be classified using the decision tree algorithm, which focuses on the characteristics in the sound spectrogram. We conducted a preliminary experiment to verify that input methods for mouth microgesture devices using bone conduction can be expanded from only teeth to teeth and tongue gestures without any additional obtrusive sensors. 
The evaluation revealed that our method achieved a classification accuracy of 82.7% with user-specific parameter adjustment.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 170278 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "Tokyo", + "city": "Bunkyo-ku", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 170399 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "Tokyo", + "city": "Bunkyo-ku", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 169802 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 170360 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 170696 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 169965 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "Tokyo", + "city": "Bunkyo-ku", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 169739 + } + ] + }, + { + "id": 170962, + "typeId": 13749, + "title": "Inkspire: Sketching Product Designs with AI", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686339" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-2011", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 175088, + 171049 + ], + "eventIds": [], + "abstract": "With recent advancements in the capabilities of Text-to-Image (T2I) AI models, product designers have begun experimenting with them in their work. However, T2I models struggle to interpret abstract language and the current user experience of T2I tools is akin to a \"slot machine\" rather than a more iterative, co-creative process. To address these challenges, we developed Inkspire, a sketch-driven tool that supports designers in prototyping product design concepts with analogical inspirations and a complete sketch-to-design-to-sketch feedback loop. To inform the design of Inkspire, we conducted a day-long exchange session with professional automotive designers. 
In a within-subjects study comparing Inkspire to ControlNet, we found that Inkspire supported designers in engaging in both divergent and convergent stages of design, provided more inspiration and exploration of design ideas, and improved aspects of the co-creative process by allowing designers to effectively grasp the current state of the AI to guide it towards their design intentions.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170539 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 169968 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 169679 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human Computer Interaction Institute" + } + ], + "personId": 170251 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Los Altos", + "institution": "Toyota Research Institute", + "dsl": "" + } + ], + "personId": 170084 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Los Altos", + "institution": "Toyota Research Institute", + "dsl": "" + } + ], + "personId": 170179 + } + ] + }, + { + "id": 170963, + "typeId": 13744, + "title": "Breaking Future Rhythm Visualizer", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686739" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1032", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "In breaking, the timing of the technique and rhythm needs to be matched, but the audience has trouble judging whether or not the timing is correct. Therefore, We developed a system that shows the future rhythm of the sound source the DJ is playing.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "RIKEN", + "dsl": "" + } + ], + "personId": 170256 + } + ] + }, + { + "id": 170964, + "typeId": 13748, + "title": "Qlarify: Recursively Expandable Abstracts for Dynamic Information Retrieval over Scientific Papers", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676397" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-4261", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175070, + 171037 + ], + "eventIds": [], + "abstract": "Navigating the vast scientific literature often starts with browsing a paper’s abstract. However, when a reader seeks additional information, not present in the abstract, they face a costly cognitive chasm during their dive into the full text. To bridge this gap, we introduce recursively expandable abstracts, a novel interaction paradigm that dynamically expands abstracts by progressively incorporating additional information from the papers’ full text. 
This lightweight interaction allows scholars to specify their information needs by quickly brushing over the abstract or selecting AI-suggested expandable entities. Relevant information is synthesized using a retrieval-augmented generation approach, presented as a fluid, threaded expansion of the abstract, and made efficiently verifiable via attribution to relevant source-passages in the paper. Through a series of user studies, we demonstrate the utility of recursively expandable abstracts and identify future opportunities to support low-effort and just-in-time exploration of long-form information contexts through LLM-powered interactions.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 170377 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Allen Institute for AI", + "dsl": "AI2" + } + ], + "personId": 170501 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Allen Institute for AI", + "dsl": "" + } + ], + "personId": 170184 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "CSE" + } + ], + "personId": 170600 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Allen Institute for Artificial Intelligence", + "dsl": "Semantic Scholar" + }, + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 170443 + } + ] + }, + { + "id": 170965, + "typeId": 13744, + "title": "Emotion Overflow: an Interactive System to Represent Emotion with Fluid", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686779" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1155", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Augmented Emotion", + "Fluid Interface", + "Emotion Visualization", + "Interactive Design", + "Affective Computing", + "Human-Computer Interaction" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "This paper presents Emotion Overflow, a novel system that transforms textual emotional input into interactive fluid visualizations. Utilizing a GPT model, our approach extracts emotion attributes from text and maps them onto fluid characteristics, creating a dynamic visual representation of emotional states. The system focuses on valence and arousal as key emotion attributes, translating them into fluid properties such as color, variance over time, and responsiveness. We demonstrate the application through two interactive experiences: a direct emotion visualization and an emotion guessing game. 
This work contributes to the field of emotional expression in human-computer interaction by offering a fluid, intuitive interface for exploring and communicating complex emotional states.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "Ibaraki", + "city": "Tsukuba", + "institution": "University of Tsukuba", + "dsl": "Artificial Intelligence Lab" + } + ], + "personId": 172829 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tsukuba", + "institution": "University of Tsukuba", + "dsl": "Colleage of Media Arts, Science and Technology" + } + ], + "personId": 169918 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tsukuba", + "institution": "University of Tsukuba", + "dsl": "Faculty of Engineering, Information and Systems" + } + ], + "personId": 170482 + } + ] + }, + { + "id": 170966, + "typeId": 13744, + "title": "Quilt: Custom UIs for Linking Unstructured Documents to Structured Datasets", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686777" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1156", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Practitioners working with large document dumps struggle to\r\nidentify and record connections between documents and struc-\r\ntured datasets. For example, how should journalists building a\r\ndatabase of police misconduct incidents connect police report doc-\r\numents to their database of officers? The current approach is ei-\r\nther to (i) switch between reading entire documents and querying\r\ndatabases, which is a tedious manual process, or (ii) build a cus-\r\ntom linking UI to help. These linking UIs are typically built from\r\nscratch using vanilla web programming, despite having similar\r\nstructures. We introduce Quilt, a framework for generating cus-\r\ntom UIs for linking between documents and databases. Quilt’s API\r\nlets programmers provide domain knowledge—e.g., what counts as\r\nevidence that a particular database row is relevant to a document.\r\nFrom this information, Quilt generates the UI automatically. 
We\r\nexplore Quilt’s expressivity via four case studies and find it handles\r\na diverse range of documents and databases.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "Electrical Engineering & Computer Sciences" + } + ], + "personId": 170199 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "UC Berkeley", + "dsl": "EECS" + } + ], + "personId": 169820 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "" + } + ], + "personId": 170352 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Minnesota", + "city": "Northfield", + "institution": "Carleton College", + "dsl": "Department of Computer Science" + } + ], + "personId": 170525 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "UC Berkeley", + "dsl": "" + } + ], + "personId": 170401 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "" + } + ], + "personId": 169839 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Berkeley", + "institution": "University of California, Berkeley", + "dsl": "Electrical Engineering and Computer Sciences" + } + ], + "personId": 169729 + } + ] + }, + { + "id": 170967, + "typeId": 13744, + "title": "Patchview: LLM-Powered Worldbuilding with Generative Dust and Magnet Visualization", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1036", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Large language models (LLMs) can help writers build story worlds by generating world elements, such as factions, characters, and locations. However, making sense of many generated elements can be overwhelming. Moreover, if the user wants to precisely control aspects of generated elements that are difficult to specify verbally, prompting alone may be insufficient. We introduce Patchview, a customizable LLM-powered system that visually aids worldbuilding by allowing users to interact with story concepts and elements through the physical metaphor of magnets and dust. Elements in Patchview are visually dragged closer to concepts with high relevance, facilitating sensemaking. The user can also steer the generation with verbally elusive concepts by indicating the desired position of the element between concepts. When the user disagrees with the LLM's visualization and generation, they can correct those by repositioning the element. These corrections can be used to align the LLM's future behaviors to the user's perception. With a user study, we show that Patchview supports the sensemaking of world elements and steering of element generation, facilitating exploration during the worldbuilding process. 
Patchview provides insights on how customizable visual representation can help sensemake, steer, and align generative AI model behaviors with the user's intentions.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Midjourney", + "dsl": "" + } + ], + "personId": 169907 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Midjourney", + "dsl": "" + } + ], + "personId": 169936 + } + ] + }, + { + "id": 170968, + "typeId": 13744, + "title": "Palimpsest: a Spatial User Interface Toolkit for Cohering Tracked Physical Entities and Interactive 3D Content", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686780" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1157", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Human computer interaction", + "Mixed/augmented reality", + "3D user interfaces", + "Design" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Designing effective 3D user interfaces for immersive technologies remains tedious and manual, particularly for the successful integration of complex interactive 3D content. Palimpsest automates parts of this workflow by associating 3D user interface elements with interactive 3D content, laid out without code by the designer. We demonstrate a custom-developed UI framework, built on top of recently introduced workflows for Apple Vision Pro, streamlining the coherent spatial arrangement of 3D model libraries, animations, and textures in relation to tracked 3D objects or 2D images in the user's environment. In this demonstration, we showcase several sample applications built with Palimpsest in architectural visualization, hybrid puzzle games, and spatial reading.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Stanford", + "institution": "Stanford University", + "dsl": "Mechanical Engineering" + } + ], + "personId": 170124 + } + ] + }, + { + "id": 170969, + "typeId": 13744, + "title": "Conductive Fabric Diaphragm for Noise-Suppressive Headset Microphone", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686768" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1038", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "\"Noise-Suppressive Microphonel", + "Speech Enhancementl", + "Fabric Microphone", + "Wearable Device", + "Voice User Interface\"" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Voice interaction systems require high-quality speech input; however, ambient noise significantly degrades the performance of conventional microphones. While multimodal sensor-based speech enhancement techniques have been proposed, they typically require GPU devices for processing.\r\nWe proposed a conductive fabric diaphragm headset microphone, which integrates a planar fabric microphone into a headset configuration. The proposed microphone exhibits low sensitivity and curved directivity characteristics, enabling efficient capture of the user's voice while simultaneously suppressing background and competing speech noises.\r\nComparative analyses against two commercially available noise-cancelling headsets demonstrated that our approach achieved superior Scale-Invariant Signal-to-Noise Ratio (Si-SNR) performance. 
Our approach constitutes a real-time, hardware-based noise-suppressive microphone solution with potential applications in speech communication, automatic speech recognition, and voice conversion technologies operating in noise-rich environments.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "Tokyo", + "city": "Bunkyo", + "institution": "The University of Tokyo", + "dsl": "Graduate School of Interdisciplinary Information Studies" + }, + { + "country": "Japan", + "state": "Chiba", + "city": "Kashiwa", + "institution": "National Institute of Advanced Industrial Science and Technology", + "dsl": "Human Augmentation Research Center" + } + ], + "personId": 170489 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "Chiba", + "city": "Kashiwa", + "institution": "National Institute of Advanced Industrial Science and Technology", + "dsl": "Human Augmentation Research Center" + } + ], + "personId": 170541 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Kashiwa", + "institution": "National Institute of Advanced Industrial Science and Technology (AIST)", + "dsl": "Human Augmentation Research Center (HARC)" + } + ], + "personId": 170528 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tsukuba", + "institution": "National Institute of Advanced Industrial Science and Technology", + "dsl": "Sensing System Reseach Center" + } + ], + "personId": 170312 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "Chiba", + "city": "Kashiwa", + "institution": "The National Institute of Advanced Industrial Science and Technology", + "dsl": "Human Augmentation Research Center" + } + ], + "personId": 169689 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + }, + { + "country": "Japan", + "state": "", + "city": "Kyoto", + "institution": "Sony CSL Kyoto", + "dsl": "" + } + ], + "personId": 169707 + } + ] + }, + { + "id": 170970, + "typeId": 13744, + "title": "IRIS: Wireless ring for vision-based smart home interaction", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1039", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Integrating cameras into wireless smart rings has been challenging due to size and power constraints. We introduce IRIS, the first wireless vision-enabled smart ring system for smart home interactions. Equipped with a camera, Bluetooth radio, inertial measurement unit (IMU) and an onboard battery, IRIS meets the small size, weight, and power (SWaP) requirements for ring devices. IRIS is context-aware, adapting its gestures to the detected device, and can last for 16-24 hours on a single charge. IRIS employs a human-in-the-loop approach that continuously improves device recognition accuracy. Demonstrating superior speed across tested device categories, IRIS can quickly control devices from gesture initiation, surpassing voice commands by several seconds. In a study involving 23 participants, IRIS consistently outpaced voice commands, with a higher proportion of participants expressing a preference for IRIS over voice commands regarding toggling a device's state, granular control, and social acceptability. 
Our work addresses systems challenges, pushing the boundary of what is possible with ring form-factor devices and opening up novel applications.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Mobile Intelligence Lab" + } + ], + "personId": 169691 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Mobile Intelligence Lab" + } + ], + "personId": 170607 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Mobile Intelligence Lab" + } + ], + "personId": 170656 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science and Engineering" + } + ], + "personId": 170602 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science and Engineering" + } + ], + "personId": 170591 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science and Engineering" + } + ], + "personId": 170504 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science and Engineering" + } + ], + "personId": 170608 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Mobile Intelligence Lab" + } + ], + "personId": 169985 + } + ] + }, + { + "id": 170971, + "typeId": 13748, + "title": "Embrogami: Shape-Changing Textiles with Machine Embroidery", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676431" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6557", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171020, + 175086 + ], + "eventIds": [], + "abstract": "Machine embroidery is a versatile technique for creating custom and entirely fabric-based patterns on thin and conformable textile surfaces. However, existing machine-embroidered surfaces remain static, limiting the interactions they can support. We introduce Embrogami, an approach for fabricating textile structures with versatile shape-changing behaviors. Inspired by origami, we leverage machine embroidery to form finger-tip-scale mountain-and-valley structures on textiles with customized shapes, bistable or elastic behaviors, and modular composition. The structures can be actuated by the user or the system to modify the local textile surface topology, creating interactive elements like toggles and sliders or textile shape displays with an ultra-thin, flexible, and integrated form factor. We provide a dedicated software tool and report results of technical experiments to allow users to flexibly design, fabricate, and deploy customized Embrogami structures. 
With four application cases, we showcase Embrogami’s potential to create functional and flexible shape-changing textiles with diverse visuo-tactile feedback.", + "authors": [ + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Saarbrucken", + "institution": "Saarland University", + "dsl": "Human Computer Interaction Lab" + } + ], + "personId": 170035 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Saarbrücken", + "institution": "Saarland Informatics Campus", + "dsl": "Saarland University" + } + ], + "personId": 170363 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University", + "dsl": "" + } + ], + "personId": 169794 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Aachen", + "institution": "RWTH Aachen University", + "dsl": "" + } + ], + "personId": 169742 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Saarbrücken", + "institution": "Saarland University, Saarland Informatics Campus", + "dsl": "" + } + ], + "personId": 169850 + } + ] + }, + { + "id": 170972, + "typeId": 13748, + "title": "SpinShot: Optimizing Both Physical and Perceived Force Feedback of Flywheel-Based, Directional Impact Handheld Devices", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676433" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8615", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175059, + 171041 + ], + "eventIds": [], + "abstract": "Real-world impact, such as hitting a tennis ball and a baseball, generates instantaneous, directional impact forces. However, current ungrounded force feedback technologies, such as air jets and propellers, can only generate directional impulses that are 10x-10,000x weaker. We present SpinShot, a flywheel-based device with a solenoid-actuated stopper capable of generating directional impulse of 22Nm in 1ms, which is more than 10x stronger than prior ungrounded directional technologies. Furthermore, we present a novel force design that reverses the flywheel immediately after the initial impact, to significantly increase the perceived magnitude. We conducted a series of two formative, perceptual studies (n=16, 18), followed by a summative user experience study (n=16) that compared SpinShot vs. moving mass (solenoid) and vs. air jets in a VR baseball hitting game. Results showed that SpinShot significantly improved realism, immersion, magnitude (p < .01) compared to both baselines, but significantly reduced comfort vs. air jets primarily due to the 2.9x device weight. 
Overall, SpinShot was preferred by 63-75% of the participants.", + "authors": [ + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "" + } + ], + "personId": 170262 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "HCI Lab" + } + ], + "personId": 170078 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "" + } + ], + "personId": 170067 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "" + } + ], + "personId": 170422 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "" + } + ], + "personId": 170635 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "" + } + ], + "personId": 170614 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "" + } + ], + "personId": 169814 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "" + } + ], + "personId": 169837 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "" + } + ], + "personId": 170441 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "" + } + ], + "personId": 169827 + } + ] + }, + { + "id": 170973, + "typeId": 13744, + "title": "Demonstration of VRCopilot: Authoring 3D Layouts with Generative AI Models in VR", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1161", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Immersive authoring provides an intuitive medium for users to create 3D scenes via direct manipulation in Virtual Reality (VR). Recent advances in generative AI have enabled the automatic creation of realistic 3D layouts. However, it is unclear how capabilities of generative AI can be used in immersive authoring to support fluid interactions, user agency, and creativity. We introduce VRCopilot, a mixed-initiative system that integrates pre-trained generative AI models into immersive authoring to facilitate human-AI co-creation in VR. VRCopilot presents multimodal interactions to support rapid prototyping and iterations with AI, and intermediate representations such as wireframes to augment user controllability over the created content. Through a series of user studies, we evaluated the potential and challenges in procedural, scaffolded, and automatic creation in immersive authoring. We found that scaffolded creation using wireframes enhanced the user agency compared to automatic creation. 
We also found that procedural creation via multimodal specification offers the highest sense of creativity and agency.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "School of Information" + } + ], + "personId": 170481 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 169859 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 169690 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "School of Information" + } + ], + "personId": 170239 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Michigan", + "city": "Ann Arbor", + "institution": "University of Michigan", + "dsl": "Computer Science and Engineering" + } + ], + "personId": 170298 + } + ] + }, + { + "id": 170974, + "typeId": 13748, + "title": "ProtoDreamer: A Mixed-prototype Tool Combining Physical Model and Generative AI to Support Conceptual Design", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676399" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5585", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175088, + 171032 + ], + "eventIds": [], + "abstract": "Prototyping serves as a critical phase in the industrial conceptual design process, enabling exploration of problem space and identification of solutions. Recent advancements in large-scale generative models have enabled AI to become a co-creator in this process. However, designers often consider generative AI challenging due to the necessity to follow computer-centered interaction rules, diverging from their familiar design materials and languages. Physical prototype is a commonly used design method, offering unique benefits in prototype process, such as intuitive understanding and tangible testing. In this study, we propose ProtoDreamer, a mixed-prototype tool that synergizes generative AI with physical prototype to support conceptual design. ProtoDreamer allows designers to construct preliminary prototypes using physical materials, while AI recognizes these forms and vocal inputs to generate diverse design alternatives. This tool empowers designers to tangibly interact with prototypes, intuitively convey design intentions to AI, and continuously draw inspiration from the generated artifacts. 
An evaluation study confirms ProtoDreamer’s utility and strengths in time efficiency, creativity support, defects exposure, and detailed thinking facilitation.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 169831 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 169957 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "School of Computer Science and Technology", + "dsl": "Zhejiang University" + } + ], + "personId": 170165 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou, Zhejiang", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170233 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170107 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "International Design Institute" + } + ], + "personId": 170139 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "College of Computer Science and Technology", + "dsl": "Zhejiang University" + } + ], + "personId": 169880 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "International Design Institute" + } + ], + "personId": 169901 + } + ] + }, + { + "id": 170975, + "typeId": 13744, + "title": "MagicDraw: Haptic-Assisted One-Line Drawing with Shared Control", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686753" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1041", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Assisted Sketching", + "Force Feedback", + "Haptics", + "Shared Control" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We present MagicDraw, a platform designed for force feedback guidance in one-line drawing. MagicDraw allows users to transition seamlessly between fully assisted sketching and freehand drawing through a control-sharing mechanism. The initial drawing concept is generated based on user input prompts. This platform operates similarly to tracing but introduces two major enhancements. First, MagicDraw provides force feedback guidance, aiding users in maintaining accurate line-following. Second, the system enables dynamic control sharing, allowing users to deviate from the predefined path and engage in creative exploration. We also introduce “exploration region,” where users can perform freehand drawing. In these regions, the predefined path advances outside the boundary, pausing for the user’s creative deviations. As the user returns to fully assisted sketching, these regions shrink until the user resumes force feedback-guided tracing. 
This approach ensures users can explore creative variations while still receiving structured guidance.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "Huawei", + "dsl": "" + } + ], + "personId": 170280 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Arizona", + "city": "Tempe", + "institution": "Arizona State University", + "dsl": "School of Computing and Augmented Intelligence" + } + ], + "personId": 169823 + } + ] + }, + { + "id": 170976, + "typeId": 13748, + "title": "WasteBanned: Supporting Zero Waste Fashion Design Through Linked Edits", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676395" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3288", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175079, + 171033 + ], + "eventIds": [], + "abstract": "The commonly used cut-and-sew garment construction process, in which 2D fabric panels are cut from sheets of fabric and assembled into 3D garments, contributes to widespread textile waste in the fashion industry. There is often a significant divide between the design of the garment and the layout of the panels. One opportunity for bridging this gap is the emerging study and practice of zero waste fashion design, which involves creating clothing designs with maximum layout efficiency. Enforcing the strict constraints of zero waste sewing is challenging, as edits to one region of the garment necessarily affect neighboring panels. Based on our formative work to understand this emerging area within fashion design, we present WasteBanned, a tool that combines CAM and CAD to help users prioritize efficient material usage, work within these zero waste constraints, and edit existing zero waste garment patterns. 
Our user evaluation indicates that our tool helps fashion designers edit zero waste patterns to fit different bodies and add stylistic variation, while creating highly efficient fabric layouts.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "Massachusetts Institute of Technology", + "dsl": "" + } + ], + "personId": 170309 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT CSAIL", + "dsl": "" + } + ], + "personId": 170347 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G Allen School of Computer Science" + } + ], + "personId": 169724 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G Allen School of Computer Science and Engineering " + } + ], + "personId": 170560 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 169884 + } + ] + }, + { + "id": 170977, + "typeId": 13744, + "title": "EgoTouch: On-Body Touch Input Using AR/VR Headset Cameras", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1163", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "In augmented and virtual reality (AR/VR) experiences, a user’s arms and hands can provide a convenient and tactile surface for touch input. Prior work has shown on-body input to have significant speed, accuracy, and ergonomic benefits over in-air interfaces, which are common today. In this work, we demonstrate high accuracy, bare hands (i.e., no special instrumentation of the user) skin input using just an RGB camera, like those already integrated into all modern XR headsets. Our results show this approach can be accurate, and robust across diverse lighting conditions, skin tones, and body motion (e.g., input while walking). Finally, our pipeline also provides rich input metadata including touch force, finger identification, angle of attack, and rotation. 
We believe these are the requisite technical ingredients to more fully unlock on-skin interfaces that have been well motivated in the HCI literature but have lacked robust and practical methods.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170386 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 170374 + } + ] + }, + { + "id": 170978, + "typeId": 13749, + "title": "Undercover Assistance: Designing a Disguised App to Navigate Sexual Harassment", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686332" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-2128", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059, + 175089 + ], + "eventIds": [], + "abstract": "Sexual harassment remains a significant safety concern for many women. Despite extensive discussions within the HCI community about technological solutions to address harassment, previous research has primarily focused on cyber harassment, with relatively little work examining design interventions to assist women \\textit{during} incidents of sexual harassment in physical spaces. To address this gap, we conducted a survey with 106 women who have experienced sexual harassment, aiming to identify common contexts of occurrence and the coping strategies employed. The survey revealed that social gatherings, such as parties, are the most frequent settings for sexual harassment. The most common coping strategy involved pretending to have an urgent matter and leaving the scene; however, these strategies varied depending on the context, whether harassed by an acquaintance or stranger. Furthermore, many participants expressed concern that using a safety app could escalate the situation by provoking the perpetrator. Drawing from these insights, we propose the design of a disguised app that mimics a social media app, thereby reducing suspicion when used near the perpetrator. The app includes multiple functionalities recommended by survey participants: 1) discreetly sharing location and chatting with friends, 2) recording, 3) an emergency alarm, and 4) triggering a fake call from a male voice. User testing with college students has suggested the effectiveness of these safety measures. 
This paper discusses how these design features address the challenges of safely navigating sexual harassment incidents.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Florida", + "city": "Gainesville", + "institution": "University of Florida", + "dsl": "" + } + ], + "personId": 169995 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Florida", + "city": "Gainesville", + "institution": "University of Florida", + "dsl": "Digital Worlds Institute" + } + ], + "personId": 170455 + } + ] + }, + { + "id": 170979, + "typeId": 13749, + "title": "Electrical Connected Orchestra: A New Baton System that can Interactively Control the Body Movements of Performers", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686344" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-3458", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059, + 175091 + ], + "eventIds": [], + "abstract": "This research leads to the creation of an innovative music performance system that connects conductors and performers. We developed an automatic performance system, that can interactively control the body movements of performers. This system converts music arranged by melody morphing into electrical muscle stimulations (EMS) and controls the body movements of multiple people with devices attached to their hands and feet to realize the performance. In addition, by controlling the behavior of the EMS using a baton interface with a built-in acceleration sensor, the conductor can interactively change the melody, tempo, and velocity of the performance.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "RIKEN", + "dsl": "" + } + ], + "personId": 170663 + } + ] + }, + { + "id": 170980, + "typeId": 13749, + "title": "Enhancing Readability with a Target-Aware Zooming Technique for Touch Surfaces", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-1395", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 175065, + 171049 + ], + "eventIds": [], + "abstract": "Double-tapping is a common way to zoom in and out of content on touch surfaces, snapping between overview and detail levels. However, the optimal zoom level often differs for each user depending on the displayed content and user characteristics (e.g., age or visual acuity). When focusing on content containing text, the scaling factor is especially crucial for readability. However, the conventional double-tap to zoom uses a fixed scale factor, which often requires users to manually adjust the zoom level by pinching in and out after double-tapping. Additionally, on small-screen devices such as smartphones, the specific area of interest may not fit within the screen after zooming in, causing users to pan repeatedly to adjust the display position of the content. To address these issues, we propose a target-aware zooming technique that dynamically adjusts the zoom level based on the content and user preferences. Furthermore, to minimize the need for panning, our technique simultaneously snaps the top-left corner of the bounding box of the tapped text to the top-left corner of the screen as the content is zoomed in. 
This approach aims to reduce the need for manual adjustments, improving usability and readability of digital content for diverse user groups.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "LY Corporation", + "dsl": "" + } + ], + "personId": 170189 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "Tokyo University of Technology", + "dsl": "" + } + ], + "personId": 170001 + } + ] + }, + { + "id": 170981, + "typeId": 13749, + "title": "Mapping Gaze and Head Movement via Salience Modulation and Hanger Reflex", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686349" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-4660", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 171059, + 175060 + ], + "eventIds": [], + "abstract": "Vision is crucial for daily input and plays a significant role in remote collaboration. Sharing gaze has been explored to enhance communication, but sharing gaze alone is not natural due to limited central vision (30 degrees). We propose a novel approach to map gaze and head movements simultaneously, enabling replicating natural observation across individuals. In this paper, we evaluate the effectiveness of replication head movements and gaze on another person by a pilot study. In the future, we will also explore the possibility of improving novices' efficiency in imitating experts by replicating the gaze trajectories of experts.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 170505 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 169704 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 170183 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 169982 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + }, + { + "country": "Japan", + "state": "", + "city": "Kyoto", + "institution": "Sony CSL Kyoto", + "dsl": "" + } + ], + "personId": 169707 + } + ] + }, + { + "id": 170982, + "typeId": 13749, + "title": "Investigating the Design Space of Affective Touch on the Forearm Area", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686320" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24c-6044", + "source": "PCS", + "trackId": 13204, + "tags": [], + "keywords": [], + "sessionIds": [ + 175059, + 171049 + ], + "eventIds": [], + "abstract": "Affective touch, which involves slow, gentle mechanical stimulation of the skin, is drawing interest from the Human-Computer Interaction community in recent years. Stroking, the most common form of affective touch, is strongly related to emotional responses and has been proven beneficial for interventing and mitigating anxiety. This has led to a growing need for developing wearable stroking devices. In our study, we first presented a custom-built forearm-worn interface. 
We then explored the design parameters for stroking devices, focusing on 2 factors: (1) the form factors (shape / material) of the end effector contacted directly with the skin, and (2) the stroking distance, respectively.", + "authors": [ + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Dresden", + "institution": "Dresden University of Technology", + "dsl": "Chair of Acoustics and Haptics" + } + ], + "personId": 170675 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Dresden", + "institution": "Dresden University of Technology", + "dsl": "" + } + ], + "personId": 170223 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Dresden", + "institution": "Dresden University of Technology", + "dsl": "Chair of Acoustics and Haptics" + } + ], + "personId": 170201 + } + ] + }, + { + "id": 170983, + "typeId": 13744, + "title": "Personal Time-Lapse", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1164", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Our bodies are constantly in motion—from the bending of arms and legs to the less conscious movement of breathing, our precise shape and location change constantly. This can make subtler developments (e.g., the growth of hair, or the healing of a wound) difficult to observe. Our work focuses on helping users record and visualize this type of subtle, longer-term change. We present a mobile tool that combines custom 3D tracking with interactive visual feedback and computational imaging to capture personal time-lapse, which approximates longer-term video of the subject (typically, part of the capturing user’s body) under a fixed viewpoint, body pose, and lighting condition. These personal time-lapses offer a powerful and detailed way to track visual changes of the subject over time. We begin with a formative study that examines what makes personal time-lapse so difficult to capture. 
Building on our findings, we motivate the design of our capture tool, evaluate this design with users, and demonstrate its effectiveness in a variety of challenging examples.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University", + "dsl": "Computer Science" + } + ], + "personId": 170027 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Ithaca", + "institution": "Cornell University", + "dsl": "Computer Science" + } + ], + "personId": 170325 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Cornell University", + "dsl": "Information Science" + } + ], + "personId": 169735 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Cornell Tech, Cornell University", + "dsl": "" + } + ], + "personId": 170266 + } + ] + }, + { + "id": 170984, + "typeId": 13744, + "title": "Computational Design and Fabrication of 3D Printed Zippers Connecting 3D Textile Structures", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686743" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1044", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "3D Printing", + "Computational Design and Fabrication", + "Zippers" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Zippers have long been used for connecting and disconnecting two textiles repeatedly and easily. However, many industrial zippers cannot retain their shape after assembly, limiting the potential use of connecting soft textiles to form 3D shapes with large and various curvatures. Thus, we present a method to design and fabricate interlocking 3D printable zippers that can encode post-assembly shapes. The user first gives a target 3D shape divided into developable patches (i.e., curved surfaces unrollable like paper). Then, our design software built on Rhino/Grasshopper computes an interlocking zipper on the boundary curve of the patches. The user 3D prints the zipper in a flat state and welds it to the edge of the textiles by thermal bonding, which can zip into a target 3D shape. 
In this demo, we report our method and exhibit design examples of 3D printed zippers.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 170304 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + }, + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "" + } + ], + "personId": 169702 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Kanagawa", + "institution": "Keio University", + "dsl": "" + }, + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "The University of Tokyo", + "dsl": "" + } + ], + "personId": 170696 + } + ] + }, + { + "id": 170985, + "typeId": 13744, + "title": "Edible Lens Array: Dishes with lens-shaped jellies that change their appearance depending on the viewpoint", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686745" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1048", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Edible Optics, Lens Array, Lenticular Lens, Digital Food Fabrication, 3D Printing" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "This study presents food products whose appearance, such as color and image, changes depending on the viewpoint. This was achieved by fabricating jelly with structures of convex lenses, which is arranged in a two-dimensional plane using 3D-printed molds. This enables interactive gastronomic experiences with the presentation from multiple viewpoints. In this study, we developed a system that supports the design and fabrication workflow for edible lens arrays. Using our system, users can design arbitrary lens array shapes and simulate their appearance based on the refractive index of the jelly material. The system then outputs a 3D mold model for casting the jelly lenses. In addition, we created several dishes that exhibit viewpoint-dependent changes in appearance, demonstrating their potential for creating interactive gastronomic experiences.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "Meiji University", + "dsl": "" + } + ], + "personId": 170205 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "Meiji University", + "dsl": "" + } + ], + "personId": 170259 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Tokyo", + "institution": "Meiji University", + "dsl": "" + } + ], + "personId": 170532 + } + ] + }, + { + "id": 170986, + "typeId": 13756, + "title": "SenseBot: Supporting Embodied Remote Communication through AR-enabled Social Robot", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686734" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24d-6054", + "source": "PCS", + "trackId": 13203, + "tags": [], + "keywords": [], + "sessionIds": [ + 171027 + ], + "eventIds": [], + "abstract": "We plan to design SenseBot, an AR-based social robot, to enhance remote intergenerational communication between children and their distant family members—such as siblings, parents, and grandparents. 
SenseBot will support asymmetric interaction with a web-based application for the remote user and a robot with a mobile phone for the local user. It will allow remote users to customize the robot's appearance as their communication agent and send embodied messages with AR effects. The system also utilizes AI and LLMs to recognize the local user’s emotions to facilitate local-user-initiated communication.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Boston", + "institution": "Khoury College of Computer Science", + "dsl": "Northeastern University " + } + ], + "personId": 170531 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "shanghai", + "institution": "shanghai jiaotong university", + "dsl": "" + } + ], + "personId": 169692 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Minnesota", + "city": "Minneapolis", + "institution": "University of Minnesota", + "dsl": "Department of Computer Science and Engineering" + } + ], + "personId": 169732 + } + ] + }, + { + "id": 170987, + "typeId": 13748, + "title": "E-Joint: Fabrication of Large-Scale Interactive Objects Assembled by 3D Printed Conductive Parts with Copper Plated Joints", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676398" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-6691", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175068, + 171052 + ], + "eventIds": [], + "abstract": "The advent of conductive thermoplastic filaments and multi-material 3D printing has made it feasible to create interactive 3D printed objects. Yet, challenges arise due to volume constraints of desktop 3D printers and high resistive characteristics of current conductive materials, making the fabrication of large-scale or highly conductive interactive objects can be daunting. We propose E-Joint, a novel fabrication pipeline for 3D printed objects utilizing mortise and tenon joint structures combined with a copper plating process. The segmented pieces and joint structures are customized in software along with integrated circuits. Then electroplate them for enhanced conductivity. We designed four distinct electrified joint structures in experiment and evaluated the practical feasibility and effectiveness of fabricating pipes. 
By constructing three applications with those structures, we verified the usability of E-Joint in making large-scale interactive objects and show path to a more integrated future for manufacturing.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "HangZhou", + "institution": "Zhejiang University", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "HangZhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170169 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170014 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 169680 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 169870 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170208 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang university", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang university", + "dsl": "" + } + ], + "personId": 170046 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170369 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang/China", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + }, + { + "country": "China", + "state": "Zhejiang/China", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170535 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 169847 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "MACAU", + "institution": "MACAU University of Science and Technology", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "MACAU", + "institution": "MACAU University of Science and Technology", + "dsl": "" + } + ], + "personId": 170644 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + }, + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170103 + } + ] + }, + { + "id": 170988, + 
"typeId": 13744, + "title": "Artist-Centric CNC Tool Design: Demonstration of the Craft-Aligned Scanner and Digital Pottery Wheel", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1049", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Digital fabrication workflows that prioritize computer-based interactions are often mismatched to the embodied knowledge and material-driven design approaches of traditional artists and craftspeople. This is especially evident in clay 3D printing, where existing tools have evolved from automated industrial machinery, and operate in stark contrast with the direct material engagement of manual ceramics craft. To close this gap, we develop a series of tools and interactions based on the metaphor of 3D-printer-as-pottery-wheel. The Digital Pottery Wheel (DPW) extends the capabilities of a traditional throwing wheel with a polar-coordinate 3D printing mechanism, while the Craft Aligned Scanner (CAS) captures a form placed on the wheel as a directly printable toolpath. Supporting this work is a real-time modular control system that enables live recording and modification of toolpaths, and blending of automatic and manual control. In this hands-on demonstration, we showcase seven new interactive workflows for clay 3D printing that have emerged from our exploration.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "Massachusetts Institute of Technology", + "dsl": "Department of Mechanical Engineering" + } + ], + "personId": 170101 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "SANTA BARBARA", + "institution": "University of California, Santa Barbara", + "dsl": "Media Arts and Technology" + } + ], + "personId": 170573 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Santa Barbara", + "institution": "University of California, Santa Barbara ", + "dsl": "Media Arts and Technology" + } + ], + "personId": 170435 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Santa Barbara", + "institution": "University of California Santa Barbara", + "dsl": "Media Arts and Technology" + } + ], + "personId": 169813 + } + ] + }, + { + "id": 170989, + "typeId": 13748, + "title": "SIM2VR: Towards Automated Biomechanical Testing in VR", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676452" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3610", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175074, + 171031 + ], + "eventIds": [], + "abstract": "Automated biomechanical testing has great potential for the development of VR applications, as initial insights into user behaviour can be gained in silico early in the design process.\r\nIn particular, it allows prediction of user movements and ergonomic variables, such as fatigue, prior to conducting user studies.\r\nHowever, there is a fundamental disconnect between simulators hosting state-of-the-art biomechanical user models and simulators used to develop and run VR applications. 
\r\nExisting user simulators often struggle to capture the intricacies of real-world VR applications, reducing ecological validity of user predictions.\r\nIn this paper, we introduce SIM2VR, a system that aligns user simulation with a given VR application by establishing a continuous closed loop between the two processes.\r\nThis, for the first time, enables training simulated users directly in the same VR application that real users interact with.\r\nWe demonstrate that SIM2VR can predict differences in user performance, ergonomics and strategies in a fast-paced, dynamic arcade game. In order to expand the scope of automated biomechanical testing beyond simple visuomotor tasks, advances in cognitive models and reward function design will be needed.", + "authors": [ + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Bayreuth", + "institution": "University of Bayreuth", + "dsl": "" + }, + { + "country": "United Kingdom", + "state": "", + "city": "Cambridge", + "institution": "University of Cambridge", + "dsl": "" + } + ], + "personId": 170048 + }, + { + "affiliations": [ + { + "country": "Finland", + "state": "", + "city": "Espoo", + "institution": "Aalto University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170050 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Bayreuth", + "institution": "University of Bayreuth", + "dsl": "Serious Games" + }, + { + "country": "United Kingdom", + "state": "", + "city": "Glasgow", + "institution": "University of Glasgow", + "dsl": "" + } + ], + "personId": 170002 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Bayreuth", + "institution": "University of Bayreuth", + "dsl": "" + }, + { + "country": "Germany", + "state": "", + "city": "Leipzig", + "institution": "Leipzig University", + "dsl": "Center for Scalable Data Analytics and Artificial Intelligence" + } + ], + "personId": 169817 + }, + { + "affiliations": [ + { + "country": "Norway", + "state": "", + "city": "Bergen", + "institution": "University of Bergen", + "dsl": "Department of Information Science and Media Studies" + } + ], + "personId": 170412 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "Scotland", + "city": "Glasgow", + "institution": "University of Glasgow", + "dsl": "School of Computing Science" + } + ], + "personId": 170705 + }, + { + "affiliations": [ + { + "country": "Finland", + "state": "", + "city": "Espoo", + "institution": "Aalto University", + "dsl": "" + } + ], + "personId": 170612 + }, + { + "affiliations": [ + { + "country": "Finland", + "state": "", + "city": "Helsinki", + "institution": "Aalto University", + "dsl": "" + } + ], + "personId": 170669 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Bayreuth", + "institution": "University of Bayreuth", + "dsl": "" + } + ], + "personId": 169990 + } + ] + }, + { + "id": 170990, + "typeId": 13748, + "title": "Can a Smartwatch Move Your Fingers? Compact and Practical Electrical Muscle Stimulation in a Smartwatch", + "award": "HONORABLE_MENTION", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676373" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5237", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175060, + 171050 + ], + "eventIds": [], + "abstract": "Smartwatches gained popularity in the mainstream, making them into today’s de-facto wearables. 
Despite advancements in sensing, haptics on smartwatches is still restricted to tactile feedback (e.g., vibration). Most smartwatch-sized actuators cannot render strong force-feedback. Simultaneously, electrical muscle stimulation (EMS) promises compact force-feedback but, to actuate fingers requires users to wear many electrodes on their forearms. While forearm electrodes provide good accuracy, they detract EMS from being a practical force-feedback interface. To address this, we propose moving the electrodes to the wrist—conveniently packing them in the backside of a smartwatch. In our first study, we found that by cross-sectionally stimulating the wrist in 1,728 trials, we can actuate thumb extension, index extension & flexion, middle flexion, pinky flexion, and wrist flexion. Following, we engineered a compact EMS that integrates directly into a smartwatch’s wristband (with a custom stimulator, electrodes, demultiplexers, and communication). In our second study, we found that participants could calibrate our device by themselves ~50% faster than with conventional EMS. Furthermore, all participants preferred the experience of this device, especially for its social acceptability & practicality. We believe that our approach opens new applications for smartwatch-based interactions, such as haptic assistance during everyday tasks.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 169989 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170155 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170095 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University Of Chicago", + "dsl": "" + } + ], + "personId": 170334 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 169862 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170376 + } + ] + }, + { + "id": 170991, + "typeId": 13748, + "title": "Computational Trichromacy Reconstruction: Empowering the Color-Vision Deficient to Recognize Colors Using Augmented Reality", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676415" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-3297", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171024, + 175078 + ], + "eventIds": [], + "abstract": "We propose an assistive technology that helps individuals with Color Vision Deficiencies (CVD) to recognize/name colors.\r\nA dichromat's color perception is a reduced two-dimensional (2D) subset of a normal\r\ntrichromat's three dimensional color (3D) perception, leading to confusion when visual stimuli that appear identical to the dichromat are referred to by different color names.\r\nUsing our proposed system, CVD individuals can interactively induce distinct perceptual changes to originally confusing colors via a computational color 
space transformation.\r\nBy combining their original 2D precepts for colors with the discriminative changes, a three dimensional color space is reconstructed, where the dichromat can learn to resolve color name confusions and accurately recognize colors.\r\nOur system is implemented as an Augmented Reality (AR) interface on smartphones, where users interactively control the rotation through swipe gestures and observe the induced color shifts in the camera view or in a displayed image. Through psychophysical experiments and a longitudinal user study, we demonstrate that such rotational color shifts have discriminative power (initially confusing colors become distinct under rotation) and exhibit structured perceptual shifts dichromats can learn with modest training. The AR App is also evaluated in two real-world scenarios (building with lego blocks and interpreting artistic works); users all report positive experience in using the App to recognize object colors that they otherwise could not.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Rochester", + "institution": "University of Rochester", + "dsl": "Department of Computer Science" + } + ], + "personId": 170411 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Rochester", + "institution": "University of Rochester", + "dsl": "Department of Computer Science" + } + ], + "personId": 170036 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Rochester", + "institution": "University of Rochester", + "dsl": "Goergen Institute for Data Science" + } + ], + "personId": 170015 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Rochester", + "institution": "University of Rochester", + "dsl": "Department of Computer Science" + } + ], + "personId": 169909 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Rochester", + "institution": "University of Rochester", + "dsl": "Department of Electrical and Computer Engineering" + } + ], + "personId": 169826 + } + ] + }, + { + "id": 170992, + "typeId": 13744, + "title": "Demonstrating VibraForge: An Open-source Vibrotactile Prototyping Toolkit with Scalable Modular Design", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686764" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1051", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Haptics", + "Vibrotactile", + "Toolkits", + "Wearable Devices" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We demonstrate VibraForge, an open-source vibrotactile prototyping toolkit that supports fine-grained control of up to 120 vibrotactile actuators. Our solution features modular and scalable design principles, self-contained vibration units, chain-connection topology, and custom communication protocol. Additionally, we offer a GUI editor for intuitive multi-actuator pattern authoring. This toolkit significantly lowers the barriers to haptic design and expands the design space for multi-actuator applications.\r\n\r\nIn our demonstration, participants will have the chance to experience the full process of designing vibrotactile systems from scratch using VibraForge. This includes assembling vibrotactile wearable devices, mounting them on clothes, designing vibration patterns, and real-time evaluation. 
Our goal is to showcase the transformative potential of multi-actuator systems and lower the barriers to haptic design for researchers and designers.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Dynamic Graphics Project Lab" + } + ], + "personId": 170102 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Department of Electrical and Computer Engineering" + } + ], + "personId": 169712 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Division of Engineering Science" + } + ], + "personId": 169994 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Department of Mechanical & Industrial Engineering" + } + ], + "personId": 170715 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Mechanical Engineering " + } + ], + "personId": 170177 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "The University of Toronto", + "dsl": "DGP" + } + ], + "personId": 170206 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Department of Computer Science" + } + ], + "personId": 170207 + } + ] + }, + { + "id": 170993, + "typeId": 13744, + "title": "WatchThis: A Wearable Point-and-Ask Interface powered by Vision-Language Models for Contextual Queries", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686776" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1053", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "wearable", + "camera", + "watch", + "pointing", + "vision language model" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "This paper introduces WatchThis, a novel wearable device that enables natural language interactions with real-world objects and environments through pointing gestures. Building upon previous work in gesture-based computing interfaces, WatchThis leverages recent advancements in Large Language Models (LLM) and Vision Language Models (VLM) to create a hands-free, contextual querying system. The prototype consists of a wearable watch with a rotating, flip-up camera that captures the area of interest when pointing, allowing users to ask questions about their surroundings in natural language. This design addresses limitations of existing systems that require specific commands or occupy the hands, while also maintaining a non-discrete form factor for social awareness. The paper explores various applications of this point-and-ask interaction, including object identification, translation, and instruction queries. 
By utilizing off-the-shelf components and open-sourcing the design, this work aims to facilitate further research and development in wearable, AI-enabled interaction paradigms.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT Media Lab", + "dsl": "" + } + ], + "personId": 170364 + } + ] + }, + { + "id": 170994, + "typeId": 13748, + "title": "GPTVoiceTasker: Advancing Multi-step Mobile Task Efficiency Through Dynamic Interface Exploration and Learning", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676356" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1439", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175076, + 171053 + ], + "eventIds": [], + "abstract": "Virtual assistants have the potential to play an important role in helping users achieve different tasks. However, these systems face challenges in their real-world usability, characterized by inefficiency and struggles in grasping user intentions. Leveraging recent advances in Large Language Models (LLMs), we introduce GPTVoiceTasker, a virtual assistant poised to enhance user experiences and task efficiency on mobile devices. GPTVoiceTasker excels at intelligently deciphering user commands and executing relevant device interactions to streamline task completion. For unprecedented tasks, GPTVoiceTasker utilises the contextual information and on-screen content to continuously explore and execute the tasks. In addition, the system continually learns from historical user commands to automate subsequent task invocations, further enhancing execution efficiency. From our experiments, GPTVoiceTasker achieved 84.5% accuracy in parsing human commands into executable actions and 85.7% accuracy in automating multi-step tasks. In our user study, GPTVoiceTasker boosted task efficiency in real-world scenarios by 34.85%, accompanied by positive participant feedback. 
We made GPTVoiceTasker open-source, inviting further research into LLMs utilization for diverse tasks through prompt engineering and leveraging user usage data to improve efficiency.", + "authors": [ + { + "affiliations": [ + { + "country": "Australia", + "state": "Victoria", + "city": "Clayton", + "institution": "CSIRO's Data61", + "dsl": "" + } + ], + "personId": 170652 + }, + { + "affiliations": [ + { + "country": "Australia", + "state": "VIC", + "city": "Melbourne", + "institution": "Monash University", + "dsl": "Faculty of IT" + } + ], + "personId": 170133 + }, + { + "affiliations": [ + { + "country": "Australia", + "state": "New South Wales", + "city": "Sydney", + "institution": "CSIRO's Data61", + "dsl": "" + } + ], + "personId": 169730 + }, + { + "affiliations": [ + { + "country": "Australia", + "state": "", + "city": "Melbourne", + "institution": "Monash University", + "dsl": "Faculty of IT" + } + ], + "personId": 170488 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hong Kong", + "institution": "City University of Hong Kong", + "dsl": "School of Creative Media" + } + ], + "personId": 169756 + }, + { + "affiliations": [ + { + "country": "Australia", + "state": "ACT", + "city": "ACTON", + "institution": "CSIRO's Data61 & Australian National University", + "dsl": "" + } + ], + "personId": 169799 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Heilbronn", + "institution": "Technical University of Munich", + "dsl": "School of Computation, Information and Technology" + } + ], + "personId": 170348 + } + ] + }, + { + "id": 170995, + "typeId": 13748, + "title": "CARDinality: Interactive Card-shaped Robots with Locomotion and Haptics using Vibration", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676421" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-4824", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171026, + 175087 + ], + "eventIds": [], + "abstract": "This paper introduces a novel approach to interactive robots by leveraging the form-factor of cards to create thin robots equipped with vibrational capabilities for locomotion and haptic feedback. The system is composed of flat-shaped robots with on-device sensing and wireless control, which offer lightweight portability and scalability. This research introduces a hardware prototype to explore the possibility of ‘vibration-based omni-directional sliding locomotion’. 
Applications include augmented card playing, educational tools, and assistive technology, which showcase CARDinality’s versatility in tangible interaction.\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "Department of Computer Science" + } + ], + "personId": 169974 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "Department of Computer Science" + } + ], + "personId": 170540 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170271 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Taipei", + "institution": "National Taiwan University", + "dsl": "" + } + ], + "personId": 169947 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170244 + } + ] + }, + { + "id": 170996, + "typeId": 13748, + "title": "TRAvel Slicer: Continuous Extrusion Toolpaths for 3D Printing", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676349" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1437", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171040, + 175080 + ], + "eventIds": [], + "abstract": "In this paper we present Travel Reduction Algorithm (TRAvel) Slicer, which minimizes travel movements in 3D printing. Conventional slicing software generates toolpaths with many travel movements--movements without material extrusion. Some 3D printers are incapable of starting and stopping extrusion and it is difficult to impossible to control the extrusion of many materials. This makes toolpaths with travel movements unsuitable for a wide range of printers and materials. \r\n\r\nWe developed the open-source TRAvel Slicer to enable the printing of complex 3D models on a wider range of printers and in a wider range of materials than is currently possible. TRAvel Slicer minimizes two different kinds of travel movements--what we term Inner- and Outer-Model travel. We minimize Inner-Model travel (travel within the 3D model) by generating space-filling Fermat spirals for each contiguous planar region of the model. We minimize Outer-Model travel (travels outside of the 3D model) by ordering the printing of different branches of the model, thus limiting transitions between branches. We present our algorithm and software and then demonstrate how: 1) TRAvel Slicer makes it possible to generate high-quality prints from a metal-clay material, CeraMetal, that is functionally unprintable using an off-the-shelf slicer. 
2) TRAvel Slicer dramatically increases the printing efficiency of traditional plastic 3D printing compared to an off-the-shelf slicer.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "New Mexico", + "city": "Albuquerque", + "institution": "University of New Mexico", + "dsl": "Hand and Machine, Department of Computer Science" + } + ], + "personId": 170645 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New Mexico", + "city": "Albuquerque", + "institution": "University of New Mexico", + "dsl": "Hand and Machine Lab" + } + ], + "personId": 169875 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New Mexico", + "city": "Albuquerque", + "institution": "University of New Mexico", + "dsl": "Department of Computer Science" + } + ], + "personId": 170691 + } + ] + }, + { + "id": 170997, + "typeId": 13744, + "title": "ChainBuddy: An AI-assisted Agent System for Helping Users Set up LLM Pipelines", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686763" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1054", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "large language models, agentic system, intelligent user interfaces" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "As large language models (LLMs) continue to advance, their potential applications have grown significantly. However, one persistent challenge remains: evaluating LLM behavior and crafting effective prompt chains. Many users struggle with where to start, often referred to as the \"blank page problem.\" ChainBuddy, a new evaluation assistant built into the ChainForge platform, aims to tackle this issue. It offers a straightforward and user-friendly way to plan and evaluate LLM behavior, making the process less daunting and more accessible.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Quebec", + "city": "Montreal", + "institution": "Université de montréal", + "dsl": "Département d'informatique et de recherche opérationnelle" + }, + { + "country": "Canada", + "state": "Quebec", + "city": "Montreal", + "institution": "Mila - Quebec AI Institute", + "dsl": "" + } + ], + "personId": 169836 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Quebec", + "city": "Montréal", + "institution": "Université de Montréal", + "dsl": "Montréal HCI" + } + ], + "personId": 170346 + } + ] + }, + { + "id": 170998, + "typeId": 13744, + "title": "What is Affective Touch Made Of? A Soft Capacitive Sensor Array Reveals Interplay between Shear, Pressure and Individuality", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1056", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "In affective human-robot interaction, soft polymer tactile sensing arrays provide a customizable method for capturing expressive gestural touch parameters across surfaces of varying shape and stiffness. In this demonstration, we introduce a flexible, reconfigurable, and soft 36-taxel array, which detects multitouch normal and two-dimensional shear stresses at ranges of 1.5kPa-43kPa and ±0.3-3.8kPa, respectively, wirelessly at 43Hz (1548 taxels/s). 
When the sensor was tested on a flat surface in a deep-learning classification of nine gestures (N=16), the addition of shear data increased accuracy to 88%, compared to 80% with normal stress data alone, confirming the importance of shear stress's expressive centrality. A preliminary test revealed that the trained model can, to some extent, recognize gestures performed on the sensor on a curved soft surface. We examine the interaction between sensed-touch features, gesture attributes, and individual differences, propose affective-touch sensing requirements, and discuss technical considerations for performance and practicality.\r\n\r\n\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Electrical and Computer Engineering" + } + ], + "personId": 170700 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Computer Science" + } + ], + "personId": 170275 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Electrical and Computer Engineering" + } + ], + "personId": 170104 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Computer Science" + } + ], + "personId": 169871 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Computer Science, SPIN" + } + ], + "personId": 170569 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "School of Biomedical Engineering" + } + ], + "personId": 170524 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver ", + "institution": "University of British Columbia ", + "dsl": "Computer Science" + } + ], + "personId": 170219 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Electrical and Computer Engineering" + } + ], + "personId": 170418 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Computer Science" + } + ], + "personId": 170285 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Electrical and Computer Engineering" + } + ], + "personId": 169713 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Electrical & Computer Engineering" + } + ], + "personId": 169693 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Computer Science" + } + ], + "personId": 169926 + } + ] + }, + { + "id": 170999, + "typeId": 13744, + "title": "Manipulate to Obfuscate: A Privacy-Focused Intelligent Image Manipulation Tool for 
End-Users", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686778" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1058", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "image privacy, usable security, generative artificial intelligence, image obfuscation, social media" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Image-related privacy preservation techniques often demand significant technical expertise, creating a barrier for end-users. We present a privacy-focused intelligent image manipulation tool that leverages recent advancements in generative AI to lower this barrier. Our functional prototype allows users to express their privacy concerns, identify potential privacy risks in images, and recommends relevant AI-powered obfuscation techniques to mitigate these risks and concerns. We demonstrate the tool's versatility across multiple different domains, showcasing its potential to empower users in managing their privacy across various contexts. This demonstration presents the concept, user workflow, and implementation details of our prototype, highlighting its potential to bridge the gap between privacy research and practical, user-facing tools for privacy-preserving image sharing.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 169818 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Beijing", + "city": "Beijing", + "institution": "Tsinghua University", + "dsl": "" + }, + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "" + } + ], + "personId": 170694 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Pennsylvania", + "city": "Pittsburgh", + "institution": "Carnegie Mellon University", + "dsl": "Human-Computer Interaction Institute" + } + ], + "personId": 169903 + } + ] + }, + { + "id": 171000, + "typeId": 13748, + "title": "Exploring the Effects of Sensory Conflicts on Cognitive Fatigue in VR Remappings", + "award": "HONORABLE_MENTION", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676439" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-9373", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171044, + 175085 + ], + "eventIds": [], + "abstract": "Virtual reality (VR) is found to present significant cognitive challenges due to its immersive nature and frequent sensory conflicts. This study systematically investigates the impact of sensory conflicts induced by VR remapping techniques on cognitive fatigue, and unveils their correlation. We utilized three remapping methods (haptic repositioning, head-turning redirection, and giant resizing) to create different types of sensory conflicts, and measured perceptual thresholds to induce various intensities of the conflicts. Through experiments involving cognitive tasks along with subjective and physiological measures, we found that all three remapping methods influenced the onset and severity of cognitive fatigue, with visual-vestibular conflict having the greatest impact. Interestingly, visual-experiential/memory conflict showed a mitigating effect on cognitive fatigue, emphasizing the role of novel sensory experiences. 
This study contributes to a deeper understanding of cognitive fatigue under sensory conflicts and provides insights for designing VR experiences that align better with human perceptual and cognitive capabilities. ", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Institute of Software", + "dsl": "Chinese Academy of Sciences" + }, + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "College of Computer Science and Technology", + "dsl": "University of Chinese Academy of Sciences" + } + ], + "personId": 169719 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing, China", + "institution": "Beihang University", + "dsl": "" + } + ], + "personId": 170470 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "School of Artificial Intelligence", + "dsl": "Beijing Normal University" + } + ], + "personId": 170181 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Information Engineering College,Capital Normal University", + "dsl": "" + } + ], + "personId": 170710 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Beijing Normal University", + "dsl": "School of Artificial Intelligence" + } + ], + "personId": 170313 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Institute of Software, Chinese Academy of Sciences", + "dsl": "" + } + ], + "personId": 169753 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Beijing", + "institution": "Institute of software, Chinese Academy of Sciences", + "dsl": "" + } + ], + "personId": 170289 + } + ] + }, + { + "id": 171001, + "typeId": 13748, + "title": "FathomGPT: A Natural Language Interface for Interactively Exploring Ocean Science Data", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676462" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-9494", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175083, + 171048 + ], + "eventIds": [], + "abstract": "We introduce FathomGPT, an open source system for the interactive investigation of ocean science data via a natural language interface. FathomGPT was developed in close collaboration with marine scientists to enable researchers and ocean enthusiasts to explore and analyze the FathomNet image database. FathomGPT provides a custom information retrieval pipeline that leverages OpenAI’s large language models to enable: the creation of complex queries to retrieve images, taxonomic information, and scientific measurements; mapping common names and morphological features to scientific names; generating interactive charts on demand; and searching by image or specified patterns within an image. In designing FathomGPT, particular emphasis was placed on enhancing the user's experience by facilitating free-form exploration and optimizing response times. We present an architectural overview and implementation details of FathomGPT, along with a series of ablation studies that demonstrate the effectiveness of our approach to name resolution, fine tuning, and prompt modification. 
Additionally, we present usage scenarios of interactive data exploration sessions and document feedback from ocean scientists and machine learning experts.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "" + } + ], + "personId": 170314 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "" + } + ], + "personId": 169895 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "" + } + ], + "personId": 170450 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "" + } + ], + "personId": 170246 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "" + } + ], + "personId": 170629 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Moss Landing", + "institution": "Monterey Bay Aquarium Research Institute", + "dsl": "Research & Development" + } + ], + "personId": 169858 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Indiana", + "city": "West Lafayette", + "institution": "Purdue University", + "dsl": "" + } + ], + "personId": 169851 + } + ] + }, + { + "id": 171002, + "typeId": 13748, + "title": "Hands-on, Hands-off: Gaze-Assisted Bimanual 3D Interaction", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676331" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-4479", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175074, + 171031 + ], + "eventIds": [], + "abstract": "Extended Reality (XR) systems with hand-tracking support direct manipulation of objects with both hands. A common interaction in this context is for the non-dominant hand (NDH) to orient an object for input by the dominant hand (DH). We explore bimanual interaction with gaze through three new modes of interaction where the input of the NDH, DH, or both hands is indirect based on Gaze+Pinch. These modes enable a new dynamic interplay between our hands, allowing flexible alternation between and pairing of complementary operations. Through applications, we demonstrate several use cases in the context of 3D modelling, where users exploit occlusion-free, low-effort, and fluid two-handed manipulation. To gain a deeper understanding of each mode, we present a user study on an asymmetric rotate-translate task. Most participants preferred indirect input with both hands for lower physical effort, without a penalty on user performance. Otherwise, they preferred modes where the NDH oriented the object directly, supporting preshaping of the hand, which is more challenging with indirect gestures. 
The insights gained are of relevance for the design of XR interfaces that aim to leverage eye and hand input in tandem.", + "authors": [ + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "Department of Computer Science" + } + ], + "personId": 169937 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170592 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "Department of Computer Science" + } + ], + "personId": 170367 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170507 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Google", + "dsl": "" + } + ], + "personId": 170006 + }, + { + "affiliations": [ + { + "country": "United Kingdom", + "state": "", + "city": "Lancaster", + "institution": "Lancaster University", + "dsl": "" + }, + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "" + } + ], + "personId": 169997 + }, + { + "affiliations": [ + { + "country": "Denmark", + "state": "", + "city": "Aarhus", + "institution": "Aarhus University", + "dsl": "Department of Computer Science" + } + ], + "personId": 169856 + } + ] + }, + { + "id": 171003, + "typeId": 13748, + "title": "StreetNav: Leveraging Street Cameras to Support Precise Outdoor Navigation for Blind Pedestrians", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676333" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2852", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171025, + 175075 + ], + "eventIds": [], + "abstract": "Blind and low-vision (BLV) people rely on GPS-based systems for outdoor navigation. GPS's inaccuracy, however, causes them to veer off track, run into obstacles, and struggle to reach precise destinations. While prior work has made precise navigation possible indoors via hardware installations, enabling this outdoors remains a challenge. Interestingly, many outdoor environments are already instrumented with hardware such as street cameras. In this work, we explore the idea of repurposing existing street cameras for outdoor navigation. Our community-driven approach considers both technical and sociotechnical concerns through engagements with various stakeholders: BLV users, residents, business owners, and Community Board leadership. The resulting system, StreetNav, processes a camera's video feed using computer vision and gives BLV pedestrians real-time navigation assistance. Our evaluations show that StreetNav guides users more precisely than GPS, but its technical performance is sensitive to environmental occlusions and distance from the camera. 
We discuss future implications for deploying such systems at scale.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Columbia University", + "dsl": "" + } + ], + "personId": 170301 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Columbia University ", + "dsl": "Computer Science - School of Engineering and Applied Sciences - Computer-Enabled Abilities Laboratory" + } + ], + "personId": 170385 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Columbia University", + "dsl": "Computer Science/Fu Foundation School of Engineering and Applied Science/Computer-Enabled Abilities Laboratory" + } + ], + "personId": 170029 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York ", + "institution": "Columbia University ", + "dsl": "Masters in Computer Science" + } + ], + "personId": 170397 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Columbia University", + "dsl": "" + } + ], + "personId": 169789 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Columbia University", + "dsl": "Electrical Engineering, WiMNet and NEAL lab" + } + ], + "personId": 170203 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Bronx", + "institution": "Lehman College", + "dsl": "" + } + ], + "personId": 170265 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "New York University", + "dsl": "" + } + ], + "personId": 170086 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "Claremont", + "institution": "Pomona College", + "dsl": "Computer Science" + } + ], + "personId": 170640 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "Brooklyn ", + "institution": "New York City College of Technology", + "dsl": "" + } + ], + "personId": 170125 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Columbia University", + "dsl": "Department of Electrical Engineering" + } + ], + "personId": 170065 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Columbia University", + "dsl": "Electrical Engineering" + } + ], + "personId": 170666 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Columbia University", + "dsl": "Electrical Engineering" + } + ], + "personId": 170337 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Columbia University ", + "dsl": "Electrical Engineering" + } + ], + "personId": 169866 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New York", + "city": "New York", + "institution": "Columbia University", + "dsl": "" + } + ], + "personId": 170630 + } + ] + }, + { + "id": 171004, + "typeId": 13744, + "title": "A Demo of DIAM: Drone-based Indoor Accessibility Mapping", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686782" + } + }, + 
"recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1060", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Indoor Accessibility Mapping, Indoor Scanning, Drone, 3D reconstruction" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "Indoor mapping data is crucial for navigation and accessibility, yet such data are widely lacking due to the manual labor and expense of data collection, especially for larger indoor spaces. In this demo paper, we introduce Drone-based Indoor Accessibility Mapping (DIAM), a drone-based indoor scanning system that efficiently produces 3D reconstructions of indoor spaces with automatically recognized and located accessibility features/barriers such as stairs, elevators, and doors automatically. With DIAM, our goal is to scan indoor spaces quickly and generate a precise, detailed, and visual 3D indoor accessibility map. We describe DIAM's system design, present its technical capabilities, and discuss future use cases.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science and Engineering" + } + ], + "personId": 169981 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Human Centered Design & Engineering" + } + ], + "personId": 169869 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington ", + "dsl": "Computer Science" + } + ], + "personId": 169743 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science and Engineering" + } + ], + "personId": 169684 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 170537 + } + ] + }, + { + "id": 171005, + "typeId": 13744, + "title": "MobiPrint: A Mobile 3D Printer for Environment-Scale Design and Fabrication", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1062", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "3D printing is transforming how we customize and create physical objects in engineering, accessibility, and art. However, this technology is still primarily limited to confined working areas and dedicated print beds thereby detaching design and fabrication from real-world environments and making measuring and scaling objects tedious and labor-intensive. In this paper, we present MobiPrint, a prototype mobile fabrication system that combines elements from robotics, architecture, and Human-Computer Interaction (HCI) to enable environment-scale design and fabrication in ad-hoc indoor environments. MobiPrint provides a multi-stage fabrication pipeline: first, the robotic 3D printer automatically maps an indoor space using LiDAR scanning; second, a custom design tool converts the map into an interactive CAD canvas for 3D modeling and physical-world model placement; finally, the MobiPrint robot prints the object directly on the ground at the defined location. 
Through a \"proof-by-demonstration\" validation, we highlight our system’s potential across different applications, including accessibility, home furnishing, floor signage, and art. We also conduct a technical evaluation to assess MobiPrint’s localization accuracy, ground surfaces adhesion, payload capacity, and mapping speed and discuss how our prototype could inform future work in mobile fabrication that aims to integrate custom 3D designs into real-world settings.\r\n\r\n", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science" + } + ], + "personId": 169904 + } + ] + }, + { + "id": 171006, + "typeId": 13748, + "title": "StructCurves: Interlocking Block-Based Line Structures", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676354" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-5329", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175068, + 171052 + ], + "eventIds": [], + "abstract": "We present a new class of curved block-based line structures whose component chains are flexible when separated, and provably rigid when assembled together into an interlocking double chain. The joints are inspired by traditional zippers, where a binding fabric or mesh connects individual teeth.\r\nUnlike traditional zippers, the joint design produces a rigid interlock with programmable curvature. This allows fairly strong curved structures to be built out of easily stored flexible chains. \r\nIn this paper, we introduce a pipeline for generating these curved structures using a novel block design template based on revolute joints. \r\nMesh embedded in these structures maintains block spacing and assembly order. We evaluate the rigidity of the curved structures through mechanical performance testing and demonstrate several applications. ", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Boston", + "institution": "Boston University", + "dsl": "Computer Science" + } + ], + "personId": 170438 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "New Hampshire", + "city": "Hanover", + "institution": "Dartmouth College", + "dsl": "Computer Science" + } + ], + "personId": 169714 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Boston", + "institution": "Boston University", + "dsl": "Computer Science" + } + ], + "personId": 170069 + } + ] + }, + { + "id": 171007, + "typeId": 13748, + "title": "X-Hair: 3D Printing Hair-like Structures with Multi-form, Multi-property and Multi-function", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676360" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1767", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171020, + 175086 + ], + "eventIds": [], + "abstract": "In this paper, we present X-Hair, a method that enables 3D-printed hair with various forms, properties, and functions. We developed a two-step suspend printing strategy to fabricate hair-like structures in different forms (e.g. fluff, bristle, barb) by adjusting parameters including Extrusion Length Ratio and Total Length. 
Moreover, a design tool is also established for users to customize hair-like structures with various properties (e.g. pointy, stiff, soft) on imported 3D models, which virtually shows the results for previewing and generates G-code files for 3D printing. We demonstrate the design space of X-Hair and evaluate the properties of them with different parameters. Through a series of applications with hair-like structures, we validate X-hair's practical usage of biomimicry, decoration, heat preservation, adhesion, and haptic interaction.", + "authors": [ + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170103 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170034 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "College of Computer Science and Technology" + } + ], + "personId": 170045 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170211 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170333 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 169768 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 170513 + }, + { + "affiliations": [ + { + "country": "China", + "state": "Zhejiang", + "city": "Hangzhou", + "institution": "Hangzhou City University", + "dsl": "" + } + ], + "personId": 169784 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "" + } + ], + "personId": 169915 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Zhejiang University", + "dsl": "International Design Institute" + } + ], + "personId": 169901 + }, + { + "affiliations": [ + { + "country": "China", + "state": "", + "city": "Hangzhou", + "institution": "Hangzhou City University", + "dsl": "" + } + ], + "personId": 170366 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Cambridge", + "institution": "MIT", + "dsl": "CSAIL" + } + ], + "personId": 170012 + } + ] + }, + { + "id": 171008, + "typeId": 13755, + "title": "Parasitic or Symbiotic? Redefining our Relationship with Intelligent Systems", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3586182.3695611" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24f-8505", + "source": "PCS", + "trackId": 13201, + "tags": [], + "keywords": [], + "sessionIds": [ + 171039 + ], + "eventIds": [], + "abstract": "My UIST vision is to fundamentally change our approach to designing intelligent interactive systems. Rather than creating parasitic systems, our goal should be to create “human-computer partnerships” that establish symbiotic relationships between artificial intelligence (AI) and human users. 
This requires assessing the impact of users interacting with intelligent systems over the short, medium and long term. We also need to ensure that users control their level of agency, ranging from delegation to retaining full control. Finally, we need to understand how users and AI systems affect each other’s behavior over time. This implies we need to explicitly support “reciprocal co-adaptation” where users both learn from and appropriate (adapt and adapt to) intelligent systems, and those systems in turn both learn from and affect users over time", + "authors": [ + { + "affiliations": [ + { + "country": "France", + "state": "none", + "city": "Gif-sur-Yvette", + "institution": "Université Paris-Saclay", + "dsl": "LISN" + }, + { + "country": "France", + "state": "none", + "city": "Gif-sur-Yvette", + "institution": "Inria", + "dsl": "ExSitu" + } + ], + "personId": 169893 + } + ] + }, + { + "id": 171009, + "typeId": 13748, + "title": "VizAbility: Enhancing Chart Accessibility with LLM-based Conversational Interaction", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676414" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-9380", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171024, + 175078 + ], + "eventIds": [], + "abstract": "Traditional accessibility methods like alternative text and data tables typically underrepresent data visualization's full potential. Keyboard-based chart navigation has emerged as a potential solution, yet efficient data exploration remains challenging. We present VizAbility, a novel system that enriches chart content navigation with conversational interaction, enabling users to use natural language for querying visual data trends. VizAbility adapts to the user's navigation context for improved response accuracy and facilitates verbal command-based chart navigation. Furthermore, it can address queries for contextual information, designed to address the needs of visually impaired users. We designed a large language model (LLM)-based pipeline to address these user queries, leveraging chart data & encoding, user context, and external web knowledge. We conducted both qualitative and quantitative studies to evaluate VizAbility's multimodal approach. We discuss further opportunities based on the results, including improved benchmark testing, incorporation of vision models, and integration with visualization workflows.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Chestnut Hill", + "institution": "Boston College", + "dsl": "Computer Science Department" + } + ], + "personId": 170117 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Boston", + "institution": "MIT", + "dsl": "Department of Electrical Engineering and Computer Science" + } + ], + "personId": 170617 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Chestnut Hill", + "institution": "Boston College", + "dsl": "computer science" + } + ], + "personId": 170660 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Massachusetts", + "city": "Chestnut Hill", + "institution": "Boston College", + "dsl": "Computer Science" + } + ], + "personId": 170611 + } + ] + }, + { + "id": 171010, + "typeId": 13748, + "title": "What is Affective Touch Made Of? 
A Soft Capacitive Sensor Array Reveals the Interplay between Shear, Normal Stress and Individuality", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676346" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8730", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171044, + 175085 + ], + "eventIds": [], + "abstract": "Humans physically express emotion by modulating parameters that register on mammalian skin mechanoreceptors, but are unavailable in current touch-sensing technology. \r\nGreater sensory richness combined with data on affect-expression composition is a prerequisite to estimating affect from touch, with applications including physical human-robot interaction. To examine shear alongside more easily captured normal stresses, we tailored recent capacitive technology to attain performance suitable for affective touch, creating a flexible, reconfigurable and soft 36-taxel array that detects multitouch normal and 2-dimensional shear at ranges of 1.5kPa-43kPa and $\\pm$ 0.3-3.8kPa respectively, wirelessly at ~43Hz (1548 taxels/s). In a deep-learning classification of 9 gestures (N=16), inclusion of shear data improved accuracy to 88\\%, compared to 80\\% with normal stress data alone, confirming shear stress's expressive centrality. \r\nUsing this rich data, we analyse the interplay of sensed-touch features, gesture attributes and individual differences, propose affective-touch sensing requirements, and share technical considerations for performance and practicality.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Computer Science" + } + ], + "personId": 170275 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Electrical and Computer Engineering" + } + ], + "personId": 170104 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Electrical and Computer Engineering" + } + ], + "personId": 170700 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Computer Science" + } + ], + "personId": 169871 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Computer Science, SPIN" + } + ], + "personId": 170569 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "School of Biomedical Engineering" + } + ], + "personId": 170524 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver ", + "institution": "University of British Columbia ", + "dsl": "Computer Science" + } + ], + "personId": 170219 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Electrical and Computer Engineering" + } + ], + "personId": 170418 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + 
"institution": "University of British Columbia", + "dsl": "Computer Science" + } + ], + "personId": 170285 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Electrical and Computer Engineering" + } + ], + "personId": 169713 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Electrical & Computer Engineering" + } + ], + "personId": 169693 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "British Columbia", + "city": "Vancouver", + "institution": "University of British Columbia", + "dsl": "Computer Science" + } + ], + "personId": 169926 + } + ] + }, + { + "id": 171011, + "typeId": 13744, + "title": "Pro-Tact: Hierarchical Synthesis of Proprioception and Tactile Exploration for Eyes-Free Ray Pointing on Out-of-View VR Menus", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1067", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We introduce Pro-Tact, a novel eyes-free pointing technique for interacting with out-of-view (OoV) VR menus. The technique combines rapid rough point using proprioception and fine-grain adjustment with tactile exploration to enable menu interaction without requiring visual attention. Our user study demonstrated that Pro-Tact allowed users to select menu items accurately (95\\% for 54 items) in an eyes-free manner with reduced fatigue and sickness compared to eyes-engaged condition. Participants also expressed a preference for eyes-free interaction employing Pro-Tact in practical VR application scenarios. This research contributes by introducing a novel interaction technique and quantitatively evaluating its benefits in terms of performance and user experience in OoV menu interaction.", + "authors": [ + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 169930 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 169777 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "HCI Lab, School of Computing" + } + ], + "personId": 170398 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 170628 + }, + { + "affiliations": [ + { + "country": "Korea, Republic of", + "state": "", + "city": "Daejeon", + "institution": "School of Computing, KAIST", + "dsl": "HCI Lab" + } + ], + "personId": 169896 + } + ] + }, + { + "id": 171012, + "typeId": 13748, + "title": "CoLadder: Manipulating Code Generation via Multi-Level Blocks", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676357" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-8053", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171055, + 175081 + ], + "eventIds": [], + "abstract": "This paper adopted an iterative design process to gain insights into programmers' strategies when using LLMs for programming. 
We proposed CoLadder, a novel system that supports programmers by facilitating hierarchical task decomposition, direct code segment manipulation, and result evaluation during prompt authoring. A user study with 12 experienced programmers showed that CoLadder is effective in helping programmers externalize their problem-solving intentions flexibly, improving their ability to evaluate and modify code across various abstraction levels, from their task's goal to final code implementation.", + "authors": [ + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Waterloo", + "institution": "University of Waterloo", + "dsl": "David R. Cheriton School of Computer Science" + } + ], + "personId": 170459 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Waterloo", + "institution": "University of Waterloo", + "dsl": "School of Computer Science" + } + ], + "personId": 170007 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Toronto", + "institution": "University of Toronto", + "dsl": "Department of Computer Science" + } + ], + "personId": 170040 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Diego", + "institution": "University of California, San Diego", + "dsl": "Department of Cognitive Science and Design Lab" + } + ], + "personId": 170393 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Ontario", + "city": "Waterloo", + "institution": "University of Waterloo", + "dsl": "School of Computer Science" + } + ], + "personId": 169723 + } + ] + }, + { + "id": 171013, + "typeId": 13756, + "title": "Empathy-GPT: Leveraging Large Language Models to Enhance Emotional Empathy and User Engagement in Embodied Conversational Agents", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686729" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24d-1499", + "source": "PCS", + "trackId": 13203, + "tags": [], + "keywords": [], + "sessionIds": [ + 171027 + ], + "eventIds": [], + "abstract": "Emotional empathy, the ability to understand and respond to others’ emotions, is essential for effective communication. We propose Empathy-GPT, featuring embodied conversational agents with empathic capacity. To address the limitations of rule-based conversational agents, we leverage contextual understanding and adaptation capabilities of large language models (LLMs) to coordinate multiple modalities (e.g., agent's tone, body movements, and facial expressions). 
To enhance user engagement in human-agent communication, agents dynamically respond to users' voices and facial expressions, providing contextually empathic responses.", + "authors": [ + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Hsinchu", + "institution": "National Yang Ming Chiao Tung University", + "dsl": "Institute of Computer Science and Engineering" + }, + { + "country": "Taiwan", + "state": "", + "city": "Hsinchu", + "institution": "National Yang Ming Chiao Tung University", + "dsl": "Institute of Biomedical Engineering" + } + ], + "personId": 170453 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Hsinchu", + "institution": " National Yang Ming Chiao Tung University", + "dsl": "" + }, + { + "country": "Taiwan", + "state": "", + "city": "Hsinchu", + "institution": " National Yang Ming Chiao Tung University", + "dsl": "" + } + ], + "personId": 170440 + }, + { + "affiliations": [ + { + "country": "Taiwan", + "state": "", + "city": "Hsinchu", + "institution": "Institute of Computer Science and Engineering", + "dsl": "National Yang Ming Chiao Tung University" + }, + { + "country": "Taiwan", + "state": "", + "city": "Hsinchu", + "institution": "Institute of Computer Science and Engineering", + "dsl": "National Yang Ming Chiao Tung University" + } + ], + "personId": 170281 + } + ] + }, + { + "id": 171014, + "typeId": 13744, + "title": "SHAPE-IT: Exploring Text-to-Shape-Display for Generative Shape-Changing Behaviors with LLMs", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1070", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "This paper introduces text-to-shape-display, a novel approach to generating dynamic shape changes in pin-based shape displays through natural language commands. By leveraging large language models (LLMs), our approach allows users to author shape-changing behaviors on demand through text prompts without programming. We describe the foundational aspects necessary for such a system, including the identification of key generative elements (primitive, animation, and interaction) and design requirements to enhance user interaction, based on formative exploration and iterative design processes. Based on these insights, we develop SHAPE-IT, an LLM-based authoring tool for a 24 x 24 shape display, which translates the user's textual command into executable code and allows for quick exploration through a web-based control interface. We evaluate the effectiveness of SHAPE-IT in two ways: 1) performance evaluation and 2) user evaluation (N= 10). The study conclusions highlight the ability to facilitate rapid ideation of a wide range of shape-changing behaviors with AI. 
However, the findings also expose accuracy-related challenges and limitations, prompting further exploration into refining the framework for leveraging AI to better suit the unique requirements of shape-changing systems.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 169803 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 169959 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170271 + }, + { + "affiliations": [ + { + "country": "Canada", + "state": "Alberta", + "city": "Calgary", + "institution": "University of Calgary", + "dsl": "" + } + ], + "personId": 169785 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Illinois", + "city": "Chicago", + "institution": "University of Chicago", + "dsl": "" + } + ], + "personId": 170244 + } + ] + }, + { + "id": 171015, + "typeId": 13744, + "title": "Demo of FlowRing: Seamless Cross-Surface Interaction via Opto-Acoustic Ring", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3672539.3686744" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24b-1075", + "source": "PCS", + "trackId": 13205, + "tags": [], + "keywords": [ + "Input Technology", + "2D Tracking", + "Microgestures Detection", + "Ring" + ], + "sessionIds": [ + 171035 + ], + "eventIds": [], + "abstract": "We demonstrate FlowRing, a ring-form-factor input device that enables interaction across a range of ad-hoc surfaces including desks, pants, palms and fingertips with seamless switching between them. This versatility supports systems that require both high precision as well as mobile control, such as mobile XR. FlowRing consists of a miniature optical flow sensor, skin-contact microphone, and IMU, providing a unique ergonomic design that rests at the base of the finger like conventional jewelry. We show the potential of FlowRing to enable precise control of interfaces on available surfaces via music player application and whiteboarding application.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Electrical & Computer Engineering" + } + ], + "personId": 170038 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 169910 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. Allen School of Computer Science & Engineering" + } + ], + "personId": 170674 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Paul G. 
Allen School for Computer Science & Engineering" + } + ], + "personId": 169822 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "" + } + ], + "personId": 170608 + } + ] + }, + { + "id": 171016, + "typeId": 13748, + "title": "LoopBot: Representing Continuous Haptics of Grounded Objects in Room-scale VR", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676389" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1650", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 175059, + 171041 + ], + "eventIds": [], + "abstract": "In room-scale virtual reality, providing continuous haptic feedback from touching grounded objects, such as walls and handrails, has been challenging due to the user's walking range and the required force. In this study, we propose LoopBot, a novel technique to provide continuous haptic feedback from grounded objects using only a single user-following robot. Specifically, LoopBot is equipped with a loop-shaped haptic prop attached to an omnidirectional robot that scrolls to cancel out the robot's displacement, giving the user the haptic sensation that the prop is actually fixed in place, or ``grounded.'' We first introduce the interaction design space of LoopBot and, as one of its promising interaction scenarios, implement a prototype for the experience of walking while grasping handrails. A performance evaluation shows that scrolling the prop cancels $77.5\\%$ of the robot's running speed on average. A preliminary user test ($N=10$) also shows that the subjective realism of the experience and the sense of the virtual handrails being grounded were significantly higher than when the prop was not scrolled. 
Based on these findings, we discuss possible further development of LoopBot.", + "authors": [ + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Sendai", + "institution": "Tohoku University", + "dsl": "" + } + ], + "personId": 170222 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "Miyagi", + "city": "Sendai", + "institution": "Tohoku University", + "dsl": "Research Institute of Electrical Communication" + } + ], + "personId": 169983 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Miyagi", + "institution": "Tohoku University", + "dsl": "" + } + ], + "personId": 170625 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Sendai", + "institution": "Tohoku University", + "dsl": "" + } + ], + "personId": 170196 + }, + { + "affiliations": [ + { + "country": "Japan", + "state": "", + "city": "Sendai", + "institution": "Tohoku University", + "dsl": "Research Institute of Electrical Communication" + } + ], + "personId": 170296 + } + ] + }, + { + "id": 171017, + "typeId": 13748, + "title": "Predicting the Limits: Tailoring Unnoticeable Hand Redirection Offsets in Virtual Reality to Individuals’ Perceptual Boundaries", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676425" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-1539", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171030, + 175072 + ], + "eventIds": [], + "abstract": "Many illusion and interaction techniques in Virtual Reality (VR) rely on Hand Redirection (HR), which has proved to be effective as long as the introduced offsets between the position of the real and virtual hand do not noticeably disturb the user experience. Yet calibrating HR offsets is a tedious and time-consuming process involving psychophysical experimentation, and the resulting thresholds are known to be affected by many variables---limiting HR's practical utility. As a result, there is a clear need for alternative methods that allow tailoring HR to the perceptual boundaries of individual users. We conducted an experiment with 18 participants combining movement, eye gaze and EEG data to detect HR offsets Below, At, and Above individuals' detection thresholds. Our results suggest that we can distinguish HR At and Above from no HR. 
Our exploration provides a promising new direction with potentially strong implications for the broad field of VR illusions.", + "authors": [ + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Saarbrücken", + "institution": "DFKI and Saarland University, Saarland Informatics Campus", + "dsl": "" + } + ], + "personId": 170115 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Saarbrücken", + "institution": "Saarland Informatics Campus", + "dsl": "DFKI" + } + ], + "personId": 170142 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Berlin", + "institution": "TU Berlin", + "dsl": "" + } + ], + "personId": 169921 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Saarbrücken", + "institution": "Saarland University, Saarland Informatics Campus", + "dsl": "" + } + ], + "personId": 170054 + }, + { + "affiliations": [ + { + "country": "Singapore", + "state": "", + "city": "Singapore", + "institution": "Singapore Management University", + "dsl": "" + } + ], + "personId": 170339 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Saarbrücken", + "institution": "DFKI, Saarland Informatics Campus", + "dsl": "" + } + ], + "personId": 170451 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Saarbrücken", + "institution": "DFKI, Saarland Informatic Campus", + "dsl": "" + } + ], + "personId": 170143 + }, + { + "affiliations": [ + { + "country": "Germany", + "state": "", + "city": "Saarbrücken", + "institution": "DFKI, Saarland Informatics Campus", + "dsl": "" + } + ], + "personId": 170530 + } + ] + }, + { + "id": 171018, + "typeId": 13748, + "title": "IRIS: Wireless Ring for Vision-based Smart Home Interaction", + "addons": { + "doi": { + "type": "doiLink", + "url": "10.1145/3654777.3676327" + } + }, + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24a-2746", + "source": "PCS", + "trackId": 13206, + "tags": [], + "keywords": [], + "sessionIds": [ + 171056, + 175062 + ], + "eventIds": [], + "abstract": "Integrating cameras into wireless smart rings has been challenging due to size and power constraints. We introduce IRIS, the first wireless vision-enabled smart ring system for smart home interactions. Equipped with a camera, Bluetooth radio, inertial measurement unit (IMU), and an onboard battery, IRIS meets the small size, weight, and power (SWaP) requirements for ring devices. IRIS is context-aware, adapting its gesture set to the detected device, and can last for 16-24 hours on a single charge. IRIS leverages the scene semantics to achieve instance-level device recognition. In a study involving 23 participants, IRIS consistently outpaced voice commands, with a higher proportion of participants expressing a preference for IRIS over voice commands regarding toggling a device's state, granular control, and social acceptability. 
Our work pushes the boundary of what is possible with ring form-factor devices, addressing system challenges and opening up novel interaction capabilities.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Mobile Intelligence Lab" + } + ], + "personId": 169691 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Mobile Intelligence Lab" + } + ], + "personId": 170607 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Mobile Intelligence Lab" + } + ], + "personId": 170656 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "" + } + ], + "personId": 170602 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "" + } + ], + "personId": 170591 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "" + } + ], + "personId": 170504 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "" + } + ], + "personId": 170608 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "University of Washington", + "dsl": "Mobile Intelligence Lab" + } + ], + "personId": 169985 + } + ] + }, + { + "id": 171101, + "typeId": 13758, + "title": "Opening Remarks & Keynote (Raj Reddy)", + "recognitionIds": [], + "isBreak": false, + "importedId": "1", + "source": "CSV", + "trackId": 13017, + "tags": [], + "keywords": [], + "sessionIds": [ + 171066 + ], + "eventIds": [], + "abstract": "A conversation with Raj Reddy, one of the pioneers of large-scale AI systems, on the future of AI and its role in the society.", + "authors": [ + { + "affiliations": [], + "personId": 171100 + } + ] + }, + { + "id": 171103, + "typeId": 13758, + "title": "Keynote: Photorealistic Telepresence", + "recognitionIds": [], + "isBreak": false, + "importedId": "2", + "source": "CSV", + "trackId": 13017, + "tags": [], + "keywords": [], + "sessionIds": [ + 171065 + ], + "eventIds": [], + "abstract": "Telepresence has the potential to bring billions of people into artificial reality (AR/MR/VR). It is the next step in the evolution of telecommunication, from telegraphy to telephony to videoconferencing. In this talk, I will describe early steps taken at Meta Reality Pittsburgh towards achieving photorealistic telepresence: realtime social interactions in AR/VR with avatars that look like you, move like you, and sound like you. If successful, photorealistic telepresence will introduce pressure for the concurrent development of the next generation of algorithms and computing platforms for computer vision and computer graphics. In particular, I will introduce codec avatars: the use of neural networks to unify the computer vision (inference) and computer graphics (rendering) problems in signal transmission and reception. 
The creation of codec avatars requires capture systems of unprecedented 3D sensing resolution, which I will also describe.", + "authors": [ + { + "affiliations": [], + "personId": 171102 + } + ] + }, + { + "id": 171110, + "typeId": 13758, + "title": "Test of Time Award Talk", + "recognitionIds": [], + "isBreak": false, + "importedId": "3", + "source": "CSV", + "trackId": 13017, + "tags": [], + "keywords": [], + "sessionIds": [ + 171067 + ], + "eventIds": [], + "authors": [] + }, + { + "id": 172831, + "typeId": 13757, + "title": "MemoVis: A GenAI-Powered Tool for Creating Companion Reference Images for 3D Design Feedback", + "recognitionIds": [], + "isBreak": false, + "importedId": "uist24h-1003", + "source": "PCS", + "trackId": 13207, + "tags": [], + "keywords": [], + "sessionIds": [ + 175068, + 171052 + ], + "eventIds": [], + "abstract": "Providing asynchronous feedback is a critical step in the 3D design workflow. A common approach to providing feedback is to pair textual comments with companion reference images, which helps illustrate the gist of text. Ideally, feedback providers should possess 3D and image editing skills to create reference images that can effectively describe what they have in mind. However, they often lack such skills, so they have to resort to sketches or online images which might not match well with the current 3D design. To address this, we introduce MemoVis, a text editor interface that assists feedback providers in creating reference images with generative AI driven by the feedback comments. First, a novel real-time viewpoint suggestion feature, based on a vision-language foundation model, helps feedback providers anchor a comment with a camera viewpoint. Second, given a camera viewpoint, we introduce three types of image modifiers, based on pre-trained 2D generative models, to turn a text comment into an updated version of the 3D scene from that viewpoint. We conducted a within-subjects study with 14 feedback providers, demonstrating the effectiveness of MemoVis. 
The quality and explicitness of the companion images were evaluated by another eight participants with prior 3D design experience.", + "authors": [ + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "La Jolla", + "institution": "University of California San Diego", + "dsl": "Computer Science and Engineering" + }, + { + "country": "United States", + "state": "California", + "city": "La Jolla", + "institution": "University of California San Diego", + "dsl": "The Design Lab" + } + ], + "personId": 172828 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 172827 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "San Francisco", + "institution": "Adobe Research", + "dsl": "" + } + ], + "personId": 172830 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "Washington", + "city": "Seattle", + "institution": "Adobe", + "dsl": "Adobe Research" + } + ], + "personId": 172825 + }, + { + "affiliations": [ + { + "country": "United States", + "state": "California", + "city": "La Jolla", + "institution": "University of California San Diego", + "dsl": "Computer Science and Engineering" + }, + { + "country": "United States", + "state": "California", + "city": "La Jolla", + "institution": "University of California San Diego", + "dsl": "The Design Lab" + } + ], + "personId": 172826 + } + ] + } + ], + "people": [ + { + "id": 169677, + "firstName": "Hariharan", + "lastName": "Subramonyam", + "middleInitial": "", + "importedId": "8uB_Jqw_cfCMBk7zG0Bx_g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169678, + "firstName": "Lihan", + "lastName": "Chen", + "middleInitial": "", + "importedId": "UWZ4mdQmqBe7beI4aZW3oA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169679, + "firstName": "Nikolas", + "lastName": "Martelaro", + "middleInitial": "", + "importedId": "9NGSZTdtf2HFcb1UzsbraQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169680, + "firstName": "Shang", + "lastName": "Shi", + "middleInitial": "", + "importedId": "--zL0DV8od4IdyjVswfl2Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169681, + "firstName": "Mary", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "FeB0H5D_SMe-ZWm4nSp_Ww", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169682, + "firstName": "Xiyun", + "lastName": "Hu", + "middleInitial": "", + "importedId": "cMU6fGiQps5q3szib1nD7A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169683, + "firstName": "Rahul", + "lastName": "Jain", + "middleInitial": "", + "importedId": "mfTaRRSgYeJ2PsLpZSedWg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169684, + "firstName": "Jingwei", + "lastName": "Ma", + "middleInitial": "", + "importedId": "pGH0Hz8JnOO4PYv5nRO_wg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169685, + "firstName": "Kashyap", + "lastName": "Todi", + "middleInitial": "", + "importedId": "biWjVLs70rX0liHWYQRenQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169686, + "firstName": "Jun", + "lastName": "Nishida", + "middleInitial": "", + "importedId": "Ws6-56wJYVvZ4w-Uz7-BjQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169688, + "firstName": "Ximing", + "lastName": "Shen", + "middleInitial": "", + "importedId": "xLWldfGX2kKvl1W4eMw2TQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169689, + "firstName": 
"Masaaki", + "lastName": "Mochimaru", + "middleInitial": "", + "importedId": "MY_lixu7jegagOenZnYzGA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169690, + "firstName": "Jacob", + "lastName": "Gettig", + "middleInitial": "", + "importedId": "KPyJqJuykTX8B6MB_tmluA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169691, + "firstName": "Maruchi", + "lastName": "Kim", + "middleInitial": "", + "importedId": "r_0sGHHd_MUA2mydi1lEbQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169692, + "firstName": "Lyumanshan", + "lastName": "Ye", + "middleInitial": "", + "importedId": "FkrmMSiMlLOTZIZTPt3qmw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169693, + "firstName": "John", + "lastName": "Madden", + "middleInitial": "David Wyndham", + "importedId": "7Gk3_hdxpmd2SIXpS_J4_A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169694, + "firstName": "Yifu", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "A4KsNhwtVT4tilSVZzrPnw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169695, + "firstName": "Michael", + "lastName": "Wessely", + "middleInitial": "", + "importedId": "qRALpo5vu6f15uoXT7L8hw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169696, + "firstName": "Marwa", + "lastName": "AlAlawi", + "middleInitial": "", + "importedId": "Et85KPRF7bO0zVqYuARZrw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169697, + "firstName": "Roger", + "lastName": "Boldu", + "middleInitial": "", + "importedId": "kyzo3UIA3FQB_trpIo5Hsg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169698, + "firstName": "Victor", + "lastName": "S. Bursztyn", + "middleInitial": "", + "importedId": "obeUyaAH70ug85GAyZ-tOg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169699, + "firstName": "Jingyu", + "lastName": "Shi", + "middleInitial": "", + "importedId": "D6Z-UGT8CuISvCYUNegRAQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169700, + "firstName": "Julia", + "lastName": "Kleinau", + "middleInitial": "", + "importedId": "1Akf800vMaftcSyzqMPFRA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169701, + "firstName": "Noura", + "lastName": "Howell", + "middleInitial": "", + "importedId": "8Fz7_5RXgtO4TW_krjv_3w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169702, + "firstName": "Yuta", + "lastName": "Noma", + "middleInitial": "", + "importedId": "LRAI1qMkxkkyiBpyVSaBOA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169703, + "firstName": "Ashlyn", + "lastName": "Sparrow", + "middleInitial": "", + "importedId": "hn2QLZ1gcEQ5aMfHrACYDw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169704, + "firstName": "Qing", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "Q6BVkFDfzqdL3mLrZqrBIQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169705, + "firstName": "J", + "lastName": "Gonzalez Avila", + "middleInitial": "Felipe", + "importedId": "tCYEvEa0tTdfSoXtmAHuEA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169706, + "firstName": "Nick", + "lastName": "Colonnese", + "middleInitial": "", + "importedId": "XXRxqHvvUkn4WQL000-5vw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169707, + "firstName": "Jun", + "lastName": "Rekimoto", + "middleInitial": "", + "importedId": "2Guk0EWER7zUs-hWrmCSsQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169708, + "firstName": "Ian", + "lastName": "Oakley", + "middleInitial": "", + "importedId": "b2tvt-MyhdyGByDL4JTqtQ", + "source": "PCS", + "affiliations": [] + }, 
+ { + "id": 169710, + "firstName": "Per Ola", + "lastName": "Kristensson", + "middleInitial": "", + "importedId": "Pt7egBTd0PmDizY4N8kNDQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169711, + "firstName": "Serene", + "lastName": "Cheon", + "middleInitial": "", + "importedId": "vHqVv41WdRLT4Em-F_urdw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169712, + "firstName": "Hanfeng", + "lastName": "Cai", + "middleInitial": "", + "importedId": "buwDQdSqXzMdZj6KJjrDcw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169713, + "firstName": "Ying", + "lastName": "Li", + "middleInitial": "", + "importedId": "thxB3ndnX9x21ymKWlLIqw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169714, + "firstName": "Devin", + "lastName": "Balkcom", + "middleInitial": "", + "importedId": "eNDj-fxEbQWKwS52lGZbZQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169715, + "firstName": "Jas", + "lastName": "Brooks", + "middleInitial": "", + "importedId": "k2LyUqEimjE87jfl9U9VgQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169716, + "firstName": "Xiaofu", + "lastName": "Jin", + "middleInitial": "", + "importedId": "RFGQNlNOqV5RejD6w1pKJA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169717, + "firstName": "Peitong", + "lastName": "Duan", + "middleInitial": "", + "importedId": "DctRVnS9-nqMKIFkxtBMqQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169718, + "firstName": "Jaeheon", + "lastName": "Kwak", + "middleInitial": "", + "importedId": "g5f1E5wEgzMJ_ugLB0-H8g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169719, + "firstName": "Tianren", + "lastName": "Luo", + "middleInitial": "", + "importedId": "slLBKiGAjSYbXIuph7-uNg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169720, + "firstName": "Micha", + "lastName": "Offe", + "middleInitial": "", + "importedId": "HfOLNAwUTYUvxqEii-qKMQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169721, + "firstName": "Ryo", + "lastName": "Takahashi", + "middleInitial": "", + "importedId": "W1xfJH1IwLShsQas8yM1sQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169722, + "firstName": "Zhiming", + "lastName": "Hu", + "middleInitial": "", + "importedId": "evcQEtJTIGDkKUM_0i1kVg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169723, + "firstName": "Jian", + "lastName": "Zhao", + "middleInitial": "", + "importedId": "8dq-zKTcAycM099Ji424lg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169724, + "firstName": "Gilbert", + "lastName": "Bernstein", + "middleInitial": "Louis", + "importedId": "gRvdpGFNnALSO0odfwoP_w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169725, + "firstName": "Yiyue", + "lastName": "Luo", + "middleInitial": "", + "importedId": "5CPGyyXKdVhEYOpiJhyCIg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169726, + "firstName": "Megan", + "lastName": "Hofmann", + "middleInitial": "", + "importedId": "C82eVJWjVtjKXzredlQKUg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169727, + "firstName": "Catherine", + "lastName": "Mei", + "middleInitial": "", + "importedId": "539paV8-TbCGwAqG1E0lWA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169728, + "firstName": "Qianzhi", + "lastName": "Jing", + "middleInitial": "", + "importedId": "o5Nt529Lr9SOHsGDKr53WQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169729, + "firstName": "Sarah", + "lastName": "Chasins", + "middleInitial": "E.", + "importedId": "8vOMlM09isbluPujz5S5bA", + "source": "PCS", + "affiliations": 
[] + }, + { + "id": 169730, + "firstName": "Jieshan", + "lastName": "Chen", + "middleInitial": "", + "importedId": "APD5zlTaGFwbTC-zXn94wg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169731, + "firstName": "Bala", + "lastName": "Kumaravel", + "middleInitial": "", + "importedId": "bBD3eKAu9ZqStsM3ZmVDtg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169732, + "firstName": "Qiao", + "lastName": "Jin", + "middleInitial": "", + "importedId": "ZmlV-dnbnVL5fRjlxwufZg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169733, + "firstName": "Eric", + "lastName": "Gilbert", + "middleInitial": "", + "importedId": "gU1A2fXqwFHv6AZHDz2xTw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169734, + "firstName": "Prakarsh", + "lastName": "Yadav", + "middleInitial": "", + "importedId": "5faZcUkGjuXFkqmHndmTEg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169735, + "firstName": "Angelique", + "lastName": "Taylor", + "middleInitial": "", + "importedId": "HB4AE6fNBF5HqSd12hitxw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169736, + "firstName": "Peli", + "lastName": "de Halleux", + "middleInitial": "", + "importedId": "Ky7DGX_tE8kThIohnvWjtw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169737, + "firstName": "Afroza", + "lastName": "Sultana", + "middleInitial": "", + "importedId": "jTSub6I8VP_R1iSPyal14g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169738, + "firstName": "Jiayi", + "lastName": "Lu", + "middleInitial": "", + "importedId": "4SSPBHtDWbk-Lp0MTGrSFg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169739, + "firstName": "Yoshihiro", + "lastName": "Kawahara", + "middleInitial": "", + "importedId": "7kMCYcI5yPwQRg4zZ61IkQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169740, + "firstName": "Shigeyuki", + "lastName": "Hirai", + "middleInitial": "", + "importedId": "GkJRln5zahKeWqSXX7QqpA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169741, + "firstName": "Hsuanling", + "lastName": "Lee", + "middleInitial": "", + "importedId": "YNJVYFABSLFaOwJhSTG3bg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169742, + "firstName": "Jan", + "lastName": "Borchers", + "middleInitial": "", + "importedId": "cimyCe-7YmMY4moG3R4DqQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169743, + "firstName": "Richard (Weiye)", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "NLgHTbSpNwm81xpKG0SWuw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169744, + "firstName": "Ivan", + "lastName": "Fernández", + "middleInitial": "", + "importedId": "YN6vvl_68i21RUQ9u544uw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169745, + "firstName": "Hanxuan", + "lastName": "Li", + "middleInitial": "", + "importedId": "2Ux2aDTS9DP5ucYzwNqV6Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169746, + "firstName": "Yuanchun", + "lastName": "Li", + "middleInitial": "", + "importedId": "MxoeeKrFRmePME0Hs6qW-A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169747, + "firstName": "Yuyang", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "O_fvEctor44kV0dZZ0rGmA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169748, + "firstName": "Naveen", + "lastName": "Sendhilnathan", + "middleInitial": "", + "importedId": "OX0byDaLmKXlOacRrH1XMw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169749, + "firstName": "Yang", + "lastName": "Liu", + "middleInitial": "", + "importedId": "ZJaZ7xCUXysv_PImUi-STw", + 
"source": "PCS", + "affiliations": [] + }, + { + "id": 169750, + "firstName": "Rahaf", + "lastName": "Alharbi", + "middleInitial": "", + "importedId": "CoE409NMieKxoESvE8g6xA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169751, + "firstName": "Amy", + "lastName": "Koike", + "middleInitial": "", + "importedId": "dGo8wYptMkSaSnzJ-jbe8Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169752, + "firstName": "Alexander", + "lastName": "Wang", + "middleInitial": "", + "importedId": "fD_KNObsNIf06AEzLnVp2w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169753, + "firstName": "Teng", + "lastName": "Han", + "middleInitial": "", + "importedId": "tHIfaVnGk0hgRd0jb9BRUw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169754, + "firstName": "Ethan", + "lastName": "Tam", + "middleInitial": "", + "importedId": "wdIYRFb8NbZ8UgsTLH-dxw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169755, + "firstName": "Enrico", + "lastName": "Rukzio", + "middleInitial": "", + "importedId": "FrzgGkMaoVPeiaBR8A7mbg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169756, + "firstName": "Shengdong", + "lastName": "Zhao", + "middleInitial": "", + "importedId": "rekgBuhTFAmu01G8SJEKbQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169757, + "firstName": "Christopher", + "lastName": "MacLellan", + "middleInitial": "", + "importedId": "LCxxvO8k59bE1B22d5fDDQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169758, + "firstName": "Yijie", + "lastName": "Li", + "middleInitial": "", + "importedId": "NqDL0SjakMd_hVEDk99_-Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169759, + "firstName": "Pinhao", + "lastName": "Guo", + "middleInitial": "", + "importedId": "MWrtC9OvY612Ip_7cFbPgw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169760, + "firstName": "Dajin", + "lastName": "Lee", + "middleInitial": "", + "importedId": "1XQQN7FC262LI23ud0J9gA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169761, + "firstName": "Yongzhao", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "PuotSzin0lc17bQ06Ejjdg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169762, + "firstName": "Arissa J.", + "lastName": "Sato", + "middleInitial": "", + "importedId": "dAzuYj3F1suEYz6xHldmTg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169763, + "firstName": "Ahmed", + "lastName": "Elsharkawy", + "middleInitial": "", + "importedId": "JOHCEfIkHkqmMZ0xC4TbIw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169764, + "firstName": "Thomas", + "lastName": "Ball", + "middleInitial": "J", + "importedId": "l-yXcXDCluVkLnJv3TmVtQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169765, + "firstName": "Henrik", + "lastName": "Voigt", + "middleInitial": "", + "importedId": "9eDFOPBFAegr3IYBxkG1vQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169766, + "firstName": "Amrita", + "lastName": "Ganguly", + "middleInitial": "", + "importedId": "Lit6tCKvKGpU3iZ0Hvn2Ug", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169767, + "firstName": "Rawal", + "lastName": "Khirodkar", + "middleInitial": "", + "importedId": "sQKL3g8mvQN2Se1n91Si-g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169768, + "firstName": "Chunyuan", + "lastName": "Zheng", + "middleInitial": "", + "importedId": "4e6SBLE6p5bu2Q9t2j95Sw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169769, + "firstName": "Harish Ram", + "lastName": "Nambiappan", + "middleInitial": "", + "importedId": 
"WfLaRhjfAetmSqALmAdU0Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169770, + "firstName": "Siyi", + "lastName": "Zhu", + "middleInitial": "", + "importedId": "mBy94d5RTFxIiFKSqEmOcA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169771, + "firstName": "De-Yuan", + "lastName": "Lu", + "middleInitial": "", + "importedId": "HufVxLSRNBwGvYEuv6daNw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169772, + "firstName": "Yixin", + "lastName": "Tsang", + "middleInitial": "", + "importedId": "sFC9_T7a0dTaG8uV6v6yzQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169773, + "firstName": "Ying", + "lastName": "Lei", + "middleInitial": "", + "importedId": "10EuQQ5XmrWHQ6ZN6qwwJA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169774, + "firstName": "Yu", + "lastName": "Soma", + "middleInitial": "", + "importedId": "ToS5OO9hK95ZiLV9YjjhGQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169775, + "firstName": "Hanze", + "lastName": "Jia", + "middleInitial": "", + "importedId": "avJfwNMCdF0RwPn_wKiW2A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169776, + "firstName": "Natalia", + "lastName": "Sanchez-Tamayo", + "middleInitial": "", + "importedId": "lQSXJH1O3XWoe8wu1iavjQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169777, + "firstName": "Jisu", + "lastName": "Yim", + "middleInitial": "", + "importedId": "zlAs0ooneBWa6_GhStZJuA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169778, + "firstName": "James", + "lastName": "Eagan", + "middleInitial": "R.", + "importedId": "GYSZ0Vva2ZzeH3G0fRFrHQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169779, + "firstName": "Mustafa Doga", + "lastName": "Dogan", + "middleInitial": "", + "importedId": "iFn4fAUAYoPYRz95BeIWHA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169780, + "firstName": "Ayaka", + "lastName": "Ishii", + "middleInitial": "", + "importedId": "893h0ebCAyLrDOeI-aXqNg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169781, + "firstName": "Elisa", + "lastName": "Rubegni", + "middleInitial": "", + "importedId": "mAP65sZbXTtCRzN2la215Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169782, + "firstName": "Jonathan", + "lastName": "Kummerfeld", + "middleInitial": "K.", + "importedId": "Rlug3iHIAVNB-ACP6K4GMA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169783, + "firstName": "Melissa", + "lastName": "Roemmele", + "middleInitial": "", + "importedId": "TIWq1eNGlyD0uBP6WV_rJQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169784, + "firstName": "Xin", + "lastName": "Tang", + "middleInitial": "", + "importedId": "JO44LEJB_z-H4vCaZsr0BQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169785, + "firstName": "Ryo", + "lastName": "Suzuki", + "middleInitial": "", + "importedId": "GN-jA9I12mj5OyFiQL9Cxg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169786, + "firstName": "Jiawei", + "lastName": "Fang", + "middleInitial": "", + "importedId": "x2q5AcGxYn2vKB88GLMaPg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169787, + "firstName": "Elliot", + "lastName": "Evans", + "middleInitial": "", + "importedId": "efwwEgKv5QpsfZkJKknppg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169788, + "firstName": "Ye", + "lastName": "Tao", + "middleInitial": "", + "importedId": "-Z8K7gpdIwpjDVQKoqDbag", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169789, + "firstName": "Mingyu", + "lastName": "Xie", + "middleInitial": "", + "importedId": 
"VJS0sh9YYUSwGkAAaV2lAA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169790, + "firstName": "Zhaowen", + "lastName": "Deng", + "middleInitial": "", + "importedId": "PFFeks29Z-VO2Wvh3H9NaA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169791, + "firstName": "Zhihao", + "lastName": "Yao", + "middleInitial": "", + "importedId": "rk0MtRVSxp1z436zSwI5Ew", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169792, + "firstName": "Quan", + "lastName": "Li", + "middleInitial": "", + "importedId": "EMaNOYI4P5ij7ZGTXbHLZQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169793, + "firstName": "Xincheng", + "lastName": "Huang", + "middleInitial": "", + "importedId": "v3zAQYGoH5MEJIYuGWuEsw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169794, + "firstName": "Narjes", + "lastName": "Pourjafarian", + "middleInitial": "", + "importedId": "dKam8rqeq1Nshw-wEtiBwQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169795, + "firstName": "Jieyu", + "lastName": "Zhou", + "middleInitial": "", + "importedId": "ZVm6Ez6OzxW15OROWGmHIA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169796, + "firstName": "Youichi", + "lastName": "Kamiyama", + "middleInitial": "", + "importedId": "44r8qfZSqfYmgPPbqVmaMQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169797, + "firstName": "Sunbum", + "lastName": "Kim", + "middleInitial": "", + "importedId": "w92aTpJvY_RwolUGbFYGZg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169798, + "firstName": "Chelsea", + "lastName": "Finn", + "middleInitial": "", + "importedId": "h4scFzS-T5wdyZt01YQvQA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169799, + "firstName": "Zhenchang", + "lastName": "Xing", + "middleInitial": "", + "importedId": "2W-eRE8FGO6UBoeBae5_Zw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169800, + "firstName": "Ashwat", + "lastName": "Chidambaram", + "middleInitial": "", + "importedId": "tQHsPSFaVO7KykK1TFHrSg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169801, + "firstName": "Jun", + "lastName": "Wang", + "middleInitial": "", + "importedId": "-fkUNYZLC9usPuKBbwNdZg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169802, + "firstName": "Hiroaki", + "lastName": "Murakami", + "middleInitial": "", + "importedId": "J2PcEJWwzJi5cz4f17rtuA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169803, + "firstName": "Wanli", + "lastName": "Qian", + "middleInitial": "", + "importedId": "epWd8xXwFNsREk1kDo_Kfg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169804, + "firstName": "Chuan", + "lastName": "Yan", + "middleInitial": "", + "importedId": "5MMwRsfNjhqQeqp1j1QQtA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169805, + "firstName": "Hiroyuki", + "lastName": "Manabe", + "middleInitial": "", + "importedId": "GPbdshGJqVFsnL-tpVbWZw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169806, + "firstName": "Andy", + "lastName": "Kong", + "middleInitial": "", + "importedId": "vwRviA-fIuH8iUU--jTUxw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169807, + "firstName": "Hohurn", + "lastName": "Jung", + "middleInitial": "", + "importedId": "lrpyr_z54zS5UI48zoa18w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169808, + "firstName": "Tanya", + "lastName": "Jonker", + "middleInitial": "R.", + "importedId": "iBMEnW-p-vxQsZr9R68pVQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169809, + "firstName": "David", + "lastName": "Bizer", + "middleInitial": "", + 
"importedId": "22XuM5m1UfDX6_VQsi71_w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169810, + "firstName": "Géry", + "lastName": "Casiez", + "middleInitial": "", + "importedId": "DIpebubG6g2BUtnvQQ6Pcg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169811, + "firstName": "Wei-Ju", + "lastName": "Lin", + "middleInitial": "", + "importedId": "KnUVXBpMyt8o1dHHUTfw6w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169812, + "firstName": "Li-Yi", + "lastName": "Wei", + "middleInitial": "", + "importedId": "wINqyPgzdC9m_wySH9U_XA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169813, + "firstName": "Jennifer", + "lastName": "Jacobs", + "middleInitial": "", + "importedId": "dNmqJWscQmAhaZifgd-Bow", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169814, + "firstName": "Chia-Chen", + "lastName": "Chi", + "middleInitial": "", + "importedId": "A1pl8of0SvwbMKG2tgiaaQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169815, + "firstName": "Brendan", + "lastName": "Langen", + "middleInitial": "", + "importedId": "4WzB7n6BvtlcssCxlmjn_w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169816, + "firstName": "Imran", + "lastName": "Kabir", + "middleInitial": "", + "importedId": "y7_jpvMsRdIjX5F2vAe99g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169817, + "firstName": "Arthur", + "lastName": "Fleig", + "middleInitial": "", + "importedId": "JFfNlQ-FB5edNShH-h74Kg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169818, + "firstName": "Kyzyl", + "lastName": "Monteiro", + "middleInitial": "", + "importedId": "WHdTqYyms-c_XqcUEoblDw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169819, + "firstName": "Jiatian", + "lastName": "Sun", + "middleInitial": "", + "importedId": "VsLrfIKikuQa2SXVkSWvDg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169820, + "firstName": "Chithra", + "lastName": "Anand", + "middleInitial": "", + "importedId": "UfK2mIh6CqgLEK91kpCcDw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169821, + "firstName": "Yutaro", + "lastName": "Hirao", + "middleInitial": "", + "importedId": "T16gyaTAILEtODULPDvToQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169822, + "firstName": "Anandghan", + "lastName": "Waghmare", + "middleInitial": "", + "importedId": "T9RZYsEDPpgumkkV8i7ukQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169823, + "firstName": "Hasti", + "lastName": "Seifi", + "middleInitial": "", + "importedId": "UZtGtr9EZ_3q1MzI9s9k7Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169824, + "firstName": "Ji Won", + "lastName": "Chung", + "middleInitial": "", + "importedId": "HDCTiwqZrtR6X17GLZt7Mw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169825, + "firstName": "Anton", + "lastName": "Winter", + "middleInitial": "", + "importedId": "pl90IplSzwvoh7gfEcnljw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169826, + "firstName": "Gaurav", + "lastName": "Sharma", + "middleInitial": "", + "importedId": "Bm6DTUhtlAnhfEdJOOJwYw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169827, + "firstName": "Mike", + "lastName": "Chen", + "middleInitial": "Y.", + "importedId": "lHUnsX0Tf6ZdyKH8WPjT9w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169828, + "firstName": "Yixuan", + "lastName": "Li", + "middleInitial": "", + "importedId": "FkPS0pqn_Wve6BDmOFw2Iw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169829, + "firstName": "Greg", + "lastName": "Hughes", + 
"middleInitial": "", + "importedId": "KIOO-QQiSlYfsxeIff7KDw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169830, + "firstName": "Minji", + "lastName": "Park", + "middleInitial": "", + "importedId": "RUlwwRelVYE0esiHA2TiWA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169831, + "firstName": "Hongbo", + "lastName": "ZHANG", + "middleInitial": "", + "importedId": "v9TT6Qyyc7-YvEGhHixhng", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169832, + "firstName": "Yuanhao", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "HsGhdK97rhN_Y3EO9XZ4Pg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169833, + "firstName": "Zichen", + "lastName": "LIU", + "middleInitial": "", + "importedId": "lHUQcDHVmBo15ulDe0Zfwg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169834, + "firstName": "Chengbo", + "lastName": "Zheng", + "middleInitial": "", + "importedId": "NfCY3ekUUx0e0sS3ayWTaA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169835, + "firstName": "Amritansh", + "lastName": "Kwatra", + "middleInitial": "", + "importedId": "xFyiaHf57WfnKfS5BBCOfw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169836, + "firstName": "Jingyue", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "I7JRIn6Acz-dWEuKD3cB1g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169837, + "firstName": "Yi-Sheng", + "lastName": "Chan", + "middleInitial": "", + "importedId": "TzhrmxVi_kI7Oem-eUnzFg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169838, + "firstName": "Robert", + "lastName": "Jacob", + "middleInitial": "J.K", + "importedId": "9Ay0s9L7-555NI4YoCiEZA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169839, + "firstName": "Eunice", + "lastName": "Jun", + "middleInitial": "", + "importedId": "vqX_4F0zlxG1K9bAoTlgYA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169840, + "firstName": "Yu", + "lastName": "Liu", + "middleInitial": "", + "importedId": "cRm9LXltv5I6FneN30nGCw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169841, + "firstName": "Rubaiat Habib", + "lastName": "Kazi", + "middleInitial": "", + "importedId": "UJXDaT6uZ4fNhdFsUVs55w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169842, + "firstName": "IV", + "lastName": "Ramakrishnan", + "middleInitial": "", + "importedId": "Q1ffeGovRiRuPQ3YIyRf0w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169843, + "firstName": "Yifei", + "lastName": "Wu", + "middleInitial": "", + "importedId": "Lc3QZcD6hZsZI3wfasdwUw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169844, + "firstName": "Zheer", + "lastName": "Xu", + "middleInitial": "", + "importedId": "AekNariZFfr_q_OcOMZ-iQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169845, + "firstName": "Francois", + "lastName": "Guimbretiere", + "middleInitial": "", + "importedId": "haFD2OgOdA3IY14UrV_GbA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169846, + "firstName": "Fillia", + "lastName": "Makedon", + "middleInitial": "", + "importedId": "tun0Pu4yrQ5uURn9gkNxEA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169847, + "firstName": "Kecheng", + "lastName": "Jin", + "middleInitial": "", + "importedId": "CTBCOj6xxHd7xBAo13cwbQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169848, + "firstName": "Yoonho", + "lastName": "Lee", + "middleInitial": "", + "importedId": "FO0uahAghobcRcWEsGcODg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169849, + "firstName": "Ken", + "lastName": 
"Perlin", + "middleInitial": "", + "importedId": "q_HAZ9FA6-4bxYBde_YIFQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169850, + "firstName": "Jürgen", + "lastName": "Steimle", + "middleInitial": "", + "importedId": "s7t9OOHe8Q59g39YCU0IbQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169851, + "firstName": "Angus", + "lastName": "Forbes", + "middleInitial": "G.", + "importedId": "GtdBH4K6dPkEqQH0GYjvDQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169852, + "firstName": "Junyong", + "lastName": "Park", + "middleInitial": "", + "importedId": "4mXTDiL7oflI2WdalrzhMg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169853, + "firstName": "Hiromu", + "lastName": "Yakura", + "middleInitial": "", + "importedId": "Cagsdrez5mbKZAnMXKtw8g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169854, + "firstName": "Daniel", + "lastName": "Ashbrook", + "middleInitial": "", + "importedId": "32JeI_YKMn-zq1ONOD9hww", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169855, + "firstName": "Eytan", + "lastName": "Adar", + "middleInitial": "", + "importedId": "RKBfX-gdSuZlPy6Y9_ZrWg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169856, + "firstName": "Ken", + "lastName": "Pfeuffer", + "middleInitial": "", + "importedId": "NrpIHYjQBiGY3BcwqIdH0A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169857, + "firstName": "Jonathan", + "lastName": "Browder", + "middleInitial": "", + "importedId": "jrLsuz3EO1li4mITt0eNgw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169858, + "firstName": "Kakani", + "lastName": "Katija", + "middleInitial": "", + "importedId": "l3ov7gqB-37jUo_CWzJyIQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169859, + "firstName": "Jin", + "lastName": "Pan", + "middleInitial": "", + "importedId": "oFiTwTmQ_XDNaRDeYUXDxw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169860, + "firstName": "Chi-Jung", + "lastName": "Lee", + "middleInitial": "", + "importedId": "ygu5H9uKit8L6QRSsEhJdA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169861, + "firstName": "J. 
Matias", + "lastName": "Di Martino", + "middleInitial": "", + "importedId": "Z4bzQbK4JgqpJTzdjKjNjQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169862, + "firstName": "Shan-Yuan", + "lastName": "Teng", + "middleInitial": "", + "importedId": "Fr_MdyDQrCsSwLDzlppG2w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169863, + "firstName": "Jihong", + "lastName": "Huang", + "middleInitial": "", + "importedId": "hJAJ_hifT5e3BU5cfqUrmg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169864, + "firstName": "Bob Tianqi", + "lastName": "Wei", + "middleInitial": "", + "importedId": "JKwTn_dUnWAUcNsCLe23OQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169865, + "firstName": "Manuel", + "lastName": "Lopez-Amo", + "middleInitial": "", + "importedId": "Mo00c16NBndaeXir2Y94zw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169866, + "firstName": "Gil", + "lastName": "Zussman", + "middleInitial": "", + "importedId": "N9Rjbo2lBGYNS4SnQrJwVQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169867, + "firstName": "Cheng", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "1DDZdGdZa_bi_mwYItE6eA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169868, + "firstName": "Ying", + "lastName": "Cao", + "middleInitial": "", + "importedId": "t3k6WxooA-Aa_ppWvsZoXg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169869, + "firstName": "Ruiqi", + "lastName": "Chen", + "middleInitial": "", + "importedId": "-tx9uoGKLpTT0reXLB0uXg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169870, + "firstName": "Shuyue", + "lastName": "Feng", + "middleInitial": "", + "importedId": "LrSRVHoMETb8RzSrqqSOog", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169871, + "firstName": "Rúbia", + "lastName": "Reis Guerra", + "middleInitial": "", + "importedId": "w-pl6oM7_2le_JlbZZ91dA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169872, + "firstName": "Feng", + "lastName": "Qian", + "middleInitial": "", + "importedId": "rHnQxyPHa3o49ALDZ1TUpg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169873, + "firstName": "Yohan", + "lastName": "Lim", + "middleInitial": "", + "importedId": "XKlhW0tJfuqa7YEvSDdpNA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169874, + "firstName": "Michael", + "lastName": "Yin", + "middleInitial": "", + "importedId": "EoH-EG8CQJ6lIzCGe0iBYQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169875, + "firstName": "Camila", + "lastName": "Friedman-Gerlicz", + "middleInitial": "", + "importedId": "3SJsF0D_nsAiGbNih4y8jA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169876, + "firstName": "Zhuohao", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "uKGU9zWN5qVUbfiFk7f1kA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169877, + "firstName": "Hamed", + "lastName": "Rezazadegan Tavakoli", + "middleInitial": "", + "importedId": "LZJ0bSNj1-ggpGDOUsnWxg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169878, + "firstName": "Nandi", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "arfCSVQafGSuvCC1KpJv2Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169879, + "firstName": "Michael", + "lastName": "Rivera", + "middleInitial": "L.", + "importedId": "beqZ5_1cj2HatYEDoNE3zA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169880, + "firstName": "Weitao", + "lastName": "You", + "middleInitial": "", + "importedId": "7i4l4WfrYQPCSE8R7a8raw", + "source": "PCS", + "affiliations": [] + }, + 
{ + "id": 169881, + "firstName": "Jenn", + "lastName": "Shanahan", + "middleInitial": "", + "importedId": "wFTZdNpNf-kkgrnuFIXCCg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169882, + "firstName": "Yao", + "lastName": "Lu", + "middleInitial": "", + "importedId": "hfyqXWLVehmx4FSo8OoJ_A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169883, + "firstName": "Jean", + "lastName": "Song", + "middleInitial": "Y", + "importedId": "E_hgqeraoPAoX_9UPasKoA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169884, + "firstName": "Mackenzie", + "lastName": "Leake", + "middleInitial": "", + "importedId": "iUeS_U98Vg4xQUr78-q7Kg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169885, + "firstName": "Catherine", + "lastName": "Rasgaitis", + "middleInitial": "L", + "importedId": "2ET-FQbCmJrXya1dxs7e4w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169886, + "firstName": "Buntarou", + "lastName": "Shizuki", + "middleInitial": "", + "importedId": "TJAWIctVjvJEKegv8Zj6KQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169887, + "firstName": "Cori", + "lastName": "Tymoszek Park", + "middleInitial": "", + "importedId": "DSUIzCHLP-Aw4d4ObkHo3Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169888, + "firstName": "Li", + "lastName": "Qiwei", + "middleInitial": "", + "importedId": "UXEpU9H9oAQqlghTpFDsig", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169889, + "firstName": "Karan", + "lastName": "Ahuja", + "middleInitial": "", + "importedId": "r2a43MVupOSOiXKLcv_W7w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169890, + "firstName": "Zhi", + "lastName": "Li", + "middleInitial": "", + "importedId": "JoKMTEDfnnU3qeOY9rJrvw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169891, + "firstName": "Khushman", + "lastName": "Patel", + "middleInitial": "", + "importedId": "y3-90G4taXeSfj_3OZF-UQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169892, + "firstName": "Yiheng", + "lastName": "Bian", + "middleInitial": "", + "importedId": "ohp6keaC59CCpkj0XSnpqQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169893, + "firstName": "Wendy", + "lastName": "Mackay", + "middleInitial": "E.", + "importedId": "ycUYXTFLv9qHpo3w--qmEA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169894, + "firstName": "Masahiko", + "lastName": "Inami", + "middleInitial": "", + "importedId": "ymV92K5P9NKDPz9t2QmJBw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169895, + "firstName": "Chun Meng", + "lastName": "Yu", + "middleInitial": "", + "importedId": "jdsL6wMcp3LlAys2Bvv7Vw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169896, + "firstName": "Geehyuk", + "lastName": "Lee", + "middleInitial": "", + "importedId": "5oayHmDaeFMZJ_onK64caA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169897, + "firstName": "Guanhua", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "BYJfje9_SdI5a7z1vFncWQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169898, + "firstName": "Luis", + "lastName": "Leiva", + "middleInitial": "A.", + "importedId": "zEw1asvR5_8wPjGUPy5Fbg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169899, + "firstName": "Shiqing", + "lastName": "Lyu", + "middleInitial": "", + "importedId": "-34GN1E3NwanCRHWGt6R8A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169900, + "firstName": "Tianhong", + "lastName": "Yu", + "middleInitial": "Catherine", + "importedId": "GjBvatlPKmFIpCliYjTNaQ", + "source": "PCS", + 
"affiliations": [] + }, + { + "id": 169901, + "firstName": "Lingyun", + "lastName": "Sun", + "middleInitial": "", + "importedId": "anf4DjW8apv2gTtBT55RPA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169902, + "firstName": "Leixian", + "lastName": "Shen", + "middleInitial": "", + "importedId": "QUq6N3fSiZDjQs7uKffQbA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169903, + "firstName": "Sauvik", + "lastName": "Das", + "middleInitial": "", + "importedId": "GlXXSVWAEuozCVecsivhdg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169904, + "firstName": "Daniel", + "lastName": "Campos Zamora", + "middleInitial": "", + "importedId": "6ogbQtMZB_ppN0CQXzazOA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169905, + "firstName": "Xuewei", + "lastName": "Liang", + "middleInitial": "", + "importedId": "tPdRL9t_ZNnnQgbVxQB98g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169906, + "firstName": "Tiare", + "lastName": "Feuchtner", + "middleInitial": "", + "importedId": "JihlC4aKWjzTUU-0xSTBPg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169907, + "firstName": "John", + "lastName": "Chung", + "middleInitial": "Joon Young", + "importedId": "PGlpEVdS4_1vxTCbCETfCA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169908, + "firstName": "Vikram", + "lastName": "Iyer", + "middleInitial": "", + "importedId": "92IoBbKSghsf8BDt6VMShA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169909, + "firstName": "Yukang", + "lastName": "Yan", + "middleInitial": "", + "importedId": "3VxTjW6x8TBLTaM9O3Qh2w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169910, + "firstName": "Ishan", + "lastName": "Chatterjee", + "middleInitial": "", + "importedId": "S8IFANAmyzzQ1FUZgZGddQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169911, + "firstName": "Haoxiang", + "lastName": "Fan", + "middleInitial": "", + "importedId": "Bh6ebWm63WeeCbknojbqXQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169912, + "firstName": "Dennis", + "lastName": "Wittchen", + "middleInitial": "", + "importedId": "tzScPxj5BZic6GfXbozz4g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169913, + "firstName": "Zhipeng", + "lastName": "Li", + "middleInitial": "", + "importedId": "3SAjQF1S2E3cLBzH2JmkTg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169914, + "firstName": "Helena", + "lastName": "Vasconcelos", + "middleInitial": "", + "importedId": "CB724pk-JGogueUJZugCZw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169915, + "firstName": "Boyu", + "lastName": "Feng", + "middleInitial": "", + "importedId": "RoCOJ_S-UuMmW-mnPRhl-g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169916, + "firstName": "Nathan", + "lastName": "White", + "middleInitial": "Thomas", + "importedId": "MZpNyzepQYl11pRhMMxgSw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169917, + "firstName": "Ali", + "lastName": "Mazalek", + "middleInitial": "", + "importedId": "qbLXJZpQ0-yG3Y5SfLM8iQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169918, + "firstName": "Kosuke", + "lastName": "Shimizu", + "middleInitial": "", + "importedId": "-rl0i-ic4lpdcow5sCFabA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169919, + "firstName": "Hyelim", + "lastName": "Hwang", + "middleInitial": "", + "importedId": "Br8gzdbvnZ8On-UsD9Ju5w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169920, + "firstName": "Peizhong", + "lastName": "Gao", + "middleInitial": "", + "importedId": 
"B2n5Tkbaj0Z8No6T27aPVg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169921, + "firstName": "Lukas", + "lastName": "Gehrke", + "middleInitial": "", + "importedId": "ZQqRWpw_sxeXICrKxb90Iw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169922, + "firstName": "Md Touhidul", + "lastName": "Islam", + "middleInitial": "", + "importedId": "XnXh9S0oLXg4C1SwXa6IgA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169923, + "firstName": "Noushad", + "lastName": "Sojib", + "middleInitial": "", + "importedId": "pfe9mTkD8Sh9JMKCDPsp-w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169924, + "firstName": "Aditya", + "lastName": "Gunturu", + "middleInitial": "", + "importedId": "L-fUZou7gDziFta9jz6h1Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169925, + "firstName": "Nicolas", + "lastName": "Steck", + "middleInitial": "", + "importedId": "MR5L-oh-iHP6eb50-_aXyA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169926, + "firstName": "Karon", + "lastName": "MacLean", + "middleInitial": "E", + "importedId": "RyN2jgghUfKsXL0w2GGO2g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169927, + "firstName": "Chuhan", + "lastName": "Shi", + "middleInitial": "", + "importedId": "LukoRLUMLYuIgAqn1Fg-Vw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169928, + "firstName": "Hongni", + "lastName": "Ye", + "middleInitial": "", + "importedId": "IhFj3sL8XTbbLTXXN8Wh3w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169929, + "firstName": "Yiming", + "lastName": "Jiao", + "middleInitial": "", + "importedId": "Kdp1GxeuviSwUQZr_TT2kQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169930, + "firstName": "Yeonsu", + "lastName": "Kim", + "middleInitial": "", + "importedId": "vh3QZm49-03i0WQeowExNQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169931, + "firstName": "Zhaodong", + "lastName": "Jiang", + "middleInitial": "", + "importedId": "JJOOoI31fNAgkepLOdLatA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169932, + "firstName": "Yi-Chao", + "lastName": "Chen", + "middleInitial": "", + "importedId": "l_6j4sISVEM08FhPBVQV7g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169933, + "firstName": "Bo", + "lastName": "Han", + "middleInitial": "", + "importedId": "oIegnP0vcYGdAXyr0bYTvg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169934, + "firstName": "Leen", + "lastName": "Al Lababidi", + "middleInitial": "", + "importedId": "d5kaqsbta44cFM2LqKHNdw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169935, + "firstName": "Katherine", + "lastName": "Kuchenbecker", + "middleInitial": "J.", + "importedId": "TOqXlWBpO6e3aPf0zAGmlA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169936, + "firstName": "Max", + "lastName": "Kreminski", + "middleInitial": "", + "importedId": "GaRE7kP9lRv2HLmcdI-Wwg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169937, + "firstName": "Mathias", + "lastName": "Lystbæk", + "middleInitial": "N.", + "importedId": "Tj3NjTKO6GPpocMqwX-RrA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169938, + "firstName": "Junyoung", + "lastName": "Choi", + "middleInitial": "", + "importedId": "JM3cSPFsUtbvP4pxlL0Cyg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169939, + "firstName": "Kyle", + "lastName": "Heinz", + "middleInitial": "", + "importedId": "tPEw_bY1kQGRaOQlNDXGpw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169940, + "firstName": "Jeffrey", + "lastName": "Tao", + 
"middleInitial": "", + "importedId": "EDBiiTBcQGcVHBSXekJvYA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169941, + "firstName": "Haokun", + "lastName": "Wang", + "middleInitial": "", + "importedId": "2D4FxgVjHJXtpkTa34xCBQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169942, + "firstName": "Steven", + "lastName": "Espinosa", + "middleInitial": "", + "importedId": "iWK79inilMeFQBOEJ793xg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169943, + "firstName": "May", + "lastName": "Yu", + "middleInitial": "", + "importedId": "bhtm9P9GHs2I3pHwchSoJw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169944, + "firstName": "Sungho", + "lastName": "Jo", + "middleInitial": "", + "importedId": "P2-22FDjncvn3aJW49cuRQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169945, + "firstName": "Lauren", + "lastName": "Franz", + "middleInitial": "", + "importedId": "kf_I7FtI3Mvm9lrkCIhZSA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169946, + "firstName": "Jingle", + "lastName": "Huang", + "middleInitial": "", + "importedId": "lO_gxmrSIHAdx76QTIJwFQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169947, + "firstName": "Yu-Kai", + "lastName": "Hung", + "middleInitial": "", + "importedId": "930PM67GfyT4SZLtpafZxQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169948, + "firstName": "Iñigo", + "lastName": "Ezcurdia", + "middleInitial": "", + "importedId": "Nekc-4n1s4bvDBKt0Z4lpA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169949, + "firstName": "Daehwa", + "lastName": "Kim", + "middleInitial": "", + "importedId": "wofoprAgV6gzBo3Cazk5RA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169950, + "firstName": "Michael", + "lastName": "Sedlmair", + "middleInitial": "", + "importedId": "xGNIKKkYEa1b1udny_xzDw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169951, + "firstName": "Jiannan", + "lastName": "Li", + "middleInitial": "", + "importedId": "OiGdqwT8ZAaiPuG6sw08aA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169952, + "firstName": "Pornthep", + "lastName": "Preechayasomboon", + "middleInitial": "", + "importedId": "7lQr-2OEk9dqjvtPzc5YqQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169953, + "firstName": "Andreia", + "lastName": "Valente", + "middleInitial": "", + "importedId": "J4oGlcMBicJMvd_8wld0bg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169954, + "firstName": "Mario", + "lastName": "Bracklein", + "middleInitial": "", + "importedId": "zfHFMvvZs3snOgBXGFczHA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169955, + "firstName": "Mukund", + "lastName": "Varma T", + "middleInitial": "", + "importedId": "UkVa91mg1T33_Qh5_e4MUA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169956, + "firstName": "Linguang", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "u7rns3kwjhbPzqSMyGJONw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169957, + "firstName": "Pei", + "lastName": "Chen", + "middleInitial": "", + "importedId": "6MLHzWz-HfsPDtyTHQt7Qw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169958, + "firstName": "Bradford", + "lastName": "Snow", + "middleInitial": "J", + "importedId": "1YRnTjY-vyvG26NOpwFcpg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169959, + "firstName": "Chenfeng", + "lastName": "Gao", + "middleInitial": "", + "importedId": "8NioDhxMAahWU_PG1s5fMw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169960, + "firstName": "Hunter", + 
"lastName": "Mathews", + "middleInitial": "G", + "importedId": "EyGPvmguV515SJaLqXA-MA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169961, + "firstName": "Benjamin", + "lastName": "Stoddart", + "middleInitial": "", + "importedId": "9A7sRfM2_EHqoZ2aeXjG0w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169962, + "firstName": "Deval", + "lastName": "Panchal", + "middleInitial": "", + "importedId": "4cO7jnAJp_FfrM33GoDjfg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169963, + "firstName": "Mina", + "lastName": "Huh", + "middleInitial": "", + "importedId": "3DJ9vRXdYSYSAX1SuhfR7A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169964, + "firstName": "Bjoern", + "lastName": "Hartmann", + "middleInitial": "", + "importedId": "UeMY1SsBRzOg4bVxZSCmuA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169965, + "firstName": "Mitsuhiro", + "lastName": "Kamezaki", + "middleInitial": "", + "importedId": "2AqW2JQO9kaFqDll4s25yw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169966, + "firstName": "Mona", + "lastName": "Shimizu", + "middleInitial": "", + "importedId": "OACm9AUGe4AZhddQK7b8ew", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169967, + "firstName": "Adriana", + "lastName": "Hilliard", + "middleInitial": "", + "importedId": "KbyaDMGXACZte7ldtrPJ3A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169968, + "firstName": "Hyeonsu", + "lastName": "Kang", + "middleInitial": "B", + "importedId": "gCmsrQ1XRAePogWZ7jZMEw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169969, + "firstName": "Subhashini", + "lastName": "Venugopalan", + "middleInitial": "", + "importedId": "-CQ8-3xlpENMWoKbD8Zacw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169970, + "firstName": "Dieter", + "lastName": "Fox", + "middleInitial": "", + "importedId": "y6j4hHU9_buY6iibglY4Tg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169971, + "firstName": "Uta", + "lastName": "Wagner", + "middleInitial": "", + "importedId": "0bMqZ42IAfctzr-cCelBSA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169972, + "firstName": "Francesca", + "lastName": "Lameiro", + "middleInitial": "", + "importedId": "ByCuovwxidYXNR3V4X-piA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169973, + "firstName": "Toby", + "lastName": "Li", + "middleInitial": "Jia-Jun", + "importedId": "il6-1O1vU0p_l7aSJcoJ4g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169974, + "firstName": "Aditya", + "lastName": "Retnanto", + "middleInitial": "", + "importedId": "7wx3yDbGGbhVOIsyJZRqmw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169975, + "firstName": "Shefali", + "lastName": "Patel", + "middleInitial": "", + "importedId": "atgPaQIAq7SW2tY4cO4ABg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169976, + "firstName": "Hongyu", + "lastName": "Mao", + "middleInitial": "", + "importedId": "phWh_LjOc_jQ1A4i0sLAzg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169977, + "firstName": "Pradeep Raj", + "lastName": "Krishnappa Babu", + "middleInitial": "", + "importedId": "JukjgEUwwbHkhGI0sF38Yg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169978, + "firstName": "Thomas", + "lastName": "Kern", + "middleInitial": "", + "importedId": "c1P9fNubH55KH3l-JoPl0w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169979, + "firstName": "Danielle", + "lastName": "Lottridge", + "middleInitial": "", + "importedId": "I7iaVtRQy9Xkd-_PcOFQTQ", + "source": "PCS", + 
"affiliations": [] + }, + { + "id": 169980, + "firstName": "Zihan", + "lastName": "Gao", + "middleInitial": "", + "importedId": "B4CyuRikjCMYKdxLRRIXWg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169981, + "firstName": "Xia", + "lastName": "Su", + "middleInitial": "", + "importedId": "J4pksFIR03W2_In-JLQNYA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169982, + "firstName": "Sinyu", + "lastName": "Lai", + "middleInitial": "", + "importedId": "KOIkhV99lZn5rFwMXa_Alg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169983, + "firstName": "Kazuyuki", + "lastName": "Fujita", + "middleInitial": "", + "importedId": "e-KfkiPFbFPUYASETBkUtA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169984, + "firstName": "Xiao", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "q-D7gzshh34H7l1lQbkszQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169985, + "firstName": "Shyamnath", + "lastName": "Gollakota", + "middleInitial": "", + "importedId": "Zr7Y7Z4_815xPro-0r9GxQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169986, + "firstName": "David", + "lastName": "Coyle", + "middleInitial": "", + "importedId": "pJUk21jf5tBbz0m-DGQKFw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169987, + "firstName": "Shao-en", + "lastName": "Ma", + "middleInitial": "", + "importedId": "6g4PtWZ3ACHNL22yy0vEHw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169988, + "firstName": "Jiawen", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "rb8cnYbUXHiFmoAU2v8Bng", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169989, + "firstName": "Akifumi", + "lastName": "Takahashi", + "middleInitial": "", + "importedId": "Up0Lt4f07m8L1USIs52wsw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169990, + "firstName": "Jörg", + "lastName": "Müller", + "middleInitial": "", + "importedId": "9GFxpxzkFRM4VikuvGPEKg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169991, + "firstName": "Jessie", + "lastName": "Yuan", + "middleInitial": "", + "importedId": "c8UjOdiIdHKmZwuQBDS_jw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169992, + "firstName": "Christoph", + "lastName": "Gebhardt", + "middleInitial": "", + "importedId": "sqi8n0FfEM1tKtLA64ElSA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169993, + "firstName": "Daekun", + "lastName": "Kim", + "middleInitial": "", + "importedId": "p0_MRGQMSkIM39nQWb3tUw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169994, + "firstName": "Siyi", + "lastName": "Ren", + "middleInitial": "", + "importedId": "oMX9s15doBrVbcBrkslhyQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169995, + "firstName": "Lauren", + "lastName": "Nigri", + "middleInitial": "", + "importedId": "1OKZ7TTRzNJG_kK3IQK1ew", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169996, + "firstName": "Zeyu", + "lastName": "Wang", + "middleInitial": "", + "importedId": "YsTO8sPaYRW7y1rUJI6Jgg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169997, + "firstName": "Hans", + "lastName": "Gellersen", + "middleInitial": "", + "importedId": "PGAxbQNYlICCCDw00tS7lQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169998, + "firstName": "Jaewook", + "lastName": "Lee", + "middleInitial": "", + "importedId": "Ko2N9T8zAz-zPPa8nd9IBg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 169999, + "firstName": "Yasaman", + "lastName": "Sefidgar", + "middleInitial": "S.", + "importedId": "T7J7Cz1qYZ20Y2LXVckqnA", + "source": 
"PCS", + "affiliations": [] + }, + { + "id": 170000, + "firstName": "Xiao", + "lastName": "Xie", + "middleInitial": "", + "importedId": "hosAhoy1tie7pj8sYrxWBQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170001, + "firstName": "Kunihiro", + "lastName": "Kato", + "middleInitial": "", + "importedId": "G8FYHR0NdQQx5WIpETSFKQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170002, + "firstName": "Markus", + "lastName": "Klar", + "middleInitial": "", + "importedId": "ZA0blB-jyfETjwo8IFZanw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170003, + "firstName": "Joel", + "lastName": "Chan", + "middleInitial": "", + "importedId": "Ez78ia8tP-i2bDyxkR9rQQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170004, + "firstName": "Jan Henry", + "lastName": "Belz", + "middleInitial": "", + "importedId": "pUM_1hyDswZkO8GF6NM-jA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170005, + "firstName": "Tong", + "lastName": "Sun", + "middleInitial": "Steven", + "importedId": "ISawUVp8HwkP_Lo9vj9M9w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170006, + "firstName": "Mar", + "lastName": "Gonzalez-Franco", + "middleInitial": "", + "importedId": "WzJiSxt7y46Nu61rYGdhgA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170007, + "firstName": "Jiawen", + "lastName": "Zhu", + "middleInitial": "Stefanie", + "importedId": "76ctyd8KRRZjK9m79KNZzg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170008, + "firstName": "Yulin", + "lastName": "Shen", + "middleInitial": "", + "importedId": "uxun6wUjVRG5ocgu1s_NaA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170009, + "firstName": "Joshua", + "lastName": "Yang", + "middleInitial": "Kong", + "importedId": "HVV54zyl89vJF6Ynrya7XQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170010, + "firstName": "Shreya", + "lastName": "Shankar", + "middleInitial": "", + "importedId": "yHjWMVfi6BiW9QntnnlCCA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170011, + "firstName": "Giles", + "lastName": "Blaney", + "middleInitial": "", + "importedId": "W-D0MwRnDUAlgf8njQE8xg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170012, + "firstName": "Jiaji", + "lastName": "Li", + "middleInitial": "", + "importedId": "ngCZ_WbPTjbycakkfHYywQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170013, + "firstName": "Andreea", + "lastName": "Danielescu", + "middleInitial": "", + "importedId": "PCMagvyHI4hT42yGNaL73A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170014, + "firstName": "Cheng", + "lastName": "Yao", + "middleInitial": "", + "importedId": "Ohqz43BsCXD0b3a3RoxLRg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170015, + "firstName": "Colin", + "lastName": "Hascup", + "middleInitial": "", + "importedId": "VaQ66olB4JibwME5eHyZuw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170016, + "firstName": "Abdelkareem", + "lastName": "Bedri", + "middleInitial": "", + "importedId": "NMG_0eu4Lml4TffMUPvrRQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170017, + "firstName": "Taejun", + "lastName": "Kim", + "middleInitial": "", + "importedId": "7Y8WEIZBkmKyDUuvx8UnqA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170018, + "firstName": "Jiho", + "lastName": "Kim", + "middleInitial": "", + "importedId": "H4xrCmw313frELTRjYfz8g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170019, + "firstName": "Michael", + "lastName": "Nebeling", + "middleInitial": "", + "importedId": 
"7Kex_0dKVNSpniXyyTYpBw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170020, + "firstName": "Qihang", + "lastName": "Shan", + "middleInitial": "", + "importedId": "cImxMp810YxhFhcgKU_cqg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170021, + "firstName": "Alex", + "lastName": "Olwal", + "middleInitial": "", + "importedId": "2JAYtPYSaIWyiAiP0PckPQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170022, + "firstName": "Yi", + "lastName": "Wen", + "middleInitial": "", + "importedId": "U-5tw8_-Yq6X79z4OoS3Uw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170023, + "firstName": "Panayu", + "lastName": "Keelawat", + "middleInitial": "", + "importedId": "kD9XJuiKad1K17QcmLN_Ig", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170024, + "firstName": "Dhruv", + "lastName": "Jain", + "middleInitial": "", + "importedId": "si0FI_f98MJQTO6HrC85vQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170025, + "firstName": "Caiming", + "lastName": "Xiong", + "middleInitial": "", + "importedId": "-9S3RAb_8yPgxQidYgDPBw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170026, + "firstName": "Xianqing", + "lastName": "Jia", + "middleInitial": "", + "importedId": "cO_loYqCoJ1Ku2SCeRffiw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170027, + "firstName": "Nhan", + "lastName": "Tran", + "middleInitial": "", + "importedId": "3UwT89oPh-IMEOuIrFzd0A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170028, + "firstName": "David", + "lastName": "Sussillo", + "middleInitial": "", + "importedId": "BKkC9u3UTJh0WB7Xb2yoew", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170029, + "firstName": "Zihao", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "UqSVEM5THGBHYuJdz2jUKA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170030, + "firstName": "Liuqing", + "lastName": "Chen", + "middleInitial": "", + "importedId": "dHx_KkNBHZe-3QlhlDe7_Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170031, + "firstName": "Jane", + "lastName": "Hoffswell", + "middleInitial": "", + "importedId": "GHs1unNX8EJNecXs_xATBg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170032, + "firstName": "Yongtao", + "lastName": "Tang", + "middleInitial": "", + "importedId": "KsRfT5oDyCcHK4ezgm3MKg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170033, + "firstName": "Shuainan", + "lastName": "Ye", + "middleInitial": "", + "importedId": "JBMWVlG47tyzZ9anxjkktA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170034, + "firstName": "Junzhe", + "lastName": "Ji", + "middleInitial": "", + "importedId": "HBersIGARR2I5GvFAxTk1A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170035, + "firstName": "Yu", + "lastName": "Jiang", + "middleInitial": "", + "importedId": "e8rWewnb2rSS1gJ7UAiipA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170036, + "firstName": "Ethan", + "lastName": "Chen", + "middleInitial": "", + "importedId": "Uk59hbVcdF0QjMlAB5Dm0A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170037, + "firstName": "Yuhang", + "lastName": "Zhao", + "middleInitial": "", + "importedId": "grxEwavZH6KBaCspy1-laA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170038, + "firstName": "Jiexin", + "lastName": "Ding", + "middleInitial": "", + "importedId": "3kDiV7d1h7ujieHzcihXug", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170039, + "firstName": "Seongkook", + "lastName": "Heo", + "middleInitial": "", + "importedId": 
"8u6ENy5fbi7xGnVehduTFg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170040, + "firstName": "Sangho", + "lastName": "Suh", + "middleInitial": "", + "importedId": "LCIY9oz1aK9g8onEOgleUA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170041, + "firstName": "Semina", + "lastName": "Yi", + "middleInitial": "", + "importedId": "UIyUH9y6FUizApI1TD6bOw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170042, + "firstName": "Vikas", + "lastName": "Ashok", + "middleInitial": "", + "importedId": "e9YWattsWNheCfcOx3ZIjw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170043, + "firstName": "Hideki", + "lastName": "Koike", + "middleInitial": "", + "importedId": "RDQHw03ag01lqULc0xa4_w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170044, + "firstName": "Jakki", + "lastName": "Bailey", + "middleInitial": "O.", + "importedId": "raUr4wGY4YHlWcGnC1GkPg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170045, + "firstName": "Yunkai", + "lastName": "Xu", + "middleInitial": "", + "importedId": "7O8yoeZwZofsW9h9zmHZSQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170046, + "firstName": "Haoye", + "lastName": "Dong", + "middleInitial": "", + "importedId": "LHUWoKDPp2L-DyNCXx8-jQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170047, + "firstName": "Ruocong", + "lastName": "Liu", + "middleInitial": "", + "importedId": "StTETKISvmGWM0opHp2zpg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170048, + "firstName": "Florian", + "lastName": "Fischer", + "middleInitial": "", + "importedId": "AbFZu6hv_QZt_zUJKSzBYg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170049, + "firstName": "Andrew", + "lastName": "Tjahjadi", + "middleInitial": "D", + "importedId": "PiujuVW6kMVTfk5sJEVl5Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170050, + "firstName": "Aleksi", + "lastName": "Ikkala", + "middleInitial": "", + "importedId": "ILB9l3NGkRNVN4PxdAmYmg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170051, + "firstName": "Guiyu", + "lastName": "Ma", + "middleInitial": "", + "importedId": "eIFZ2YnbTuWSsW7ZOlcqxA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170052, + "firstName": "Yanying", + "lastName": "Zhu", + "middleInitial": "", + "importedId": "stOBS7MVHb9VskOjvTXmXw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170053, + "firstName": "Chun-Cheng", + "lastName": "Chang", + "middleInitial": "", + "importedId": "9oc8at_kmMllJOGpjIN6yA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170054, + "firstName": "André", + "lastName": "Zenner", + "middleInitial": "", + "importedId": "Y1e1zISAmrQLhVJEP-fAhw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170055, + "firstName": "Jinyang", + "lastName": "Liu", + "middleInitial": "", + "importedId": "jVx5u1IYB8JvMKbkHXKJ2w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170056, + "firstName": "Ziqi", + "lastName": "Yu", + "middleInitial": "", + "importedId": "-d5oBPyybYozTXlsbQGNiA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170057, + "firstName": "Yujie", + "lastName": "Shan", + "middleInitial": "", + "importedId": "WXzbT5aHauNIk2YqtvDTfQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170058, + "firstName": "Hyunjae", + "lastName": "Gil", + "middleInitial": "", + "importedId": "25eJB6OqCVJsRZzmQJrwAw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170059, + "firstName": "Yue", + "lastName": "Yang", + "middleInitial": "", + "importedId": 
"5t1nNvyKtDf0yxfOhRqLDw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170060, + "firstName": "Zhihan", + "lastName": "Zheng", + "middleInitial": "", + "importedId": "HHLTBTMQ9P8hT8oPt1VFww", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170061, + "firstName": "Max", + "lastName": "Murphy", + "middleInitial": "", + "importedId": "GphmmyjbuhSPHzBrxucmvw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170062, + "firstName": "Oron", + "lastName": "Levy", + "middleInitial": "", + "importedId": "IA5_9q2a-yU6VSfKeSZp_A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170063, + "firstName": "Dor", + "lastName": "Abrahamson", + "middleInitial": "", + "importedId": "L_3X6Fd_QZxeBGJK0Ysa5g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170064, + "firstName": "Jason", + "lastName": "Wu", + "middleInitial": "", + "importedId": "AX66TvfSdtUHX3ZgCBIB1Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170065, + "firstName": "Mehmet Kerem", + "lastName": "Turkcan", + "middleInitial": "", + "importedId": "XXi5Gi58iIsXBw_lpFs3SA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170066, + "firstName": "Cassidy", + "lastName": "Cheesman", + "middleInitial": "", + "importedId": "W5EHMe36MBUPBGvorTIQKg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170067, + "firstName": "Chia-Yu", + "lastName": "Cheng", + "middleInitial": "", + "importedId": "d0QqsozXgk4yh3IX386YoQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170068, + "firstName": "Jacob", + "lastName": "Serfaty", + "middleInitial": "", + "importedId": "vDeoOFW9vs4QFkccHAuyuA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170069, + "firstName": "Emily", + "lastName": "Whiting", + "middleInitial": "", + "importedId": "99MfZnG-fz9ShdnNfwginA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170070, + "firstName": "Andrew", + "lastName": "Wilson", + "middleInitial": "D", + "importedId": "IvmaCBTet0szeoVlgtm83A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170071, + "firstName": "Majeed", + "lastName": "Kazemitabaar", + "middleInitial": "", + "importedId": "Ge2r5XbJJkcYZX96kxaEFw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170072, + "firstName": "Yizhong", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "U88Dy2K4-ZvbTEwexszolg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170073, + "firstName": "Shuchang", + "lastName": "Xu", + "middleInitial": "", + "importedId": "7vPlTj7UMCwJtE-SzmTQFw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170074, + "firstName": "Ruyu", + "lastName": "Yan", + "middleInitial": "", + "importedId": "5WXV-zfi7KFCM8fAqBLKJA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170075, + "firstName": "Xinyun", + "lastName": "Cao", + "middleInitial": "", + "importedId": "ilGVSfboC0a4L0Bf4uNCPw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170076, + "firstName": "ziyi", + "lastName": "wang", + "middleInitial": "", + "importedId": "NvRo-JNrjlz-l-NSjdd9UQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170077, + "firstName": "Qirui", + "lastName": "Sun", + "middleInitial": "", + "importedId": "AEH6LtM3NK0QSzfNwSIuBA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170078, + "firstName": "En-Huei", + "lastName": "Wu", + "middleInitial": "", + "importedId": "h0wdjvUqM5G8IESCVjCCUA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170079, + "firstName": "Yang", + "lastName": "Ouyang", + "middleInitial": "", + 
"importedId": "PwQ9d-PoVJlbbEJnFpooRw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170080, + "firstName": "Tianjian", + "lastName": "Liu", + "middleInitial": "", + "importedId": "-jYMTqDlLyw_qAbabycrEg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170081, + "firstName": "Ashiqur Rahman", + "lastName": "Amit", + "middleInitial": "", + "importedId": "6Y1wPbqvxux4NEoET77SyA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170082, + "firstName": "Zeyu", + "lastName": "Yan", + "middleInitial": "", + "importedId": "mUgdEK5HN9HRQbpOsze3_w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170083, + "firstName": "Duowei", + "lastName": "Xia", + "middleInitial": "", + "importedId": "GWMVwlDo0Doy286b3j3M5g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170084, + "firstName": "Yan-Ying", + "lastName": "Chen", + "middleInitial": "", + "importedId": "oKWJ4OO4tfYf1u7EDv-hXA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170085, + "firstName": "Esteban", + "lastName": "Gomez", + "middleInitial": "", + "importedId": "GXirxLGfyv2H2S1y9zR9zg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170086, + "firstName": "Sophie Ana", + "lastName": "Paris", + "middleInitial": "", + "importedId": "Ttz9Yox_1KLWw3V4NXSNPw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170087, + "firstName": "Victor", + "lastName": "Guimbretiere", + "middleInitial": "F", + "importedId": "61LFQlFcHGFE2CJ6-71eaQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170088, + "firstName": "Changsung", + "lastName": "Lim", + "middleInitial": "", + "importedId": "t1MfsUfswP4qxfQVnL0o6g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170089, + "firstName": "Yingcai", + "lastName": "Wu", + "middleInitial": "", + "importedId": "578wgyxUrFOd78ek47YfAQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170090, + "firstName": "Daniel", + "lastName": "Honrales", + "middleInitial": "", + "importedId": "SsWo7Ht-BRFJA3-9wg6o0w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170091, + "firstName": "Max", + "lastName": "Möbus", + "middleInitial": "", + "importedId": "EXVwJNIrcIINQl-hjwtp-g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170092, + "firstName": "Zhenhui", + "lastName": "Peng", + "middleInitial": "", + "importedId": "QLB33PjBTuHKiEDUE3PJJQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170093, + "firstName": "Lea", + "lastName": "Albaugh", + "middleInitial": "", + "importedId": "HNFoktgk7b8PSTzNm6U1ZA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170094, + "firstName": "Aditya Shekhar", + "lastName": "Nittala", + "middleInitial": "", + "importedId": "Etjddl9lyseaIki_U3rDrQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170095, + "firstName": "Archit", + "lastName": "Tamhane", + "middleInitial": "", + "importedId": "D_5jpWvhd6ml1L8tcKcjdQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170096, + "firstName": "Shiu", + "lastName": "Ng", + "middleInitial": "", + "importedId": "Sv9R1CmRxjW49NXErI3OwA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170097, + "firstName": "Guanhong", + "lastName": "Liu", + "middleInitial": "", + "importedId": "TQB1eu2hLzvKoAJMZ6mY4A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170098, + "firstName": "Sosuke", + "lastName": "Ichihashi", + "middleInitial": "", + "importedId": "QASvKK7iGKEcyxqqjf1IFw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170099, + "firstName": "Riki", + "lastName": 
"Takizawa", + "middleInitial": "", + "importedId": "dOhr8qM_-HTc5VAvw1pOdQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170100, + "firstName": "David", + "lastName": "Lindlbauer", + "middleInitial": "", + "importedId": "DYAQWrUdOzkbxikvLgQgHg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170101, + "firstName": "Ilan", + "lastName": "Moyer", + "middleInitial": "E", + "importedId": "3cYknIOdRFHKH_QWs7mYgg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170102, + "firstName": "Bingjian", + "lastName": "Huang", + "middleInitial": "", + "importedId": "flX5SQFkMKidAKhm893V6g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170103, + "firstName": "Guanyun", + "lastName": "Wang", + "middleInitial": "", + "importedId": "aNj3BsCl2b0b7FACxYJdKA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170104, + "firstName": "Jian", + "lastName": "Gao", + "middleInitial": "", + "importedId": "mkpQ36HPaJdnoCK56K9X-A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170105, + "firstName": "Philipp", + "lastName": "Wintersberger", + "middleInitial": "", + "importedId": "ppY-s6wNfRIQCeIlfKoDCA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170106, + "firstName": "Michael", + "lastName": "Bernstein", + "middleInitial": "S.", + "importedId": "batNJuLMTvp1yvs1G1gBHg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170107, + "firstName": "Lianyan", + "lastName": "Liu", + "middleInitial": "", + "importedId": "ALEvAXXk7qmEldYUKoQ9cw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170108, + "firstName": "Elodie", + "lastName": "Bouzbib", + "middleInitial": "", + "importedId": "i9w8k_huTo7ovunyMOVLhw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170109, + "firstName": "Stacy", + "lastName": "Cernova", + "middleInitial": "", + "importedId": "DTbtkqUZSDatbub522Ez_A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170110, + "firstName": "Mia", + "lastName": "Tang", + "middleInitial": "", + "importedId": "t5RnWSeq9zHoI3_yZVsDZw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170111, + "firstName": "Yu", + "lastName": "Lu", + "middleInitial": "", + "importedId": "zHPVv12kqwzUnvKbMtJfcQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170112, + "firstName": "Shunan", + "lastName": "Guo", + "middleInitial": "", + "importedId": "Pg1gHjaU0g0X3znG3ju3Uw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170113, + "firstName": "Huaishu", + "lastName": "Peng", + "middleInitial": "", + "importedId": "cISP1gspzEJZEfcOkN6sIg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170114, + "firstName": "Mengjia", + "lastName": "Zhu", + "middleInitial": "", + "importedId": "j2AVTjkRRacwaecKNtDgeA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170115, + "firstName": "Martin", + "lastName": "Feick", + "middleInitial": "", + "importedId": "0D4Zr0Q_AnaOfaM9Sq655Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170116, + "firstName": "YOON", + "lastName": "KIHEON", + "middleInitial": "", + "importedId": "XYK5t6CQoUggYUMsTj6iag", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170117, + "firstName": "Joshua", + "lastName": "Gorniak", + "middleInitial": "", + "importedId": "AkhjT5oZHqbtOL-maQXKQg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170118, + "firstName": "Longxi", + "lastName": "Gao", + "middleInitial": "", + "importedId": "42mcbyD-lR-a39uKJxoQBw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170119, + "firstName": "Yves", + 
"lastName": "Inglin", + "middleInitial": "", + "importedId": "HxI0h1CgSUoQGYHYPMihHg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170120, + "firstName": "Najja", + "lastName": "Marshall", + "middleInitial": "", + "importedId": "L23eSNDysZg0L4MnooEu4w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170121, + "firstName": "John", + "lastName": "Stasko", + "middleInitial": "", + "importedId": "bzl4LjIgHWKHVU7sD-O6kg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170122, + "firstName": "Xin Yue", + "lastName": "Li", + "middleInitial": "Amanda", + "importedId": "5-WVqj7se9UUG6tnddsYuA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170123, + "firstName": "Xiaohang", + "lastName": "Tang", + "middleInitial": "", + "importedId": "hZ0clmoUOvvE_12D7m_Kdw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170124, + "firstName": "Gabriel", + "lastName": "Lipkowitz", + "middleInitial": "", + "importedId": "hsKOI7-Wl5vZVlVFUuIJkw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170125, + "firstName": "Michael", + "lastName": "Malcolm", + "middleInitial": "C", + "importedId": "JAmOcEOA5mOvGRYZr-6Iow", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170126, + "firstName": "Willa Yunqi", + "lastName": "Yang", + "middleInitial": "", + "importedId": "piUdVxFzSKIxwdS0owntrA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170127, + "firstName": "Till Max", + "lastName": "Eckroth", + "middleInitial": "", + "importedId": "qn2ILOsxQOeRM9SNNmtHBw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170128, + "firstName": "Jiayan", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "1mh9UCsomH5K_lV6hmFbNA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170129, + "firstName": "Chenyi", + "lastName": "Shen", + "middleInitial": "", + "importedId": "ZcWksR1MwkD2YT887-Fknw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170130, + "firstName": "Jiaju", + "lastName": "Ma", + "middleInitial": "", + "importedId": "7b1cKp5CIrE7kyfrDhyh_Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170131, + "firstName": "Zach", + "lastName": "Dive", + "middleInitial": "", + "importedId": "d4uZgaJ_SRutbz-111lEQA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170132, + "firstName": "Suranga", + "lastName": "Nanayakkara", + "middleInitial": "", + "importedId": "BLBvqg3aFav-8-OggSRxoA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170133, + "firstName": "Han", + "lastName": "Wang", + "middleInitial": "", + "importedId": "wktHwVAWujrJbKhGBNwSug", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170134, + "firstName": "Kohei", + "lastName": "Miura", + "middleInitial": "", + "importedId": "vVYXy03eCjWmJ_Z4sEmY1w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170135, + "firstName": "Boyu", + "lastName": "Li", + "middleInitial": "", + "importedId": "Bbn92VZ70QnCe30PwjF7FA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170136, + "firstName": "Junyi", + "lastName": "Zhu", + "middleInitial": "", + "importedId": "e55KVEA-R2fiLofj_Ci0fg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170137, + "firstName": "Saif", + "lastName": "Mahmud", + "middleInitial": "", + "importedId": "oXPFTaZEZB12O8QdgoWGzQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170138, + "firstName": "Dazhen", + "lastName": "Deng", + "middleInitial": "", + "importedId": "vYyryKl_Lmaau_6EJ1qZ9w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170139, + 
"firstName": "Zhuoshu", + "lastName": "Li", + "middleInitial": "", + "importedId": "kTjYLvRDazHI9IsNw5-a7Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170140, + "firstName": "Tianyu", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "dy9v8virvlCI0fTS3HbmRg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170141, + "firstName": "Grace", + "lastName": "Huang", + "middleInitial": "", + "importedId": "_FGoZopyXeRolHexwOzrNw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170142, + "firstName": "Kora", + "lastName": "Regitz", + "middleInitial": "Persephone", + "importedId": "dVHDSxFGZJCwtJl-_ILhGg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170143, + "firstName": "Maurice", + "lastName": "Rekrut", + "middleInitial": "", + "importedId": "CtOG0x_FB6JK_eKz5ehhwA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170144, + "firstName": "Kevin", + "lastName": "Pu", + "middleInitial": "", + "importedId": "G0sYcmOgMBs-sF4VcCp5Bw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170145, + "firstName": "Qianyi", + "lastName": "Wang", + "middleInitial": "", + "importedId": "dP43C26JZyy7L5mSqXVC5g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170146, + "firstName": "Seung-Jun", + "lastName": "Lee", + "middleInitial": "", + "importedId": "QN6mbJQKJHqrmrqm40QqJw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170147, + "firstName": "Nicholas", + "lastName": "Bentley", + "middleInitial": "", + "importedId": "DJ3_yJV7_BEhUNx7Ya537w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170148, + "firstName": "Diego Adrian", + "lastName": "Gutnisky", + "middleInitial": "", + "importedId": "ouTXPcYtzlXCJCz5USNEpQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170149, + "firstName": "Valkyrie", + "lastName": "Savage", + "middleInitial": "", + "importedId": "S07-pS_XhfJSxmXiqXqgBQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170150, + "firstName": "Naoto", + "lastName": "Nishida", + "middleInitial": "", + "importedId": "aU2vy8rQ8eioC8yVntts2Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170151, + "firstName": "Guangtao", + "lastName": "Xue", + "middleInitial": "", + "importedId": "OFKXQbqct-GMoqzdrUilAw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170152, + "firstName": "Hai", + "lastName": "Dang", + "middleInitial": "", + "importedId": "sbimNJ2MBwJrSQvb3mLEVw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170153, + "firstName": "Xiaojun", + "lastName": "Bi", + "middleInitial": "", + "importedId": "fpWqo1rUmyvEL7HUbPbz6g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170154, + "firstName": "Hao", + "lastName": "Pan", + "middleInitial": "", + "importedId": "wHijscTyaENTXVJqTHXM3Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170155, + "firstName": "Yudai", + "lastName": "Tanaka", + "middleInitial": "", + "importedId": "bZXoz-mKCKaq3xd2DrrlTw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170156, + "firstName": "Lu", + "lastName": "Yuan", + "middleInitial": "", + "importedId": "m7G_n7WJJl5kOuYATJ1-vw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170157, + "firstName": "Huiting", + "lastName": "Liu", + "middleInitial": "", + "importedId": "qJfvA2n_wqQqcyDOXQbQmA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170158, + "firstName": "Mark", + "lastName": "Richardson", + "middleInitial": "", + "importedId": "PuURQvutnP3iC6huDdCYGg", + "source": "PCS", + "affiliations": [] + }, + { + 
"id": 170159, + "firstName": "Audrey", + "lastName": "Girouard", + "middleInitial": "", + "importedId": "rtjAxsI93XtRgU2ZgvZLJw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170160, + "firstName": "Qianxi", + "lastName": "Liu", + "middleInitial": "", + "importedId": "xwFz968ndxZkhhEF3wkgvg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170161, + "firstName": "Hiroki", + "lastName": "Watanabe", + "middleInitial": "", + "importedId": "j9uVhI0O6nKFZu_32F-wkg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170162, + "firstName": "Liqi", + "lastName": "Cheng", + "middleInitial": "", + "importedId": "151PKTE6yWlNG9v3tx0SFA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170163, + "firstName": "Dan", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "pYLPv5N3J_98S810ryjAyg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170164, + "firstName": "Jennifer", + "lastName": "Collinger", + "middleInitial": "", + "importedId": "ec_8yxhD3GZUSIgs4MDfmw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170165, + "firstName": "Xuelong", + "lastName": "Xie", + "middleInitial": "", + "importedId": "Rqw8KfGD3-BlKQ3pFCUT4g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170166, + "firstName": "Linping", + "lastName": "Yuan", + "middleInitial": "", + "importedId": "XIQfLXsDx_lUy8hHnGw2ig", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170167, + "firstName": "Jarin", + "lastName": "Thundathil", + "middleInitial": "", + "importedId": "my0j71FCtGuPoflyNKB4bw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170168, + "firstName": "Alexander", + "lastName": "Kyu", + "middleInitial": "", + "importedId": "zj2xo0RRlEPivJhlOTOORw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170169, + "firstName": "Xiaolong", + "lastName": "Li", + "middleInitial": "", + "importedId": "hp6D-jScfTHRPWvceJFRwA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170170, + "firstName": "Seok-Hyung", + "lastName": "Bae", + "middleInitial": "", + "importedId": "mHgyrQnjuhRIke1ZUM_TkA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170171, + "firstName": "Arvind", + "lastName": "Satyanarayan", + "middleInitial": "", + "importedId": "cz0xX8ktaPmcLlJsCyrWzg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170172, + "firstName": "Xinxin", + "lastName": "Qiu", + "middleInitial": "", + "importedId": "fUGukiuR_5D24OSgGQeVvw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170173, + "firstName": "Lukas", + "lastName": "Teufelberger", + "middleInitial": "", + "importedId": "qLHSAn0CYnpvsJQCiZ8sLw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170174, + "firstName": "Gierad", + "lastName": "Laput", + "middleInitial": "", + "importedId": "-hxGqsWlau2e_x7HgRywaQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170175, + "firstName": "Zackory", + "lastName": "Erickson", + "middleInitial": "", + "importedId": "vN_ktv4MtGGIu12hwEoxyQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170176, + "firstName": "Li", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "yIAC0Pqn2azCcAoIftw5rg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170177, + "firstName": "Qilong", + "lastName": "Cheng", + "middleInitial": "", + "importedId": "W74Bw8c8RPguMD-EKG4tpg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170178, + "firstName": "Sang Ho", + "lastName": "Yoon", + "middleInitial": "", + "importedId": "V1nG1KeCiKE0S_kGh27BVw", + "source": "PCS", + 
"affiliations": [] + }, + { + "id": 170179, + "firstName": "Matthew", + "lastName": "Hong", + "middleInitial": "K.", + "importedId": "PhsTv49gkgk94JqpiWdKug", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170180, + "firstName": "Jun", + "lastName": "Ye", + "middleInitial": "", + "importedId": "af_-3P5TKQjyB_Pl0IPDKg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170181, + "firstName": "Yijian", + "lastName": "Wen", + "middleInitial": "", + "importedId": "vb8dkZF28t4bAUbNNQNcWA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170182, + "firstName": "Yiwen", + "lastName": "Ren", + "middleInitial": "", + "importedId": "h-nMsjCxOSuUdVSFHJBCpQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170183, + "firstName": "Takuto", + "lastName": "Nakamura", + "middleInitial": "", + "importedId": "bNfAZvqyGjS4a4WZsKSsrQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170184, + "firstName": "Tal", + "lastName": "August", + "middleInitial": "", + "importedId": "zOwEHw4cmRW-_iTKDiA0vg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170185, + "firstName": "Munjeong", + "lastName": "Kim", + "middleInitial": "", + "importedId": "xjzgVUOW94CIAowSTY8Eng", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170186, + "firstName": "Tianyi", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "7taies-lvmk7BcDr022wLw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170187, + "firstName": "Maneesh", + "lastName": "Agrawala", + "middleInitial": "", + "importedId": "vEgkfTRwqF0MI1NyYHSNKA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170188, + "firstName": "Janice", + "lastName": "Hixon", + "middleInitial": "", + "importedId": "xGaGRvQKSrBR9tnjnYCYhw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170189, + "firstName": "Kaori", + "lastName": "Ikematsu", + "middleInitial": "", + "importedId": "ydhDL2ZEmScGGp3dMOVqOw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170190, + "firstName": "Yijie", + "lastName": "Guo", + "middleInitial": "", + "importedId": "mrXDKKRv2c7OmXm628ratw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170191, + "firstName": "Jeongju", + "lastName": "Park", + "middleInitial": "", + "importedId": "iK1DXNZhu-subi_eczoAcQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170192, + "firstName": "Hannah", + "lastName": "Twigg-Smith", + "middleInitial": "", + "importedId": "Xa--VOEHd48alXj_kMUuig", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170193, + "firstName": "Simret", + "lastName": "Gebreegziabher", + "middleInitial": "Araya", + "importedId": "C-0eru4VplPOG7fK7CagIw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170194, + "firstName": "Jiasheng", + "lastName": "Li", + "middleInitial": "", + "importedId": "KHjNqBmP5cM1cLgUQ20MNg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170195, + "firstName": "Haipeng", + "lastName": "Mi", + "middleInitial": "", + "importedId": "xhJhgCfvYh1iyoqbNPme0A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170196, + "firstName": "Kazuki", + "lastName": "Takashima", + "middleInitial": "", + "importedId": "Y23h22sxdG2T0yx4JY6Azg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170197, + "firstName": "Yunpeng", + "lastName": "Song", + "middleInitial": "", + "importedId": "Wny3feh9lxU73fI6ILAF5Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170198, + "firstName": "Naoki", + "lastName": "Yoshioka", + "middleInitial": "", + "importedId": 
"nlRW6rCd0rfeNvKqAdK8Zw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170199, + "firstName": "Pragya", + "lastName": "Kallanagoudar", + "middleInitial": "", + "importedId": "X1V0KvQ9VrpNHbq-E1oRjQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170200, + "firstName": "Mia Huong", + "lastName": "Nguyen", + "middleInitial": "", + "importedId": "lSzE0640VOZEUzSRM3z85A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170201, + "firstName": "Ercan", + "lastName": "Altinsoy", + "middleInitial": "", + "importedId": "JrDCrv6FXu6XUJ0bTQgSew", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170202, + "firstName": "Insik", + "lastName": "Shin", + "middleInitial": "", + "importedId": "9XzKTsq5CiYtgnDg0-YAtw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170203, + "firstName": "Mahshid", + "lastName": "Ghasemi", + "middleInitial": "", + "importedId": "5NAcssqRjzabjcdDbG8i_Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170204, + "firstName": "Kotaro", + "lastName": "Kitada", + "middleInitial": "", + "importedId": "7uwDOyZ83VrQq1VKP--gog", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170205, + "firstName": "Takegi", + "lastName": "Yoshimoto", + "middleInitial": "", + "importedId": "IaHqh-UCBTqdc4HdVs5GZA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170206, + "firstName": "Paul", + "lastName": "Dietz", + "middleInitial": "H", + "importedId": "jOW7EdVUqW1tGRfS3LC6eA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170207, + "firstName": "Daniel", + "lastName": "Wigdor", + "middleInitial": "", + "importedId": "Tiw2UpMIqNaYXKdFxVWQYw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170208, + "firstName": "Yujie", + "lastName": "Zhou", + "middleInitial": "", + "importedId": "UpvjV6-GWRED3uXZgxKIxw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170209, + "firstName": "Yujia", + "lastName": "Liu", + "middleInitial": "", + "importedId": "bkqlkNkrwVPMhuwGBhX8rA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170210, + "firstName": "Dian", + "lastName": "Ding", + "middleInitial": "", + "importedId": "G-SvJRYW_XbXZ7DDMcoh9Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170211, + "firstName": "Lei", + "lastName": "Ren", + "middleInitial": "", + "importedId": "ip6lO0ZwMCVlOckboM4GFQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170212, + "firstName": "Alexander", + "lastName": "Lingler", + "middleInitial": "", + "importedId": "cE2CKbCAGPYS3NG7Ovr1RA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170213, + "firstName": "HyunA", + "lastName": "Seo", + "middleInitial": "", + "importedId": "uejWbiaQwwjQPGofEhB23Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170214, + "firstName": "Matthew", + "lastName": "Lee", + "middleInitial": "L", + "importedId": "LukkGyhKsCQKUxnpdXQT3w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170215, + "firstName": "Billy", + "lastName": "Shi", + "middleInitial": "", + "importedId": "VecNOMh45lXZxT18dZoj_Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170216, + "firstName": "Yuecheng", + "lastName": "Peng", + "middleInitial": "", + "importedId": "r4wSpaWtqZ5Ldqf9Zc2W-A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170217, + "firstName": "Richa", + "lastName": "Pandey", + "middleInitial": "", + "importedId": "s3EUw2bm2zqlHu_ibZB_MQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170218, + "firstName": "Ranjay", + "lastName": "Krishna", + "middleInitial": "", + 
"importedId": "6I7eZuoa0D8a52WSjgZ7_Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170219, + "firstName": "Xi Laura", + "lastName": "Cang", + "middleInitial": "", + "importedId": "axK-s6oElI4ppBkH2eSAMg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170220, + "firstName": "Irmandy", + "lastName": "Wicaksono", + "middleInitial": "", + "importedId": "eXH4jBLcdI3xRJ5yJAi9fg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170221, + "firstName": "Yalong", + "lastName": "Yang", + "middleInitial": "", + "importedId": "aBVmqwNznuZ1qqpvt3hdAg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170222, + "firstName": "Tetsushi", + "lastName": "Ikeda", + "middleInitial": "", + "importedId": "FPac9HANkq0kQfntkJRglQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170223, + "firstName": "Yunxiao", + "lastName": "Wang", + "middleInitial": "", + "importedId": "M2fwtaa-uWbFvOjY8UIe8w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170224, + "firstName": "Lining", + "lastName": "Yao", + "middleInitial": "", + "importedId": "wy-8_BE-UutnZpdDwndBwA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170225, + "firstName": "Huaxin", + "lastName": "Wei", + "middleInitial": "", + "importedId": "0cVuh_jOXmDlUiN5UBvvfw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170226, + "firstName": "Christopher", + "lastName": "Collins", + "middleInitial": "", + "importedId": "lEpflhaARjgjLLfjRHiIoQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170227, + "firstName": "Eiji", + "lastName": "Hayashi", + "middleInitial": "", + "importedId": "c7kSlZzIXEyNoqzS6yQG4g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170228, + "firstName": "Gilbert", + "lastName": "Bernstein", + "middleInitial": "", + "importedId": "1DDcb2MRXIGLvlAeSNoTOA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170229, + "firstName": "Joe", + "lastName": "Finney", + "middleInitial": "", + "importedId": "tksyFxZ85WhtCiu_BJBMUw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170230, + "firstName": "Ludwig", + "lastName": "Sidenmark", + "middleInitial": "", + "importedId": "W7TKijmt2kRjU8CpumMHnA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170231, + "firstName": "Junyi", + "lastName": "Zhao", + "middleInitial": "", + "importedId": "U47Fz9-kyhJoXF_rK3KmYA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170232, + "firstName": "Juntao", + "lastName": "Zhou", + "middleInitial": "", + "importedId": "_asaI1CXp5xlzWm2X0tLyw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170233, + "firstName": "Chaoyi", + "lastName": "Lin", + "middleInitial": "", + "importedId": "GOXpQHClCsFsrwsXvsIT2A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170234, + "firstName": "Seoyun", + "lastName": "Son", + "middleInitial": "", + "importedId": "NBiIWzAGBpSV3-so1g1naQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170235, + "firstName": "Jiahao", + "lastName": "Li", + "middleInitial": "Nick", + "importedId": "bgSnRcUdmELWFP0B2fFbIg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170236, + "firstName": "Shun", + "lastName": "Hanai", + "middleInitial": "", + "importedId": "8P4ADZTsVSS2cPhNteAO7g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170237, + "firstName": "Uksang", + "lastName": "Yoo", + "middleInitial": "", + "importedId": "tg5WMkL7w6BSY4DAWCEp_Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170238, + "firstName": "Thomas", + "lastName": "Leimkühler", + 
"middleInitial": "", + "importedId": "CXTV81PVn0XQraxD4DCYoQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170239, + "firstName": "Steve", + "lastName": "Oney", + "middleInitial": "", + "importedId": "sMjcEFlGeL901IRYOpJFDw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170240, + "firstName": "Mark", + "lastName": "Richardson", + "middleInitial": "", + "importedId": "ykRc9JNaEpWjfBzzOeVAsw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170241, + "firstName": "Ian", + "lastName": "Drosos", + "middleInitial": "", + "importedId": "Ak_eF4-oZWR2opwFa-8nSA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170242, + "firstName": "Jesse", + "lastName": "Vig", + "middleInitial": "", + "importedId": "3Wq4vS7MJaW9DDNlX2lbdA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170243, + "firstName": "Emily", + "lastName": "Xie", + "middleInitial": "Liying", + "importedId": "J-CKTvQmw5OcIDDLrQR5AA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170244, + "firstName": "Ken", + "lastName": "Nakagaki", + "middleInitial": "", + "importedId": "lsYt2lERJ_k5SfHL9Sztcw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170245, + "firstName": "Alexander", + "lastName": "Bakogeorge", + "middleInitial": "", + "importedId": "PvkOKxDHUTMDKIHG2r4NiQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170246, + "firstName": "Anav", + "lastName": "Chaudhary", + "middleInitial": "", + "importedId": "bcZqNwWEpfeVpQckgOUudg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170247, + "firstName": "Yu", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "n4KrA8dWYAC7Y47PtOQ-dg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170248, + "firstName": "Raouf", + "lastName": "Abujaber", + "middleInitial": "", + "importedId": "U2FClfT1OTm3iiHfpQOuhw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170249, + "firstName": "Evan", + "lastName": "Torrence", + "middleInitial": "", + "importedId": "KMM4Pr3gvZvMiXQ7NRe8YA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170250, + "firstName": "Yun Suen", + "lastName": "Pai", + "middleInitial": "", + "importedId": "XlB4ArLajcCfMwb9KBUFBA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170251, + "firstName": "Aniket", + "lastName": "Kittur", + "middleInitial": "", + "importedId": "dvITl7CjKRGhLNC7tWO8aw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170252, + "firstName": "Tianyi", + "lastName": "Wang", + "middleInitial": "", + "importedId": "oBPFyRs7TRlZaYrUGRdbrA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170253, + "firstName": "Karthik", + "lastName": "Ramani", + "middleInitial": "", + "importedId": "ltnbQF0vXB-RYkZ1s9rlgg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170254, + "firstName": "Shumin", + "lastName": "Zhai", + "middleInitial": "", + "importedId": "W_RCVI8Bqn5DhSURFBnu8g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170255, + "firstName": "Augusto", + "lastName": "Esteves", + "middleInitial": "", + "importedId": "7f2iQ_8H8O1G87Aj2KDagw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170256, + "firstName": "Masatoshi", + "lastName": "Hamanaka", + "middleInitial": "", + "importedId": "WxMgc_YluJaF5Oj64yDhZA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170257, + "firstName": "Sieun", + "lastName": "Kim", + "middleInitial": "", + "importedId": "UfbdU-kHl8C2FZa7bZm5ig", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170258, + "firstName": "Geraldine", + 
"lastName": "Dawson", + "middleInitial": "", + "importedId": "ql1v3OwEQ2GjkaBPVv36vA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170259, + "firstName": "Yoshiki", + "lastName": "Minato", + "middleInitial": "", + "importedId": "KjuF37T0Y0oMcP9z1TLfGg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170260, + "firstName": "Chenghao", + "lastName": "Pan", + "middleInitial": "", + "importedId": "8FelJMhwqSlf9z1jh26t7A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170261, + "firstName": "Eric", + "lastName": "Whitmire", + "middleInitial": "", + "importedId": "ZvzHGj8ux9_EXihByugvsA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170262, + "firstName": "Chia-An", + "lastName": "Fan", + "middleInitial": "", + "importedId": "oLDLKHEz-WGChAQl57XpiA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170263, + "firstName": "Qi", + "lastName": "Wang", + "middleInitial": "", + "importedId": "cUBJOm_C2y7u-RRVpdQzbQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170264, + "firstName": "Peter", + "lastName": "Walkington", + "middleInitial": "", + "importedId": "CL8pGcsge4j9L3DHXwU0Lw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170265, + "firstName": "Daniel", + "lastName": "Weiner", + "middleInitial": "", + "importedId": "k4Gheh1JYMeSHED6keUoNA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170266, + "firstName": "Abe", + "lastName": "Davis", + "middleInitial": "", + "importedId": "GuGJPj5RBrFll7dqys_ikQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170267, + "firstName": "Philipp", + "lastName": "Hallgarten", + "middleInitial": "", + "importedId": "3PzdgiteqjND8wOV00lpBQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170268, + "firstName": "Xiaojuan", + "lastName": "Ma", + "middleInitial": "", + "importedId": "tOS6C4kPL7t30lwzz_2A4g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170269, + "firstName": "Andreas", + "lastName": "Asferg Jacobsen", + "middleInitial": "", + "importedId": "sUf25B2eHI9tqPNKsqE_yg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170270, + "firstName": "Andrea", + "lastName": "Colaço", + "middleInitial": "", + "importedId": "RTiEZFe1SR4PU_ub9q5QRQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170271, + "firstName": "Anup", + "lastName": "Sathya", + "middleInitial": "", + "importedId": "ZjNYEfkCTRnIr8xuZScj-g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170272, + "firstName": "Tamil Selvan", + "lastName": "Gunasekaran", + "middleInitial": "", + "importedId": "HE2vYKSlsZslrL5ODruSAg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170273, + "firstName": "Jaime", + "lastName": "Lien", + "middleInitial": "", + "importedId": "6pN1wQO0gHMCcn3MJrXdyA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170274, + "firstName": "Rachel", + "lastName": "Stukenborg", + "middleInitial": "", + "importedId": "ioncC0NGU-cqEMvapLt-sA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170275, + "firstName": "Devyani", + "lastName": "McLaren", + "middleInitial": "", + "importedId": "2FA41rkaVKAtvadA1jqn7g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170276, + "firstName": "Purnima", + "lastName": "Padmanabhan", + "middleInitial": "", + "importedId": "6apbK6hQqswhRaq_4D1_Dw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170277, + "firstName": "Zeyu", + "lastName": "Huang", + "middleInitial": "", + "importedId": "FYEpoWu4BY_2g-VUf2l3QQ", + "source": "PCS", + "affiliations": [] + }, + 
{ + "id": 170278, + "firstName": "Shogo", + "lastName": "Tomaru", + "middleInitial": "", + "importedId": "xyCb0oBmVoqXhakTUBi8cA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170279, + "firstName": "Liang", + "lastName": "Wang", + "middleInitial": "", + "importedId": "_DpLwFt1jbNiBQ_rgD0uCQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170280, + "firstName": "Soheil", + "lastName": "Kianzad", + "middleInitial": "", + "importedId": "jzn3gGO0knPnG8UAayDtNQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170281, + "firstName": "Sheng Cian", + "lastName": "Lee", + "middleInitial": "", + "importedId": "C2tt6ANCJsw_BmKu3HllGA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170282, + "firstName": "Patrick", + "lastName": "Baudisch", + "middleInitial": "", + "importedId": "o7VwMA3PuVJclKFtwCL87A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170283, + "firstName": "Henny", + "lastName": "Admoni", + "middleInitial": "", + "importedId": "PvQifVLg7sDurlZBviRebQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170284, + "firstName": "Yue", + "lastName": "Jiang", + "middleInitial": "", + "importedId": "ahbKJX6tZ-HRai4ehEHC-w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170285, + "firstName": "Yiyuan", + "lastName": "Sun", + "middleInitial": "", + "importedId": "ZVVlrrsESPaA_nOG-yz6ZQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170286, + "firstName": "Karin", + "lastName": "Ohara", + "middleInitial": "", + "importedId": "HZv8RPQNLE_MGwP7cerSIw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170287, + "firstName": "Youngki", + "lastName": "Lee", + "middleInitial": "", + "importedId": "uBOK1lEVpZtbGebYcLJcog", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170288, + "firstName": "Noah", + "lastName": "Ponto", + "middleInitial": "", + "importedId": "p1BHAdAVP3OXxJDc4Rxp-g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170289, + "firstName": "Feng", + "lastName": "Tian", + "middleInitial": "", + "importedId": "e4x280wrNe6DjEzW6pjtvQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170290, + "firstName": "Lingyun", + "lastName": "Yu", + "middleInitial": "", + "importedId": "R7Nh49O-uyOOyzPxWd1VsQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170291, + "firstName": "Dizhi", + "lastName": "Ma", + "middleInitial": "", + "importedId": "oAY6o04sA6YcCdP3v1oq7Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170292, + "firstName": "Cedric", + "lastName": "Ho", + "middleInitial": "", + "importedId": "UoZU4ePuEPckRKKn_19Trg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170293, + "firstName": "Anyi", + "lastName": "Rao", + "middleInitial": "", + "importedId": "f7f8uIt7O1MBP3qpw4tVrg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170294, + "firstName": "Ross", + "lastName": "Daly", + "middleInitial": "", + "importedId": "Y8gbq1lXIyTP3EIlMZUzfw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170295, + "firstName": "Alexandra", + "lastName": "Ion", + "middleInitial": "", + "importedId": "nPzcOO-p9-FlXrBByTE0kg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170296, + "firstName": "Yoshifumi", + "lastName": "Kitamura", + "middleInitial": "", + "importedId": "XwNdfZ209hp61dXkmG71tA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170297, + "firstName": "Yotam", + "lastName": "Gingold", + "middleInitial": "", + "importedId": "3ce-PdFWtR4maqkeBtBUfQ", + "source": "PCS", + "affiliations": [] + }, + { + 
"id": 170298, + "firstName": "Anhong", + "lastName": "Guo", + "middleInitial": "", + "importedId": "EE_pQo_cejBcQ4Xv5yZujw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170299, + "firstName": "Zulekha", + "lastName": "Karachiwalla", + "middleInitial": "", + "importedId": "4Mqe62KErgG_YWBUuSJ5Vg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170300, + "firstName": "Tobias", + "lastName": "Grosse-Puppendahl", + "middleInitial": "", + "importedId": "BI7c3UvafMc98Ev8dJ5nzQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170301, + "firstName": "Gaurav", + "lastName": "Jain", + "middleInitial": "", + "importedId": "SEPfc3LF6kZtFUhIFL4eEw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170302, + "firstName": "Gromit Yeuk-Yin", + "lastName": "Chan", + "middleInitial": "", + "importedId": "kTzyirEDKhu7W9B5pAAyWA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170303, + "firstName": "Junichiro", + "lastName": "Kadomoto", + "middleInitial": "", + "importedId": "Ye50LQGEOhRzRURPbdvSLg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170304, + "firstName": "Ryo", + "lastName": "Masuda", + "middleInitial": "", + "importedId": "Wf2FwACasXM_sHb6B7Lcig", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170305, + "firstName": "Mehmet", + "lastName": "Ozdemir", + "middleInitial": "", + "importedId": "PzHt18ye5izd8Q6BWPgbiw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170306, + "firstName": "Svetlana", + "lastName": "Yarosh", + "middleInitial": "", + "importedId": "JAfxlDfoMKx7_h7uPj4g_w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170307, + "firstName": "Linxin", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "9TyKkKNoNfTrs8OHknvh3w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170308, + "firstName": "Jingming", + "lastName": "Dong", + "middleInitial": "", + "importedId": "GLG6_fKVyDT-cGSxhYwWzA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170309, + "firstName": "Ruowang", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "qshQMMJokuiafW6kdYnTSw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170310, + "firstName": "Xing-Dong", + "lastName": "Yang", + "middleInitial": "", + "importedId": "qHjKmi-3r1K3BNbRMD2C6A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170311, + "firstName": "Kouta", + "lastName": "Minamizawa", + "middleInitial": "", + "importedId": "cGigxc4f9I7rVVWPqzvZSQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170312, + "firstName": "Manabu", + "lastName": "Yoshida", + "middleInitial": "", + "importedId": "ZFOQRTj8IIzG45qa78VpRQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170313, + "firstName": "yachun", + "lastName": "fan", + "middleInitial": "", + "importedId": "4mP3qf-nf4Wf1H-VjPsYZw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170314, + "firstName": "Nabin", + "lastName": "Khanal", + "middleInitial": "", + "importedId": "L5Uuw8csDdAekX3yMjkZnA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170315, + "firstName": "Cristi", + "lastName": "Isaula-Reyes", + "middleInitial": "", + "importedId": "OsqB_PNCXWv7vRZHQBtXVA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170316, + "firstName": "Lorraine", + "lastName": "Underwood", + "middleInitial": "", + "importedId": "b2g5Lirc21r5rpoGEMU1Aw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170317, + "firstName": "Eunyee", + "lastName": "Koh", + "middleInitial": "", + "importedId": 
"PMoKh21thXimpL9JTzoZJg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170318, + "firstName": "Jose", + "lastName": "Martinez Castro", + "middleInitial": "Francisco", + "importedId": "huvhkH2r2FxVvAtJY6-Nug", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170319, + "firstName": "Yanjun", + "lastName": "Chen", + "middleInitial": "", + "importedId": "klOTd6vvnIEtnWv8YHKxIQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170320, + "firstName": "Himani", + "lastName": "Deshpande", + "middleInitial": "", + "importedId": "JTPZsm2sufeTlSIwet6Ulg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170321, + "firstName": "Michael", + "lastName": "Khbeis", + "middleInitial": "", + "importedId": "mHA0TpVWSIH5nTH1eOpOFg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170322, + "firstName": "Tianyu", + "lastName": "Lao", + "middleInitial": "", + "importedId": "HFxeAeMxUUDz0RgOFiwKjg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170323, + "firstName": "Shunta", + "lastName": "Suzuki", + "middleInitial": "", + "importedId": "lZdo8Q_vBt8HUZV2N2fLBg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170324, + "firstName": "Zhenxuan", + "lastName": "He", + "middleInitial": "", + "importedId": "PN2MmTyP4iG9nLFAin4Dlg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170325, + "firstName": "Ethan", + "lastName": "Yang", + "middleInitial": "", + "importedId": "SFnrnhMTQSt4eNW65-sjpQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170326, + "firstName": "Xingbo", + "lastName": "Wang", + "middleInitial": "", + "importedId": "ynSCVhtuQpQtFHEWO8znFA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170327, + "firstName": "Emanuele", + "lastName": "Formento", + "middleInitial": "", + "importedId": "_qZuuEFBHfJ5eZsia8O_Zw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170328, + "firstName": "Liang", + "lastName": "He", + "middleInitial": "", + "importedId": "n6CDwP8NP99UPUyk2xhhVA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170329, + "firstName": "Jihong", + "lastName": "Jeung", + "middleInitial": "", + "importedId": "3jHISuaq2uQcZT-SIlaeTQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170330, + "firstName": "Andreas", + "lastName": "Fender", + "middleInitial": "Rene", + "importedId": "aGZO97mF9CBleMOKcUJ0Cw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170331, + "firstName": "Yatharth", + "lastName": "Singhal", + "middleInitial": "", + "importedId": "tveVI_iA_t4d1xhxpIwzAQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170332, + "firstName": "Rishi", + "lastName": "Rajalingham", + "middleInitial": "", + "importedId": "5Uew2oHu2nf2s2YmC1eSkw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170333, + "firstName": "Xiaoyang", + "lastName": "Wu", + "middleInitial": "", + "importedId": "gOLQekfk2ifQCxGzEt--jg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170334, + "firstName": "Alan", + "lastName": "Shen", + "middleInitial": "", + "importedId": "LvfzLKRW-ujIgsp_-UwykA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170335, + "firstName": "Yuan", + "lastName": "Tian", + "middleInitial": "", + "importedId": "qcOUAb-ui3UDwxDg6YAU0A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170336, + "firstName": "Robert", + "lastName": "Wang", + "middleInitial": "", + "importedId": "JS49VzxwL4w_sDFFZ8Zvlg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170337, + "firstName": "Zoran", + "lastName": "Kostic", + 
"middleInitial": "", + "importedId": "4-mST_BpxKrJeiZYSCExXQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170338, + "firstName": "Susanne", + "lastName": "Schmidt", + "middleInitial": "", + "importedId": "nty9T-FExBWpAOmqzwYWjQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170339, + "firstName": "Anthony", + "lastName": "Tang", + "middleInitial": "", + "importedId": "YGO4_l5XRHyTv-hGHbY83g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170340, + "firstName": "Angelina", + "lastName": "Zheng", + "middleInitial": "J", + "importedId": "PwES2RG8bvzqw_hdH0VZxw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170341, + "firstName": "YINGQING", + "lastName": "XU", + "middleInitial": "", + "importedId": "H75qy6n2841HbEHktOfnxA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170342, + "firstName": "Shanqing", + "lastName": "Cai", + "middleInitial": "", + "importedId": "3OCc4psTo4ZnR_gIv2aENg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170343, + "firstName": "Deying", + "lastName": "Pan", + "middleInitial": "", + "importedId": "E5mcgU3lE6y5hgAECvv_5w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170344, + "firstName": "Shm", + "lastName": "Almeda", + "middleInitial": "Garanganao", + "importedId": "xR2O1o1shoxNo6TSOBRLHQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170345, + "firstName": "Jiafei", + "lastName": "Duan", + "middleInitial": "", + "importedId": "So0GOzbq7m6UD1RqlGKsSA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170346, + "firstName": "Ian", + "lastName": "Arawjo", + "middleInitial": "", + "importedId": "9Cmnmubftugec7sLlAw1QQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170347, + "firstName": "Stefanie", + "lastName": "Mueller", + "middleInitial": "", + "importedId": "5vj196FvEHPKhaKPi53vKw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170348, + "firstName": "Chunyang", + "lastName": "Chen", + "middleInitial": "", + "importedId": "DKApI2jSxea4NOOuL6zZtg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170349, + "firstName": "Robert", + "lastName": "Xiao", + "middleInitial": "", + "importedId": "MG2oLsriVTMXVgzxme3DOw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170350, + "firstName": "Clara", + "lastName": "Lempert", + "middleInitial": "", + "importedId": "qOrwRKiwxuilXs8kr6Q1RA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170351, + "firstName": "Andreas", + "lastName": "Bulling", + "middleInitial": "", + "importedId": "8yArQHb_3Ykoz836YJ3feQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170352, + "firstName": "Rolando", + "lastName": "Garcia", + "middleInitial": "", + "importedId": "jPgianUvUVtnRxGd6vjdjg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170353, + "firstName": "Shugao", + "lastName": "Ma", + "middleInitial": "", + "importedId": "SU7HWfSp22qufEOeRtWeeQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170354, + "firstName": "Minhyeok", + "lastName": "Baek", + "middleInitial": "", + "importedId": "DrylKXc0nkqs1pYdN0zdyA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170355, + "firstName": "Chien-Sheng", + "lastName": "Wu", + "middleInitial": "", + "importedId": "OIDMmvh5bDfl6_BfZi3WEA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170356, + "firstName": "Don Derek", + "lastName": "Haddad", + "middleInitial": "", + "importedId": "PAxHYCf6RZnK5CxeNTcMyg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170357, + "firstName": "Mr 
Ryo", + "lastName": "Hajika", + "middleInitial": "", + "importedId": "keSmrgE2ME4rBDgU5VSjKQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170358, + "firstName": "Christian", + "lastName": "Holz", + "middleInitial": "", + "importedId": "iLPlU_z1dah9YY__uEeXsw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170359, + "firstName": "Frank", + "lastName": "Steinicke", + "middleInitial": "", + "importedId": "w2YEfsGpoTqbpROww8WcOQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170360, + "firstName": "Damyon", + "lastName": "Kim", + "middleInitial": "", + "importedId": "y-JzVKsy3G3ij_HChnU06A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170361, + "firstName": "Devon", + "lastName": "McKeon", + "middleInitial": "", + "importedId": "Eg7smNfyIRJtGTciafw6cg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170362, + "firstName": "Yuze", + "lastName": "Gao", + "middleInitial": "", + "importedId": "8bNjmCNzZNBQgjk-mNcX1Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170363, + "firstName": "Alice", + "lastName": "Haynes", + "middleInitial": "C", + "importedId": "tutDwFJhpJI4AWug556JXA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170364, + "firstName": "Cathy Mengying", + "lastName": "Fang", + "middleInitial": "", + "importedId": "dkc8r1KzapyIhtY5n8lkOQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170365, + "firstName": "Nikola", + "lastName": "Banovic", + "middleInitial": "", + "importedId": "udYTUOwnTjTAWBUfwgj19A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170366, + "firstName": "Ye", + "lastName": "Tao", + "middleInitial": "", + "importedId": "QN3EMChnkOYCdgFNtITJTA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170367, + "firstName": "Roland", + "lastName": "Krisztandl", + "middleInitial": "", + "importedId": "GppBBTcvGKfrrYvY8KY9hQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170368, + "firstName": "Yi-Hao", + "lastName": "Peng", + "middleInitial": "", + "importedId": "mA32XskTkKZ9pUU0qJYRTQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170369, + "firstName": "Shichao", + "lastName": "Huang", + "middleInitial": "", + "importedId": "BMFtOVaeN-fTz0BXiVeOZQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170370, + "firstName": "Ben", + "lastName": "Wernicke", + "middleInitial": "", + "importedId": "W90ZunTFJOW5g7eSrKlLoA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170371, + "firstName": "Ran", + "lastName": "Zhou", + "middleInitial": "", + "importedId": "XjHLZwrjJo6SplEsbuMorQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170372, + "firstName": "Hsin-Ni", + "lastName": "Ho", + "middleInitial": "", + "importedId": "hEYSV5z67oUFRbpOmYm6Cg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170373, + "firstName": "Mehdi", + "lastName": "Gouasmi", + "middleInitial": "", + "importedId": "oW275-LHAQeIH4mGHwrwbg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170374, + "firstName": "Chris", + "lastName": "Harrison", + "middleInitial": "", + "importedId": "s4DDgcdMxU0xmPJmhyUsNQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170375, + "firstName": "Stefanos", + "lastName": "Nikolaidis", + "middleInitial": "", + "importedId": "3xIQxws42Gu8S61quc82xQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170376, + "firstName": "Pedro", + "lastName": "Lopes", + "middleInitial": "", + "importedId": "vu7xKEsb0s2o9WvEByjw7w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170377, + 
"firstName": "Raymond", + "lastName": "Fok", + "middleInitial": "", + "importedId": "jXq_QtC0baa0c0TQuJEkbQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170378, + "firstName": "Rajesh", + "lastName": "Balan", + "middleInitial": "", + "importedId": "VvIassImxVR7pywru9I1Qg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170379, + "firstName": "Xun", + "lastName": "Qian", + "middleInitial": "", + "importedId": "Mzd41Mu9XTzsFqvxf99-oA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170380, + "firstName": "Tsubasa", + "lastName": "Saito", + "middleInitial": "", + "importedId": "EfJ5yeFm-W9UjrztKAnrKg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170381, + "firstName": "Maja", + "lastName": "Mataric", + "middleInitial": "", + "importedId": "Qj4BzCAFJO15QzgsHd_hMA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170382, + "firstName": "Juheon", + "lastName": "Yi", + "middleInitial": "", + "importedId": "FSiovMYmIq651CGs0mAEwA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170383, + "firstName": "Mingxu", + "lastName": "Zhou", + "middleInitial": "", + "importedId": "X0VqOPin5VY0GxuZs4rwgQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170384, + "firstName": "Stephanie", + "lastName": "Naufel", + "middleInitial": "", + "importedId": "w3x2p0Q4i7nd5pb_H82a9Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170385, + "firstName": "Basel", + "lastName": "Hindi", + "middleInitial": "", + "importedId": "cSinB5rgdfCAygGGznd6Fw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170386, + "firstName": "Vimal", + "lastName": "Mollyn", + "middleInitial": "", + "importedId": "9bLhzLgJ7rym4AIif7somA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170387, + "firstName": "Carmel", + "lastName": "Majidi", + "middleInitial": "", + "importedId": "RoW834kWcaWIPakMTwdhcw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170388, + "firstName": "Alex", + "lastName": "Mazursky", + "middleInitial": "", + "importedId": "7BMFQW2Ko9OVQblRuGVH3Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170389, + "firstName": "Sanjay", + "lastName": "Varghese", + "middleInitial": "", + "importedId": "7MYCUk5pR7i2dTJl8G_FDA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170390, + "firstName": "Daniel", + "lastName": "Vogel", + "middleInitial": "", + "importedId": "VH8XpxsJEUlDmsjNcBbGOA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170391, + "firstName": "SeungJun", + "lastName": "Kim", + "middleInitial": "", + "importedId": "UzsGew0kYu72FK5pPTU3jg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170392, + "firstName": "Billy", + "lastName": "Dou", + "middleInitial": "", + "importedId": "tWOF4mQKXNGAMWWkrv43Gw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170393, + "firstName": "Haijun", + "lastName": "Xia", + "middleInitial": "", + "importedId": "qR18CZssIG8qNcFRCGcljg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170394, + "firstName": "Yijing", + "lastName": "Jiang", + "middleInitial": "", + "importedId": "AHhZriooqR2FewKkGX0odg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170395, + "firstName": "Tovi", + "lastName": "Grossman", + "middleInitial": "", + "importedId": "y0gW9n9_FRp81xTibIb1pg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170396, + "firstName": "Kayvon", + "lastName": "Fatahalian", + "middleInitial": "", + "importedId": "oY9pcCLf8ZluY7izf0iiEQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 
170397, + "firstName": "Koushik", + "lastName": "Srinivasula", + "middleInitial": "", + "importedId": "8SMfngqpiVksLkVAMNr7AA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170398, + "firstName": "Kyunghwan", + "lastName": "Kim", + "middleInitial": "", + "importedId": "ttvfnkW7aulxHroeXJcfxQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170399, + "firstName": "Ken", + "lastName": "Takaki", + "middleInitial": "", + "importedId": "aUMpjyUeFeyjUicQc6lgPw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170400, + "firstName": "Soma", + "lastName": "Narita", + "middleInitial": "", + "importedId": "BFqMqJn8Tc6-WnB3L1lyxA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170401, + "firstName": "Aditya", + "lastName": "Parameswaran", + "middleInitial": "", + "importedId": "vCw9wwMmYCb0qLinmecNTg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170402, + "firstName": "Henry", + "lastName": "Hoffman", + "middleInitial": "", + "importedId": "duk1cnVb0dP5e6tg1VfDrg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170403, + "firstName": "Advait", + "lastName": "Sarkar", + "middleInitial": "", + "importedId": "pgyTBxlnpdhDNVL8LEsfwA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170404, + "firstName": "Yunzhan", + "lastName": "Zhou", + "middleInitial": "", + "importedId": "oEAAezRCCrt0nSVvm9NzQQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170405, + "firstName": "Thomas", + "lastName": "Pietrzak", + "middleInitial": "", + "importedId": "KmElHP6vxBvjChWjJoti3w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170406, + "firstName": "Kimberly L.H.", + "lastName": "Carpenter", + "middleInitial": "", + "importedId": "l8BYV-k2UrdIUXJGtf7qtA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170407, + "firstName": "Anton", + "lastName": "Hackl", + "middleInitial": "Friedrich", + "importedId": "_xAeNqscF0gtr7fcP-QkYg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170408, + "firstName": "Tim", + "lastName": "Rolff", + "middleInitial": "", + "importedId": "oLYBD-AiaA2vPEVPY2YTeQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170409, + "firstName": "Yu", + "lastName": "Fu", + "middleInitial": "", + "importedId": "DwkofBecA5-X7Q2WBPaLdw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170410, + "firstName": "Pragathi", + "lastName": "Praveena", + "middleInitial": "", + "importedId": "uvQbsrvxThw_BU5L3eSUZQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170411, + "firstName": "Yuhao", + "lastName": "Zhu", + "middleInitial": "", + "importedId": "JquQx-RKloJmxcQcStNRLQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170412, + "firstName": "Miroslav", + "lastName": "Bachinski", + "middleInitial": "", + "importedId": "AYKi0LfBnNWYryIvgeYlsw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170413, + "firstName": "Luca", + "lastName": "Musk", + "middleInitial": "", + "importedId": "r0fZ4cQe67KCOj759rQiOg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170414, + "firstName": "Chang", + "lastName": "Xiao", + "middleInitial": "", + "importedId": "zj-3Auk1zTBUkAHqgdGAYA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170415, + "firstName": "Fadi", + "lastName": "Botros", + "middleInitial": "", + "importedId": "7R_FQYik3SU4LfdsP-IlrA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170416, + "firstName": "Benedict", + "lastName": "Leung", + "middleInitial": "", + "importedId": "KkiW-0qMlXVNZm22r2unRg", + "source": "PCS", + 
"affiliations": [] + }, + { + "id": 170417, + "firstName": "Yuki", + "lastName": "Igarashi", + "middleInitial": "", + "importedId": "IXU9I0DdSHApTJNWEe1cCw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170418, + "firstName": "Yizhong", + "lastName": "Chen", + "middleInitial": "", + "importedId": "LFG0strL4-Lqi7bBWj7v7Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170419, + "firstName": "Hui", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "LGxgMYv7TRTuxqOm8dc_pw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170420, + "firstName": "Wieland", + "lastName": "Storch", + "middleInitial": "", + "importedId": "bidJSNea2FOTH5LGxqOkzw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170421, + "firstName": "Vikram", + "lastName": "Aikat", + "middleInitial": "", + "importedId": "dStXOaeUnQV_ASwMW9ovpw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170422, + "firstName": "Yu-Cheng", + "lastName": "Chang", + "middleInitial": "", + "importedId": "pfkFpvD2cmftCk6Juwd97A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170423, + "firstName": "Karin", + "lastName": "Tomonaga", + "middleInitial": "", + "importedId": "QsFRi-KCSkzEaZ5pw9tfmg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170424, + "firstName": "Jin Ryong", + "lastName": "Kim", + "middleInitial": "", + "importedId": "VmNZR87New6oRuGF8A1vsg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170425, + "firstName": "Hrvoje", + "lastName": "Benko", + "middleInitial": "", + "importedId": "wlbeOcO0F7b-yrwrphSmNA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170426, + "firstName": "Mizuki", + "lastName": "Ishida", + "middleInitial": "", + "importedId": "XtDt0H3uzum2kNk9Ow6csw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170427, + "firstName": "Kris", + "lastName": "Kitani", + "middleInitial": "", + "importedId": "HVbRNMy4YrMDvkyzLZWuQA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170428, + "firstName": "Takeru", + "lastName": "Hashimoto", + "middleInitial": "", + "importedId": "eDrbw5Burt9qbTDpw5E4VQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170429, + "firstName": "Megan", + "lastName": "Wang", + "middleInitial": "", + "importedId": "Z2pQsEAx-pnpOZAsyEErHg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170430, + "firstName": "Mark", + "lastName": "Billinghurst", + "middleInitial": "", + "importedId": "QlX1SYXVMZkLEc_X2oWX1A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170431, + "firstName": "Sunjun", + "lastName": "Kim", + "middleInitial": "", + "importedId": "TIUjkZ9IJH98mZdEbS8qIQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170432, + "firstName": "Gwangbin", + "lastName": "Kim", + "middleInitial": "", + "importedId": "N8WAz28gH2_8uGSCQ6Aa7A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170433, + "firstName": "Liwenhan", + "lastName": "Xie", + "middleInitial": "", + "importedId": "NlG2YsNb5nvOv-ZWWyjNig", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170434, + "firstName": "Zejun", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "qiFLfYNMRIg4P5HeUqBeeg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170435, + "firstName": "Devon", + "lastName": "Frost", + "middleInitial": "", + "importedId": "xbY0bYglZtuUIjNvrhCmyQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170436, + "firstName": "Paul", + "lastName": "Strohmeier", + "middleInitial": "", + "importedId": "7V8Xnrni5cGCD5_hFpzR2w", + 
"source": "PCS", + "affiliations": [] + }, + { + "id": 170437, + "firstName": "Xi", + "lastName": "Chen", + "middleInitial": "", + "importedId": "h3qlTmznGGS7RLMkGpKADw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170438, + "firstName": "Zezhou", + "lastName": "Sun", + "middleInitial": "", + "importedId": "LvKjD_ye91P-My_mrplMWw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170439, + "firstName": "Janavi", + "lastName": "Gupta", + "middleInitial": "", + "importedId": "BET0_I5tg_V7cRsYnGAr8A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170440, + "firstName": "Ming Yun", + "lastName": "Hsu", + "middleInitial": "", + "importedId": "NXd8hpoANe2jZp4b34eAKQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170441, + "firstName": "Ching-Yi", + "lastName": "Tsai", + "middleInitial": "", + "importedId": "MOBgRF-g5hWFk7L_Iyn8Lg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170442, + "firstName": "Chris", + "lastName": "Donahue", + "middleInitial": "", + "importedId": "21ybP_B2ZLdIpUks7eMFVA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170443, + "firstName": "Daniel", + "lastName": "Weld", + "middleInitial": "S", + "importedId": "iopluiDMx007wNu9A4yNGQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170444, + "firstName": "Jurjaan", + "lastName": "Noim", + "middleInitial": "Onayza", + "importedId": "7vG6QfzUbg7cUik4sGvebA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170445, + "firstName": "Shwetha", + "lastName": "Rajaram", + "middleInitial": "", + "importedId": "2OiZycHFv3YV4Uj-8Lqnrg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170446, + "firstName": "Kun-Woo", + "lastName": "Song", + "middleInitial": "", + "importedId": "YKKawr_n7mCILy_QjY0y7g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170447, + "firstName": "Conrad", + "lastName": "Lempert", + "middleInitial": "", + "importedId": "R_LGhNf_v2hROWTcym2ojQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170448, + "firstName": "Johanna", + "lastName": "Didion", + "middleInitial": "K.", + "importedId": "vWHKWv-PKGcLVHMwFo6Mvw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170449, + "firstName": "Chloe", + "lastName": "Haigh", + "middleInitial": "", + "importedId": "aMZWhEjIs-XKK96sZk4IWQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170450, + "firstName": "Jui-Cheng", + "lastName": "Chiu", + "middleInitial": "", + "importedId": "4RZQ0yIK-bpbl4SbX-p5zQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170451, + "firstName": "Tobias", + "lastName": "Jungbluth", + "middleInitial": "Patrick", + "importedId": "XF10wpfbVp7mpeIjmC4zDg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170452, + "firstName": "Xin", + "lastName": "Tong", + "middleInitial": "", + "importedId": "TsoYZmno_TeVyGiO8kosrQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170453, + "firstName": "Meng Ting", + "lastName": "Shih", + "middleInitial": "", + "importedId": "T6brKddl_nbwss61QqtZtQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170454, + "firstName": "Seungmoon", + "lastName": "Choi", + "middleInitial": "", + "importedId": "UlJ8d0NaQUVWuaB4WIcRMA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170455, + "firstName": "Hyo", + "lastName": "Kang", + "middleInitial": "", + "importedId": "MQqmlbLWIffHEZt9V5JZSg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170456, + "firstName": "Ziyi", + "lastName": "Liu", + "middleInitial": "", + "importedId": 
"zRaAg-C-1-mHnAEf3QsrGA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170457, + "firstName": "Minrui", + "lastName": "Xu", + "middleInitial": "", + "importedId": "FeSh9GFlrXWenpR1AqVEkg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170458, + "firstName": "Amy", + "lastName": "Pavel", + "middleInitial": "", + "importedId": "mWHY4paujYm0BGpPaankqg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170459, + "firstName": "Ryan", + "lastName": "Yen", + "middleInitial": "", + "importedId": "TzPc-ko18NPDiuirghmuEA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170460, + "firstName": "Marina", + "lastName": "Spanos", + "middleInitial": "", + "importedId": "Jad13V6sECzzmhq6_kUgog", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170461, + "firstName": "Tudor", + "lastName": "Tibu", + "middleInitial": "", + "importedId": "aDXAbfiLAPXQJoVuZy5x5w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170462, + "firstName": "Asaf", + "lastName": "Liberman", + "middleInitial": "", + "importedId": "WyDJto_Cgt7vo3uRMbd3rg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170463, + "firstName": "Shoi", + "lastName": "Tou", + "middleInitial": "", + "importedId": "5eYZOUSoApTz2xc4dPQBsg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170464, + "firstName": "Jack", + "lastName": "Williams", + "middleInitial": "", + "importedId": "E0JUV8gaPJiDLS1EMhlhLA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170465, + "firstName": "Jeff", + "lastName": "Huang", + "middleInitial": "", + "importedId": "aGM6vmSOXksq64vMAOoojA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170466, + "firstName": "Yifan", + "lastName": "Zou", + "middleInitial": "", + "importedId": "VFhtdGQfVnq24ShA2oMcag", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170467, + "firstName": "Xuezhu", + "lastName": "Wang", + "middleInitial": "", + "importedId": "nZoV8K1VkiIq1KRsHnUCMg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170468, + "firstName": "Yun", + "lastName": "Wang", + "middleInitial": "", + "importedId": "iQR3kHR7ylO_mWSQf9bguw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170469, + "firstName": "Junpu", + "lastName": "Yu", + "middleInitial": "", + "importedId": "proAmKDqukmKnXSmPxBKLg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170470, + "firstName": "Gaozhang", + "lastName": "Chen", + "middleInitial": "", + "importedId": "xsESQ_fV8c2kMXfOc3mkgQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170471, + "firstName": "Elena", + "lastName": "Glassman", + "middleInitial": "L.", + "importedId": "QmlJycOy4LH6PdhoUoRyEw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170472, + "firstName": "Ziyi", + "lastName": "Xia", + "middleInitial": "", + "importedId": "ouRGnWFJ7rLCNelbIbXrbQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170473, + "firstName": "Zhongmin", + "lastName": "Cai", + "middleInitial": "", + "importedId": "AHdG_y_hPr7vtCoLUOVmIQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170474, + "firstName": "Muhammad", + "lastName": "Abdullah", + "middleInitial": "", + "importedId": "uMJEArnyBaOHD0nBraCwfw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170475, + "firstName": "Yapeng", + "lastName": "Tian", + "middleInitial": "", + "importedId": "B2UOMdZkw1tzkmNfohfXaQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170476, + "firstName": "Yan", + "lastName": "Chen", + "middleInitial": "", + "importedId": 
"nMaLQpu3pR6H8kYCJ1NR1w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170477, + "firstName": "Vishnu", + "lastName": "Sarukkai", + "middleInitial": "", + "importedId": "gtY1krs9cFYMEGDBdf72iA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170478, + "firstName": "Hyunsung", + "lastName": "Cho", + "middleInitial": "", + "importedId": "64gIqhOL3ier0eiNv7FCGw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170479, + "firstName": "Seongjun", + "lastName": "Kang", + "middleInitial": "", + "importedId": "9mRqUfcrMrA5VZ22Dg4LcQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170480, + "firstName": "Mengwei", + "lastName": "Xu", + "middleInitial": "", + "importedId": "YWmnoTvDul46Wshq7SyOcw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170481, + "firstName": "Lei", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "2-N3gVgQazRiRdYtKjeiEg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170482, + "firstName": "Kenji", + "lastName": "Suzuki", + "middleInitial": "", + "importedId": "g_r-irBTyMec4G1Qk98-Qg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170483, + "firstName": "Andi", + "lastName": "Xu", + "middleInitial": "", + "importedId": "o6ENfuVySNcvzUYXxKZAHw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170484, + "firstName": "Kentaro", + "lastName": "Takemura", + "middleInitial": "", + "importedId": "Qo4Fel8PouXYkgjZ3_-djQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170485, + "firstName": "Dae R.", + "lastName": "Jeong", + "middleInitial": "", + "importedId": "jNL3W0eK_Su-vtOfVOoTXQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170486, + "firstName": "Diana", + "lastName": "Soponar", + "middleInitial": "", + "importedId": "sLrs4nIxPoyI9VK8cjrArQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170487, + "firstName": "Sungsoo Ray", + "lastName": "Hong", + "middleInitial": "", + "importedId": "Ofpd6HjnSFOWOD8CPkbw3w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170488, + "firstName": "Zhuang", + "lastName": "Li", + "middleInitial": "", + "importedId": "MsQF7VapOdzTTfDFfBRqrQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170489, + "firstName": "Hirotaka", + "lastName": "Hiraki", + "middleInitial": "", + "importedId": "q7g1mZpZnwW0YelNEL3zUw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170490, + "firstName": "Sam", + "lastName": "Wong", + "middleInitial": "", + "importedId": "wLOV9vhjBm8tTjIIIa_NFg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170491, + "firstName": "Eve", + "lastName": "Hoggan", + "middleInitial": "", + "importedId": "rpxJ_Bg4EWuFi5wJqCMK1A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170492, + "firstName": "Takashi", + "lastName": "Ijiri", + "middleInitial": "", + "importedId": "q5WSkGTwJuZGfHB-6Jfl1A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170493, + "firstName": "Mohammad Ruhul", + "lastName": "Amin", + "middleInitial": "", + "importedId": "RA5gAaR5RA-hvJZMuzH9Mg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170494, + "firstName": "Seokhyun", + "lastName": "Hwang", + "middleInitial": "", + "importedId": "OgKdtJqx0Q4Q-trvadfftw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170495, + "firstName": "Zhengzhe", + "lastName": "Zhu", + "middleInitial": "", + "importedId": "pnenrv-7MnVSj52a1Ksn8A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170496, + "firstName": "James", + "lastName": "Smith", + "middleInitial": "", + 
"importedId": "Tm1g_LD6Ud7BqMLPBru40Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170497, + "firstName": "Mingyi", + "lastName": "Li", + "middleInitial": "", + "importedId": "9EpK8XGHt335EnbXOgxPiQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170498, + "firstName": "Mayank", + "lastName": "Patel", + "middleInitial": "", + "importedId": "9UUKtJS31uz9mv5o7x-hIA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170499, + "firstName": "Akhil", + "lastName": "Padmanabha", + "middleInitial": "", + "importedId": "GmOtqStPfgHAorV1IxRs2Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170500, + "firstName": "Fan", + "lastName": "Liu", + "middleInitial": "", + "importedId": "QNjBGr69IX6V5mRFB9c_wg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170501, + "firstName": "Joseph Chee", + "lastName": "Chang", + "middleInitial": "", + "importedId": "zuS02m6iwKI6qMyzZ93-Sg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170502, + "firstName": "Jen", + "lastName": "Coughenour", + "middleInitial": "", + "importedId": "Qdp9eg0RW6FLQ2svjDxk9g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170503, + "firstName": "Cheng", + "lastName": "Xue", + "middleInitial": "", + "importedId": "RTpiDiY8EMJPBV-cWB_GCA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170504, + "firstName": "Aditya", + "lastName": "Bagaria", + "middleInitial": "", + "importedId": "j30L5DEbOy9goWI5y8GP9g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170505, + "firstName": "Wanhui", + "lastName": "Li", + "middleInitial": "", + "importedId": "dorqXTsYOVHnG5CBZGSmaQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170506, + "firstName": "Aneesh", + "lastName": "Tarun", + "middleInitial": "P.", + "importedId": "IiDFAoU-x291ex9OXycHiw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170507, + "firstName": "Eric", + "lastName": "Gonzalez", + "middleInitial": "J", + "importedId": "mk1k5jXFskOrNL6oFnqiLQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170508, + "firstName": "Steve", + "lastName": "Hodges", + "middleInitial": "", + "importedId": "gRv7CYVQNVZB2DZUjxLdvQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170509, + "firstName": "Amirhossein", + "lastName": "H. 
Memar", + "middleInitial": "", + "importedId": "RHQNH32B875JdxVtqGFeGw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170510, + "firstName": "Xiaoyi", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "cu1koqcuhvTnOquc9-hn7A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170511, + "firstName": "Ryan", + "lastName": "Rossi", + "middleInitial": "", + "importedId": "2Y4JZCP0NlLH4-1OrbIqTg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170512, + "firstName": "Yixiao", + "lastName": "Kang", + "middleInitial": "", + "importedId": "auC7vW2pWnwqU1KkoLzMcg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170513, + "firstName": "Xiaojing", + "lastName": "Zhou", + "middleInitial": "", + "importedId": "GjgN3S7k4ZR1qXLtJSGa-Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170514, + "firstName": "Yihong", + "lastName": "Wu", + "middleInitial": "", + "importedId": "8q47RrdlEpATt8XvPaIH3Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170515, + "firstName": "Guillermo", + "lastName": "Sapiro", + "middleInitial": "", + "importedId": "brcS42VnArGOTXU7ST79IA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170516, + "firstName": "Harrison", + "lastName": "Goldstein", + "middleInitial": "", + "importedId": "RBcYzx0_YRqfOMRvaYzVUA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170517, + "firstName": "Zining", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "h8Vt-rqW2EUAKwnwXJN4aA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170518, + "firstName": "Zixin", + "lastName": "Guo", + "middleInitial": "", + "importedId": "7vawcvK9LsUspD-NcrOW-g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170519, + "firstName": "Yunhe", + "lastName": "Yan", + "middleInitial": "", + "importedId": "neM_I3c1uKM2lkl8c0WDmg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170520, + "firstName": "Michelle", + "lastName": "Lam", + "middleInitial": "S.", + "importedId": "PNu1acysQ8BLn4Xm64QTPQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170521, + "firstName": "Xinshuang", + "lastName": "Liu", + "middleInitial": "", + "importedId": "W-IVA2ooPgXZ6gRy0X47zQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170522, + "firstName": "Mengtian", + "lastName": "Gan", + "middleInitial": "", + "importedId": "4fkkV0MgIT0fjfGrNnUzQw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170523, + "firstName": "Benjamin", + "lastName": "Pierce", + "middleInitial": "C.", + "importedId": "iHNpREy5aNPpYY9fI9nn1Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170524, + "firstName": "Chrys", + "lastName": "Morton", + "middleInitial": "", + "importedId": "sAYf-KLoiXB8w6h3HS3EHw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170525, + "firstName": "Rebecca", + "lastName": "Hicke", + "middleInitial": "MM", + "importedId": "Yp1sOCVu3RcNnnaXYc24tA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170526, + "firstName": "Kian Peen", + "lastName": "Yeo", + "middleInitial": "", + "importedId": "o19nRWoFL_GCwERv2NI11A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170527, + "firstName": "Tianyu", + "lastName": "Yu", + "middleInitial": "", + "importedId": "YU-ZtPbMYOviJULKrWbRBw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170528, + "firstName": "Takahiro", + "lastName": "Miura", + "middleInitial": "", + "importedId": "HwZgjUPo3REjCwEr0VVRrg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170529, + "firstName": "Rushil", 
+ "lastName": "Sojitra", + "middleInitial": "H", + "importedId": "XUNDBT1-JxTOW0Nzx4IeAA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170530, + "firstName": "Antonio", + "lastName": "Krüger", + "middleInitial": "", + "importedId": "7BVGaBoskVivG5iBzHVi8Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170531, + "firstName": "Hao", + "lastName": "Jin", + "middleInitial": "", + "importedId": "PwqEIg_crqxBX74P-fR9NA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170532, + "firstName": "Homei", + "lastName": "Miyashita", + "middleInitial": "", + "importedId": "8SUm8tZAhEI_3G3BIXTBiQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170533, + "firstName": "Jean-Peïc", + "lastName": "Chou", + "middleInitial": "", + "importedId": "OklrqEVeShlCI4KGRpYGzQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170534, + "firstName": "Andrew", + "lastName": "Head", + "middleInitial": "", + "importedId": "Vu2IqtRmC5v8JnkUlR3MjQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170535, + "firstName": "Xueyan", + "lastName": "Cai", + "middleInitial": "", + "importedId": "NejoSi697UXtN16fDWv1Vg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170536, + "firstName": "Sutirtha", + "lastName": "Roy", + "middleInitial": "", + "importedId": "r6YQS3k3x5doHWn39mi52Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170537, + "firstName": "Jon", + "lastName": "Froehlich", + "middleInitial": "E.", + "importedId": "WzZztLsZHV7nWIHWf-9_EA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170538, + "firstName": "Naomi", + "lastName": "Davis", + "middleInitial": "", + "importedId": "zf1q6pJNJVhRGpOuUE1qxg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170539, + "firstName": "David Chuan-En", + "lastName": "Lin", + "middleInitial": "", + "importedId": "R4BeOlIwrD06tJ5OqLO0pA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170540, + "firstName": "Emilie", + "lastName": "Faracci", + "middleInitial": "", + "importedId": "7jdI2lmn2HQlcs_maFafow", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170541, + "firstName": "Shusuke", + "lastName": "Kanazawa", + "middleInitial": "", + "importedId": "2VBXcgD_OmEcjCYOSC0Zxg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170542, + "firstName": "Jan", + "lastName": "Bartels", + "middleInitial": "Ulrich", + "importedId": "l9KyD8wzeFqRkdVTf8kNhA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170543, + "firstName": "Mohamed", + "lastName": "Kari", + "middleInitial": "", + "importedId": "Lrml1MyYjhpG-MTWVzgutQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170544, + "firstName": "Jingyi", + "lastName": "Li", + "middleInitial": "", + "importedId": "Z261FiwxZTVz9kmen0433w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170545, + "firstName": "Junjie", + "lastName": "Tang", + "middleInitial": "", + "importedId": "wG_SNECjpv2NhSpC5fgZEA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170546, + "firstName": "Gang", + "lastName": "Li", + "middleInitial": "", + "importedId": "TQpRduEex-Gf5STzPvo8gQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170547, + "firstName": "Ling", + "lastName": "Qin", + "middleInitial": "", + "importedId": "Tf9_AyxOWNp_SZNdIHxX7w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170548, + "firstName": "Chen", + "lastName": "Zhu-Tian", + "middleInitial": "", + "importedId": "HhSPSVLiDR95HpLixV2VCA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170549, + 
"firstName": "Shuichi", + "lastName": "Sakai", + "middleInitial": "", + "importedId": "g4O25Icw3I0PRdW6nZh5rA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170550, + "firstName": "Hechuan", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "2-P1ebUGm_stEEulh8x6lQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170551, + "firstName": "Ami", + "lastName": "Takahashi", + "middleInitial": "", + "importedId": "RfQ08V9xJiFX8-x2Q5EEEg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170552, + "firstName": "Chiao", + "lastName": "Fang", + "middleInitial": "", + "importedId": "hkcGY5zjY8GDiuTTznLGBA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170553, + "firstName": "Nikhil", + "lastName": "Verma", + "middleInitial": "", + "importedId": "cqgHa9Q50UTxZ_qLasi9Wg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170554, + "firstName": "Paul", + "lastName": "Streli", + "middleInitial": "", + "importedId": "iBBibiAD0taDgXYe3eG7Yw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170555, + "firstName": "Anindya", + "lastName": "Das Antar", + "middleInitial": "", + "importedId": "yNqyUFmjgin2vhjbmRXKPA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170556, + "firstName": "Sera", + "lastName": "Lee", + "middleInitial": "", + "importedId": "7fBquhqfVk1GX7WsNQaHaQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170557, + "firstName": "Soroush", + "lastName": "Shahi", + "middleInitial": "", + "importedId": "s5IGVhuaczXDIJSz57nwjg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170558, + "firstName": "Dengming", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "zTLCFSKXzBv2x4w7b9gxhg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170559, + "firstName": "Hyunyoung", + "lastName": "Kim", + "middleInitial": "", + "importedId": "ntYqo9wpGHLR5HKb0w560Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170560, + "firstName": "Adriana", + "lastName": "Schulz", + "middleInitial": "", + "importedId": "MHeWVCOWuKfJBrcrzQMklQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170561, + "firstName": "Huachao", + "lastName": "Mao", + "middleInitial": "", + "importedId": "1ppLmTuVFe6ggsCVe6k69Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170562, + "firstName": "Nicolai", + "lastName": "Marquardt", + "middleInitial": "", + "importedId": "O5IUgJzHUk_m0ZjqCm05gQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170563, + "firstName": "J.D.", + "lastName": "Zamfirescu-Pereira", + "middleInitial": "", + "importedId": "gUf0O7l2yHeEuT5VttA77w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170564, + "firstName": "Mayank", + "lastName": "Goel", + "middleInitial": "", + "importedId": "H7nw0w_a3oricUzwBnIyGQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170565, + "firstName": "Iosune", + "lastName": "Sarasate Azcona", + "middleInitial": "", + "importedId": "n5q4PLcJGe5alBazfb_08g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170566, + "firstName": "Hongzheng", + "lastName": "Zhao", + "middleInitial": "", + "importedId": "pSTUPd8-ULa0UxorW3iR-A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170567, + "firstName": "Qiwei", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "rc6fhhaN9nF_9-Gu5xKMRw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170568, + "firstName": "Philippe", + "lastName": "Laban", + "middleInitial": "", + "importedId": "C4fWPqc-_A0aKTbvSAGa-A", + "source": "PCS", + 
"affiliations": [] + }, + { + "id": 170569, + "firstName": "Preeti", + "lastName": "Vyas", + "middleInitial": "", + "importedId": "f60DWByzhQ9jTW34zmtTdQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170570, + "firstName": "Zhenzhen", + "lastName": "Shen", + "middleInitial": "", + "importedId": "ZxzEjAjd9CiH8edZAl4hcg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170571, + "firstName": "Sarita", + "lastName": "Schoenebeck", + "middleInitial": "", + "importedId": "0ZtAaRy_pj3ksr3FgWx2IA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170572, + "firstName": "Qiuyu", + "lastName": "Lu", + "middleInitial": "", + "importedId": "vm0q1Ia3g1yeAsGcaAdGxw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170573, + "firstName": "Sam", + "lastName": "Bourgault", + "middleInitial": "", + "importedId": "5PCX6POWN5ddvLTD0W5abg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170574, + "firstName": "Luigi", + "lastName": "Borda", + "middleInitial": "", + "importedId": "2t9GPPVc4AXwajP12p2uWA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170575, + "firstName": "Vivian", + "lastName": "Chan", + "middleInitial": "Hsinyueh", + "importedId": "ilPCiWR_rNvPu0mRXYO_iw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170576, + "firstName": "Richard", + "lastName": "Liu", + "middleInitial": "", + "importedId": "jmex7AaxyzUP8ipvQ8AS9g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170577, + "firstName": "Huamin", + "lastName": "Qu", + "middleInitial": "", + "importedId": "oB-TfawP7djih5JUiWOXYA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170578, + "firstName": "Zhe", + "lastName": "Yan", + "middleInitial": "", + "importedId": "QpXg34dLvl2jDaD-P42KTg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170579, + "firstName": "Yuewen", + "lastName": "Luo", + "middleInitial": "", + "importedId": "e0HrTFDasIJyQASHlwArUg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170580, + "firstName": "Keith", + "lastName": "Vertanen", + "middleInitial": "", + "importedId": "KubTjbY7qi1YIz4HtLf0Ug", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170581, + "firstName": "Josef", + "lastName": "Macera", + "middleInitial": "", + "importedId": "KO_U_Mhh-_FN5UrAO-qjHQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170582, + "firstName": "Lung-Pan", + "lastName": "Cheng", + "middleInitial": "", + "importedId": "eA4F65PMG-ZnFOFgV9JhsA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170583, + "firstName": "Hidetsugu", + "lastName": "Irie", + "middleInitial": "", + "importedId": "RZbOowAYpvEIUIyuLiTtZQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170584, + "firstName": "Rui", + "lastName": "He", + "middleInitial": "", + "importedId": "FnYwgammHi1hD6kdFWzrsQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170585, + "firstName": "Wolf", + "lastName": "Kienzle", + "middleInitial": "", + "importedId": "a7YhpPIwvSB8lVTzTk32Fg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170586, + "firstName": "Yang", + "lastName": "Li", + "middleInitial": "", + "importedId": "wExisjRvcivqAWCBlR1sLg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170587, + "firstName": "Krzysztof", + "lastName": "Wolski", + "middleInitial": "", + "importedId": "yMy_aQPoCTtR4S-aR1OnSA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170588, + "firstName": "Robert", + "lastName": "Haisfield", + "middleInitial": "", + "importedId": "-dg_jAoPBxiDuovgKeiwOQ", + "source": 
"PCS", + "affiliations": [] + }, + { + "id": 170589, + "firstName": "Jiwan", + "lastName": "Kim", + "middleInitial": "", + "importedId": "moaLtaqn5MffvuzNuFd0Kw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170590, + "firstName": "Jeffrey", + "lastName": "Nichols", + "middleInitial": "", + "importedId": "xZQ-uByUZrRcJyhjqu8vfg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170591, + "firstName": "Eyoel", + "lastName": "Gebre", + "middleInitial": "", + "importedId": "7cl9eg_SwjDf6LyMlGXy1Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170592, + "firstName": "Thorbjørn", + "lastName": "Mikkelsen", + "middleInitial": "", + "importedId": "J7BYxNzrfwxi0ZIxrXMIbw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170593, + "firstName": "Fangzheng", + "lastName": "Liu", + "middleInitial": "", + "importedId": "POqo747LgR9VBCo3iSdRyw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170594, + "firstName": "Yitao", + "lastName": "Fan", + "middleInitial": "", + "importedId": "py-6QLdjiLMVurlUNR0qhA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170595, + "firstName": "Yuta", + "lastName": "Sugiura", + "middleInitial": "", + "importedId": "TeKRGK2C776DV4tpr6ldBw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170596, + "firstName": "Chang", + "lastName": "Chen", + "middleInitial": "", + "importedId": "DpgpM9ad4iCmMbvGYzktUw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170597, + "firstName": "Hongnan", + "lastName": "Lin", + "middleInitial": "", + "importedId": "EcVpo-7u8tpC-wRrhF6DGQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170598, + "firstName": "Mariana", + "lastName": "Shimabukuro", + "middleInitial": "", + "importedId": "KrgSnjt7lprn6E8LNQiYGA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170599, + "firstName": "Shuai", + "lastName": "Ma", + "middleInitial": "", + "importedId": "AefFDUuJuQO4KdxWaDTIYw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170600, + "firstName": "Amy", + "lastName": "Zhang", + "middleInitial": "X.", + "importedId": "sA9xow8RflRH-dt1aYvZCw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170601, + "firstName": "Di", + "lastName": "Wen", + "middleInitial": "", + "importedId": "ybwi4roNdTmEyKi9XqSWiw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170602, + "firstName": "Yunseo", + "lastName": "Lee", + "middleInitial": "", + "importedId": "wM9rZc7G-olTvYLe5-rl0w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170603, + "firstName": "Moshfiq-Us-Saleheen", + "lastName": "Chowdhury", + "middleInitial": "", + "importedId": "mjy3z96AAR6c7A5KusSzjQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170604, + "firstName": "Yangyang", + "lastName": "Shi", + "middleInitial": "", + "importedId": "4msvNqz15QHFYzVfgLfY_g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170605, + "firstName": "Ruofei", + "lastName": "Du", + "middleInitial": "", + "importedId": "R7ZrSPERot4-ssDFmZn9EQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170606, + "firstName": "Unai Javier", + "lastName": "Fernández", + "middleInitial": "", + "importedId": "gJf8Dfh032Uy-NEPhznymA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170607, + "firstName": "Antonio", + "lastName": "Glenn", + "middleInitial": "", + "importedId": "8_fpn4JBcTKPb5ewKYkguw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170608, + "firstName": "Shwetak", + "lastName": "Patel", + "middleInitial": "", + "importedId": 
"IVnjxjYkN3trolk1IiWHlw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170609, + "firstName": "Divya", + "lastName": "Kartik", + "middleInitial": "", + "importedId": "Oh05DaBJZ1LEML1QYcOaGA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170610, + "firstName": "Joe", + "lastName": "Paradiso", + "middleInitial": "", + "importedId": "mPgn9zhAqjthCQ5b5psNcQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170611, + "firstName": "Nam Wook", + "lastName": "Kim", + "middleInitial": "", + "importedId": "2FDLsIvlxIbUJ8rcEVDlhg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170612, + "firstName": "Perttu", + "lastName": "Hämäläinen", + "middleInitial": "", + "importedId": "esoYDjUrwF1Ck2SScARemg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170613, + "firstName": "Felix", + "lastName": "Hähnlein", + "middleInitial": "", + "importedId": "f0qtA7OyFmsoo5yhthxBew", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170614, + "firstName": "Yu", + "lastName": "Chen", + "middleInitial": "", + "importedId": "8gTbRL1gFvzOD0JXfltM7Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170615, + "firstName": "Zac", + "lastName": "Hatfield-Dodds", + "middleInitial": "", + "importedId": "CXg66IxvMJ7_-RPYkRFtfg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170616, + "firstName": "Somayeh", + "lastName": "Molaei", + "middleInitial": "", + "importedId": "MHoG5GwbUBUGiHvmkR_Z4g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170617, + "firstName": "Yoon", + "lastName": "Kim", + "middleInitial": "", + "importedId": "awnw6AhC7nd8iEbMtG9lGg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170618, + "firstName": "Karl", + "lastName": "Rosenberg", + "middleInitial": "Toby", + "importedId": "E0Io0IbcVR9ra7AGMGrNUA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170619, + "firstName": "Nicholas", + "lastName": "Jennings", + "middleInitial": "", + "importedId": "u4k_v4kQpyOHRQlJQDmq8Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170620, + "firstName": "Yunyi", + "lastName": "Zhu", + "middleInitial": "", + "importedId": "9lMZRMQRW9AOL2wSso2vbQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170621, + "firstName": "Austin", + "lastName": "Henley", + "middleInitial": "Z", + "importedId": "pxn_2SaVj_LEkymWRLNxwQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170622, + "firstName": "Zjenja", + "lastName": "Doubrovski", + "middleInitial": "", + "importedId": "olsZ7w_ldkufKkgqp5s8Eg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170623, + "firstName": "Amir", + "lastName": "Alavi", + "middleInitial": "H.", + "importedId": "_UcaIDvotrA0XBEcD0lD8w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170624, + "firstName": "Jaylin", + "lastName": "Herskovitz", + "middleInitial": "", + "importedId": "UBClD9N1evSL6xace0kXMA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170625, + "firstName": "Kumpei", + "lastName": "Ogawa", + "middleInitial": "", + "importedId": "Jh4WpWVeL4la6zmlFmF98w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170626, + "firstName": "Kentaro", + "lastName": "Yasu", + "middleInitial": "", + "importedId": "A7K1mK1GUhouzVCC5QCyRw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170627, + "firstName": "Seoyeon", + "lastName": "Bae", + "middleInitial": "", + "importedId": "S7ZFWxc8oP2P7Sv440vd9g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170628, + "firstName": "Yohan", + "lastName": "Yun", + 
"middleInitial": "", + "importedId": "Np05GcmNhd5o-T_VyCHtAg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170629, + "firstName": "Ziyue", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "3cIsfJ_EcbdKhpagjE-5pA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170630, + "firstName": "Brian", + "lastName": "Smith", + "middleInitial": "A.", + "importedId": "DUgfJ2LEtD6TOu45EPbBYA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170631, + "firstName": "Dailyn", + "lastName": "Despradel Rumaldo", + "middleInitial": "", + "importedId": "WHtowtTknb0BR6ChBQb3zg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170632, + "firstName": "Ruidong", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "4gOWCTyy2zZpqRblr9GeDw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170633, + "firstName": "Asier", + "lastName": "Marzo", + "middleInitial": "", + "importedId": "fQBnOT2vkF-srKd-CVFgJA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170634, + "firstName": "Yuheng", + "lastName": "Liu", + "middleInitial": "", + "importedId": "uKOeuF6swcpwT5fFznJc6w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170635, + "firstName": "Alvaro", + "lastName": "Lopez", + "middleInitial": "", + "importedId": "nJWtvLwKu5S2ngvP3SfGig", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170636, + "firstName": "Maya", + "lastName": "Cakmak", + "middleInitial": "", + "importedId": "YGo_6Y2veS53wLbFNiWijw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170637, + "firstName": "Bhavya", + "lastName": "Garg", + "middleInitial": "", + "importedId": "sU68tLzKr_0SuR3Py4QoOA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170638, + "firstName": "Ruei-Che", + "lastName": "Chang", + "middleInitial": "", + "importedId": "fj-AI7DGbP0P11WOCt3b2w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170639, + "firstName": "Yu", + "lastName": "Zhang", + "middleInitial": "", + "importedId": "s28apWmb9jxVgJjQWKFuQA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170640, + "firstName": "Xin Yi Therese", + "lastName": "Xu", + "middleInitial": "", + "importedId": "bkQ_7UvgR0jq7cQ4V_7zdw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170641, + "firstName": "Cyrus", + "lastName": "Vachha", + "middleInitial": "", + "importedId": "av45I7PfzdTQJgYY_jzscw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170642, + "firstName": "Isabel", + "lastName": "Li", + "middleInitial": "", + "importedId": "G6g9Af3HTX5u2pdwZ51rtA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170643, + "firstName": "Erzhen", + "lastName": "Hu", + "middleInitial": "", + "importedId": "yHJpm16tdC_UN2vOL4YXcg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170644, + "firstName": "Fangtian", + "lastName": "Ying", + "middleInitial": "", + "importedId": "_7JtR9nQDVdc83olDpP8Dw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170645, + "firstName": "Jaime", + "lastName": "Gould", + "middleInitial": "", + "importedId": "FWLdX6gdADo44ODrZdJuiw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170646, + "firstName": "Yuxuan", + "lastName": "Liu", + "middleInitial": "", + "importedId": "8QWhqcMJYvRQaDEzIBQ54A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170647, + "firstName": "Syed Masum", + "lastName": "Billah", + "middleInitial": "", + "importedId": "fk_oHW4xdx90gCXhWb40Tw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170648, + "firstName": "Guanzheng", + 
"lastName": "Chen", + "middleInitial": "", + "importedId": "Zroc_FbqstsqgEltRtbz4g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170649, + "firstName": "Chin-Yi", + "lastName": "Cheng", + "middleInitial": "", + "importedId": "fLfUOVTo8-C47nZAgGqVTg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170650, + "firstName": "Sangyoon", + "lastName": "Lee", + "middleInitial": "", + "importedId": "7imbipCPer8EwIVeJKm4pw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170651, + "firstName": "Grace", + "lastName": "Tang", + "middleInitial": "", + "importedId": "MST-BQq5VMMwuGHLm33GpQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170652, + "firstName": "Minh Duc", + "lastName": "Vu", + "middleInitial": "", + "importedId": "ndk_29r0rquMrOgBWqQarA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170653, + "firstName": "Qi", + "lastName": "Lu", + "middleInitial": "", + "importedId": "c6dGyo45NAYS_xraav2oGA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170654, + "firstName": "Tyler", + "lastName": "Christensen", + "middleInitial": "", + "importedId": "nzaieDqkZhPGTSLJs5RG5w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170655, + "firstName": "Xintong", + "lastName": "Liu", + "middleInitial": "", + "importedId": "3uRxJZOpSGK2EgSkp6-UGQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170656, + "firstName": "Bandhav", + "lastName": "Veluri", + "middleInitial": "", + "importedId": "5-UO2u9KCNYV_s-hSinWkw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170657, + "firstName": "Scott", + "lastName": "Hudson", + "middleInitial": "E", + "importedId": "7vKKoUAiJVNW0XZVoOd-TA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170658, + "firstName": "Erwin", + "lastName": "Wu", + "middleInitial": "", + "importedId": "0fja23ihOgAOb3QPKRRbzw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170659, + "firstName": "Tongyu", + "lastName": "Zhou", + "middleInitial": "", + "importedId": "mGJ5UTG7OMOvHq8JHGRb1g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170660, + "firstName": "Donglai", + "lastName": "Wei", + "middleInitial": "", + "importedId": "ypzyqxkzHW8BbfTxRf2j8g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170661, + "firstName": "Dinara", + "lastName": "Talypova", + "middleInitial": "", + "importedId": "EwfpPsPHNLdMPql96FTIJQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170662, + "firstName": "Douglas", + "lastName": "Weber", + "middleInitial": "", + "importedId": "gmf9BWn2bAwFjxW8N0HWFw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170663, + "firstName": "Hiroya", + "lastName": "Miura", + "middleInitial": "", + "importedId": "7M-T3bysmVG1cXMp3TvkZQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170664, + "firstName": "Jun", + "lastName": "Gong", + "middleInitial": "", + "importedId": "TafTuTDQyjAU-MPnwAAPIQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170665, + "firstName": "Tingyu", + "lastName": "Cheng", + "middleInitial": "", + "importedId": "OW6mkxUkQ8ja21RxkLaW3Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170666, + "firstName": "Javad", + "lastName": "Ghaderi", + "middleInitial": "", + "importedId": "n4bOk6aUdsEJn4AkNEmIWA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170667, + "firstName": "Runchang", + "lastName": "Kang", + "middleInitial": "", + "importedId": "BvlQ1NYJQ6OBAgpjDowgqA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170668, + "firstName": "Vinitha", + 
"lastName": "Ranganeni", + "middleInitial": "", + "importedId": "dMLK8_ABy5jnRp90xVGnoQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170669, + "firstName": "Antti", + "lastName": "Oulasvirta", + "middleInitial": "", + "importedId": "jUiRwkj0eP82dZVQLtfCZA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170670, + "firstName": "Yasith", + "lastName": "Samaradivakara", + "middleInitial": "", + "importedId": "YFTqK1cCRlZ8NvIu7RCMNQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170671, + "firstName": "Yongjian", + "lastName": "Fu", + "middleInitial": "", + "importedId": "qG6Yjbzj0BbuJoIVufDW7w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170672, + "firstName": "Piyawat", + "lastName": "Lertvittayakumjorn", + "middleInitial": "", + "importedId": "WzyCC02UcRmt5Z74ZYHdDg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170673, + "firstName": "Han", + "lastName": "Wang", + "middleInitial": "", + "importedId": "yR5MBnnKmoT8EDT886WT0g", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170674, + "firstName": "Alexander", + "lastName": "Ching", + "middleInitial": "", + "importedId": "sIZ_N6c5fmfZWFApXfksMA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170675, + "firstName": "Likun", + "lastName": "Fang", + "middleInitial": "", + "importedId": "CfJlZD1wDCiQJ4yF5goqqQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170676, + "firstName": "David Guy", + "lastName": "Brizan", + "middleInitial": "", + "importedId": "uewJhNXyP6sfQHnMCfXopA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170677, + "firstName": "Anik", + "lastName": "Gupta", + "middleInitial": "", + "importedId": "Mv9Xk11BQOH_5U694aGhYA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170678, + "firstName": "Daniel", + "lastName": "Jackson", + "middleInitial": "", + "importedId": "RrvG2qHJZNeWuoYwUwhQDw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170679, + "firstName": "Clemens", + "lastName": "Klokmose", + "middleInitial": "Nylandsted", + "importedId": "LDrt-dZ778vMa1oCGUhNhA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170680, + "firstName": "Amanda", + "lastName": "Swearngin", + "middleInitial": "", + "importedId": "nYm9ZkLitPPwR9pP3NJroQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170681, + "firstName": "Nathaniel", + "lastName": "Dennler", + "middleInitial": "Steele", + "importedId": "Nc6bYyfH8cqeamgHsDunrw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170682, + "firstName": "Vasco", + "lastName": "Xu", + "middleInitial": "", + "importedId": "irqRlDL0Up4ppjPMeVCHcA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170683, + "firstName": "Ratchanon", + "lastName": "Wattanaparinton", + "middleInitial": "", + "importedId": "nfp-E0jf_1-etf6vet1TMQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170684, + "firstName": "Peter", + "lastName": "He", + "middleInitial": "", + "importedId": "dW2celz5TClWN6KR-0Rq9w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170685, + "firstName": "Carina", + "lastName": "Negreanu", + "middleInitial": "", + "importedId": "cXhSq0ztGKJX4HFvrK4t7w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170686, + "firstName": "Marti", + "lastName": "Hearst", + "middleInitial": "", + "importedId": "duLiJyoR-Hsgrpl3Cul0vA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170687, + "firstName": "Varad", + "lastName": "Dhat", + "middleInitial": "", + "importedId": "g9w-hd59lNPZLLin_T7h8w", + "source": "PCS", + 
"affiliations": [] + }, + { + "id": 170688, + "firstName": "Peter", + "lastName": "van Hardenberg", + "middleInitial": "", + "importedId": "B-MnvHpWH52X125qOLI9Rw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170689, + "firstName": "Nadya", + "lastName": "Peek", + "middleInitial": "", + "importedId": "bCuJMThz4u2trsJEGdqTyg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170690, + "firstName": "Saelyne", + "lastName": "Yang", + "middleInitial": "", + "importedId": "KqqAYbATMsEk3CeesDFqUw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170691, + "firstName": "Leah", + "lastName": "Buechley", + "middleInitial": "", + "importedId": "EkI1FZx1bzSxiLcMDryz2A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170692, + "firstName": "Shihe", + "lastName": "Wang", + "middleInitial": "", + "importedId": "Z5xt7Piw-4Cz0zCXdpEmPw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170693, + "firstName": "Laurenz", + "lastName": "Seidel", + "middleInitial": "", + "importedId": "8vRFBUN2OEL2Og-CBNBGwg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170694, + "firstName": "Yuchen", + "lastName": "Wu", + "middleInitial": "", + "importedId": "FcaJ1mFjWsZSvAV31RArkA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170695, + "firstName": "Chikelei", + "lastName": "Wang", + "middleInitial": "", + "importedId": "eeVoT6rJc2Z80yA0i1uFEw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170696, + "firstName": "Koya", + "lastName": "Narumi", + "middleInitial": "", + "importedId": "KolI4fgGxwAfs0V5a8igoQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170697, + "firstName": "Josh", + "lastName": "Pollock", + "middleInitial": "M.", + "importedId": "B9ds6SAbdMOB0g1gtAvJ-Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170698, + "firstName": "Cedric", + "lastName": "Honnet", + "middleInitial": "", + "importedId": "_REyw2ml7u4Wn8gl5W6u7A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170699, + "firstName": "Jeffrey", + "lastName": "Bigham", + "middleInitial": "P", + "importedId": "8nDgzwamTG7TuXrNxpBBuA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170700, + "firstName": "Xiulun", + "lastName": "Yin", + "middleInitial": "", + "importedId": "tpcI-AIK_war50OWTbB-Dw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170701, + "firstName": "Riku", + "lastName": "Arakawa", + "middleInitial": "", + "importedId": "NneceY8W4BjESWAfuTSpNg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170702, + "firstName": "Eldy", + "lastName": "Lazaro Vasquez", + "middleInitial": "S.", + "importedId": "KtOR_Ok1tDu6NU09ajJgWA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170704, + "firstName": "David", + "lastName": "Kim", + "middleInitial": "", + "importedId": "xk8v5POK_ET03VCDAzQmJg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170705, + "firstName": "Roderick", + "lastName": "Murray-Smith", + "middleInitial": "", + "importedId": "LwxWm0xz2dftVoo6W2MTKQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170706, + "firstName": "Rana", + "lastName": "Hanocka", + "middleInitial": "", + "importedId": "IwTa-WUOmYNjG7NFBuO-SQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170707, + "firstName": "William H", + "lastName": "Seiple", + "middleInitial": "", + "importedId": "MjL3G1IImMr254Iaf1KjZw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170708, + "firstName": "Nels", + "lastName": "Numan", + "middleInitial": "", + "importedId": 
"hV7b-GaEhc2gFE6tK_2xTg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170709, + "firstName": "Takashi", + "lastName": "Amesaka", + "middleInitial": "", + "importedId": "Ee3-4cSiiA6mPM8hL7KvRA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170710, + "firstName": "Pengxiang", + "lastName": "Wang", + "middleInitial": "", + "importedId": "NPrn1gi0JnxboPULuUJCTg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170711, + "firstName": "Lina", + "lastName": "Weilke", + "middleInitial": "Madlin", + "importedId": "8Dq9ohh0Quqm5DVLV6m06A", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170712, + "firstName": "Johnny", + "lastName": "Lee", + "middleInitial": "", + "importedId": "GdtrqWYDswLR7tXqzCgMdQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170713, + "firstName": "Nazmus", + "lastName": "Saquib", + "middleInitial": "", + "importedId": "KNdpiQgDH0AOXoVl358qFA", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170714, + "firstName": "Hiroki", + "lastName": "Sato", + "middleInitial": "", + "importedId": "d7Rk2KdE6CqOcLPW0uTJqg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 170715, + "firstName": "Yeqi", + "lastName": "Sang", + "middleInitial": "", + "importedId": "cQjIOHFXkXuq9neikgxfgQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 171096, + "firstName": "Haruki", + "lastName": "Takahashi", + "middleInitial": "", + "importedId": "valmwVaVJvEDw-u5ctqCxg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 171097, + "firstName": "Jeeeun", + "lastName": "Kim", + "middleInitial": "", + "importedId": "8h2BUtB4JNpWkQLqy3KMFw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 171100, + "firstName": "Raj", + "lastName": "Reddy", + "importedId": "1", + "source": "CSV", + "affiliations": [] + }, + { + "id": 171102, + "firstName": "Yaser", + "lastName": "Sheikh", + "importedId": "2", + "source": "CSV", + "affiliations": [] + }, + { + "id": 171229, + "firstName": "Xiangrong", + "lastName": "Zhu", + "middleInitial": "", + "importedId": "CIV49QIBgNnrFGxIiPlISw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 171230, + "firstName": "Yongbo", + "lastName": "Yang", + "middleInitial": "", + "importedId": "XckW0RDaiAIR8kEi5fxpfQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 172825, + "firstName": "Vladimir", + "lastName": "Kim", + "middleInitial": "", + "importedId": "8Kyq-DHsSQjgdjgU9FGQHQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 172826, + "firstName": "Nadir", + "lastName": "Weibel", + "middleInitial": "", + "importedId": "7z7qN_lZWxtdWHMuQ74weg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 172827, + "firstName": "Cuong", + "lastName": "Nguyen", + "middleInitial": "", + "importedId": "Zr6kWI7nXq7mFI0bN2adwg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 172828, + "firstName": "Chen", + "lastName": "Chen", + "middleInitial": "", + "importedId": "surIJ_ZEjhsIR8Ud2Q2D5Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 172829, + "firstName": "Keita", + "lastName": "Tsuyuguchi", + "middleInitial": "", + "importedId": "jbWeZ8ALBkSYUyHM8ipP-Q", + "source": "PCS", + "affiliations": [] + }, + { + "id": 172830, + "firstName": "Thibault", + "lastName": "Groueix", + "middleInitial": "", + "importedId": "_w1rakuHDqzV6_gyCjQ38w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 175034, + "firstName": "Andy", + "lastName": "Wilson", + "middleInitial": "", + "importedId": "DUqT5WpzEWVFTvjcFgT5gQ", + "source": "PCS", + "affiliations": [] + }, + { 
+ "id": 175035, + "firstName": "Parastoo", + "lastName": "Abtahi", + "middleInitial": "", + "importedId": "6wNA8DW0RAGuKiNjrv0V4w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 175036, + "firstName": "Bryan", + "lastName": "Wang", + "middleInitial": "", + "importedId": "HPOKEIUSZ5soMnVIZFkXqw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 175037, + "firstName": "Artem", + "lastName": "Dementyev", + "middleInitial": "", + "importedId": "nKv9r96T7YxBIeWjQKf00w", + "source": "PCS", + "affiliations": [] + }, + { + "id": 175038, + "firstName": "Te-Yen", + "lastName": "Wu", + "middleInitial": "", + "importedId": "6gULAFNgA33JONjOq1B8tQ", + "source": "PCS", + "affiliations": [] + }, + { + "id": 175039, + "firstName": "Craig", + "lastName": "Shultz", + "middleInitial": "", + "importedId": "RhoQccaq8WOVcarf5yB4Lg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 175040, + "firstName": "Yukang", + "lastName": "Yan", + "middleInitial": "", + "importedId": "yegGhWHVexDZ_ltCnrFojw", + "source": "PCS", + "affiliations": [] + }, + { + "id": 175053, + "firstName": "Mira", + "lastName": "Dontcheva", + "middleInitial": "", + "importedId": "f7eOHNvrTysgmc4Vje8njg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 175054, + "firstName": "Thijs", + "lastName": "Roumen", + "middleInitial": "", + "importedId": "PuZDyg-CJcfJf5qrbCDWpg", + "source": "PCS", + "affiliations": [] + }, + { + "id": 175055, + "firstName": "Andrea", + "lastName": "Bianchi", + "middleInitial": "", + "importedId": "OF1aLBJ2Gd2pF9XymboU4A", + "source": "PCS", + "affiliations": [] + } + ], + "recognitions": [ + { + "id": 10094, + "name": "BELONGING&INCLUSION", + "iconName": "heart" + } + ] +} diff --git a/sigchi2/UIST_2024_program.md b/sigchi2/UIST_2024_program.md new file mode 100644 index 0000000..13855be --- /dev/null +++ b/sigchi2/UIST_2024_program.md @@ -0,0 +1,2944 @@ + +## Manipulating Text +### Beyond the Chat: Executable and Verifiable Text-Editing with LLMs +Authors: Philippe Laban, Jesse Vig, Marti Hearst, Caiming Xiong, Chien-Sheng Wu + +[Link](https://programs.sigchi.org/uist/2024/program/content/170790) + +Abstract: Conversational interfaces powered by Large Language Models (LLMs) have recently become a popular way to obtain feedback during document editing. However, standard chat-based conversational interfaces cannot explicitly surface the editing changes that they suggest. To give the author more control when editing with an LLM, we present InkSync, an editing interface that suggests executable edits directly within the document being edited. Because LLMs are known to introduce factual errors, Inksync also supports a 3-stage approach to mitigate this risk: Warn authors when a suggested edit introduces new information, help authors Verify the new information's accuracy through external search, and allow a third party to Audit with a-posteriori verification via a trace of all auto-generated content. +Two usability studies confirm the effectiveness of InkSync's components when compared to standard LLM-based chat interfaces, leading to more accurate and more efficient editing, and improved user experience. 
+ + + +### ScriptViz: A Visualization Tool to Aid Scriptwriting based on a Large Movie Database +Authors: Anyi Rao, Jean-Peïc Chou, Maneesh Agrawala + +[Link](https://programs.sigchi.org/uist/2024/program/content/170838) + +Abstract: Scriptwriters usually rely on their mental visualization to create a vivid story by using their imagination to see, feel, and experience the scenes they are writing. Besides mental visualization, they often refer to existing images or scenes in movies and analyze the visual elements to create a certain mood or atmosphere. In this paper, we develop a new tool, ScriptViz, to provide external visualization based on a large movie database for the screenwriting process. It retrieves reference visuals on the fly based on scripts’ text and dialogue from a large movie database. The tool provides two types of control on visual elements that enable writers to 1) see exactly what they want with fixed visual elements and 2) see variances in uncertain elements. User evaluation among 15 scriptwriters shows that ScriptViz is able to present scriptwriters with consistent yet diverse visual possibilities, aligning closely with their scripts and helping their creation. + + + + +### SkipWriter: LLM-Powered Abbreviated Writing on Tablets +Authors: Zheer Xu, Shanqing Cai, Mukund Varma T, Subhashini Venugopalan, Shumin Zhai + +[Link](https://programs.sigchi.org/uist/2024/program/content/170930) + +Abstract: Large Language Models (LLMs) may offer transformative opportunities for text input, especially for physically demanding modalities like handwriting. We studied a form of abbreviated handwriting by designing, developing, and evaluating a prototype, named SkipWriter, that converts handwritten strokes of a variable-length prefix-based abbreviation (e.g. "ho a y" as handwritten strokes) into the intended full phrase (e.g., "how are you" in the digital format) based on the preceding context. SkipWriter consists of an in-production handwriting recognizer and an LLM fine-tuned on this task. With flexible pen input, SkipWriter allows the user to add and revise prefix strokes when predictions do not match the user's intent. A user evaluation demonstrated a 60% reduction in motor movements with an average speed of 25.78 WPM. We also showed that this reduction is close to the ceiling of our model in an offline simulation. + + + +### Bluefish: Composing Diagrams with Declarative Relations +Authors: Josh Pollock, Catherine Mei, Grace Huang, Elliot Evans, Daniel Jackson, Arvind Satyanarayan + +[Link](https://programs.sigchi.org/uist/2024/program/content/170824) + +Abstract: Diagrams are essential tools for problem-solving and communication as they externalize conceptual structures using spatial relationships. But when picking a diagramming framework, users are faced with a dilemma. They can either use a highly expressive but low-level toolkit, whose API does not match their domain-specific concepts, or select a high-level typology, which offers a recognizable vocabulary but supports a limited range of diagrams. To address this gap, we introduce Bluefish: a diagramming framework inspired by component-based user interface (UI) libraries. Bluefish lets users create diagrams using relations: declarative, composable, and extensible diagram fragments that relax the concept of a UI component. Unlike a component, a relation does not have sole ownership over its children nor does it need to fully specify their layout.
To render diagrams, Bluefish extends a traditional tree-based scenegraph to a compound graph that captures both hierarchical and adjacent relationships between nodes. To evaluate our system, we construct a diverse example gallery covering many domains including mathematics, physics, computer science, and even cooking. We show that Bluefish's relations are effective declarative primitives for diagrams. Bluefish is open source, and we aim to shape it into both a usable tool and a research platform. + + + + +## Future Fabrics +### ScrapMap: Interactive Color Layout for Scrap Quilting +Authors: Mackenzie Leake, Ross Daly + +[Link](https://programs.sigchi.org/uist/2024/program/content/170743) + +Abstract: Scrap quilting is a popular sewing process that involves combining leftover pieces of fabric into traditional patchwork designs. Imagining the possibilities for these leftovers and arranging the fabrics in such a way that achieves visual goals, such as high contrast, can be challenging given the large number of potential fabric assignments within the quilt's design. We formulate the task of designing a scrap quilt as a graph coloring problem with domain-specific coloring and material constraints. Our interactive tool called ScrapMap helps quilters explore these potential designs given their available materials by leveraging the hierarchy of scrap quilt construction (e.g., quilt blocks and motifs) and providing user-directed automatic block coloring suggestions. Our user evaluation indicates that quilters find ScrapMap useful for helping them consider new ways to use their scraps and create visually striking quilts. + + + +### What's in a cable? Abstracting Knitting Design Elements with Blended Raster/Vector Primitives +Authors: Hannah Twigg-Smith, Yuecheng Peng, Emily Whiting, Nadya Peek + +[Link](https://programs.sigchi.org/uist/2024/program/content/170811) + +Abstract: In chart-based programming environments for machine knitting, patterns are specified at a low level by placing operations on a grid. This highly manual workflow makes it challenging to iterate on design elements such as cables, colorwork, and texture. While vector-based abstractions for knitting design elements may facilitate higher-level manipulation, they often include interdependencies which require stitch-level reconciliation. To address this, we contribute a new way of specifying knits with blended vector and raster primitives. Our abstraction supports the design of interdependent elements like colorwork and texture. We have implemented our blended raster/vector specification in a direct manipulation design tool where primitives are layered and rasterized, allowing for simulation of the resulting knit structure and generation of machine instructions. Through examples, we show how our approach enables higher-level manipulation of various knitting techniques, including intarsia colorwork, short rows, and cables. Specifically, we show how our tool supports the design of complex patterns including origami pleat patterns and capacitive sensor patches. + + + +### Embrogami: Shape-Changing Textiles with Machine Embroidery +Authors: Yu Jiang, Alice Haynes, Narjes Pourjafarian, Jan Borchers, Jürgen Steimle + +[Link](https://programs.sigchi.org/uist/2024/program/content/170971) + +Abstract: Machine embroidery is a versatile technique for creating custom and entirely fabric-based patterns on thin and conformable textile surfaces. 
However, existing machine-embroidered surfaces remain static, limiting the interactions they can support. We introduce Embrogami, an approach for fabricating textile structures with versatile shape-changing behaviors. Inspired by origami, we leverage machine embroidery to form finger-tip-scale mountain-and-valley structures on textiles with customized shapes, bistable or elastic behaviors, and modular composition. The structures can be actuated by the user or the system to modify the local textile surface topology, creating interactive elements like toggles and sliders or textile shape displays with an ultra-thin, flexible, and integrated form factor. We provide a dedicated software tool and report results of technical experiments to allow users to flexibly design, fabricate, and deploy customized Embrogami structures. With four application cases, we showcase Embrogami’s potential to create functional and flexible shape-changing textiles with diverse visuo-tactile feedback. + + + +### KODA: Knit-program Optimization by Dependency Analysis +Authors: Megan Hofmann + +[Link](https://programs.sigchi.org/uist/2024/program/content/170935) + +Abstract: Digital knitting machines have the capability to reliably manufacture seamless, textured, and multi-material garments, but these capabilities are obscured by limiting CAD tools. Recent innovations in computational knitting build on emerging programming infrastructure that gives full access to the machine's capabilities but requires an extensive understanding of machine operations and execution. In this paper, we contribute a critical missing piece of the knitting-machine programming pipeline--a program optimizer. Program optimization allows programmers to focus on developing novel algorithms that produce desired fabrics while deferring concerns of efficient machine operations to the optimizer. We present KODA, the Knit-program Optimization by Dependency Analysis method. KODA re-orders and reduces machine instructions to reduce knitting time, increase knitting reliability, and manage boilerplate operations that adjust the machine state. The result is a system that enables programmers to write readable and intuitive knitting algorithms while producing efficient and verified programs. + + + +### X-Hair: 3D Printing Hair-like Structures with Multi-form, Multi-property and Multi-function +Authors: Guanyun Wang, Junzhe Ji, Yunkai Xu, Lei Ren, Xiaoyang Wu, Chunyuan Zheng, Xiaojing Zhou, Xin Tang, Boyu Feng, Lingyun Sun, Ye Tao, Jiaji Li + +[Link](https://programs.sigchi.org/uist/2024/program/content/171007) + +Abstract: In this paper, we present X-Hair, a method that enables 3D-printed hair with various forms, properties, and functions. We developed a two-step suspend printing strategy to fabricate hair-like structures in different forms (e.g. fluff, bristle, barb) by adjusting parameters including Extrusion Length Ratio and Total Length. Moreover, a design tool is also established for users to customize hair-like structures with various properties (e.g. pointy, stiff, soft) on imported 3D models, which virtually shows the results for previewing and generates G-code files for 3D printing. We demonstrate the design space of X-Hair and evaluate the properties of them with different parameters. Through a series of applications with hair-like structures, we validate X-hair's practical usage of biomimicry, decoration, heat preservation, adhesion, and haptic interaction. 
+ + + +### TouchpadAnyWear: Textile-Integrated Tactile Sensors for Multimodal High Spatial-Resolution Touch Inputs with Motion Artifacts Tolerance +Authors: Junyi Zhao, Pornthep Preechayasomboon, Tyler Christensen, Amirhossein H. Memar, Zhenzhen Shen, Nick Colonnese, Michael Khbeis, Mengjia Zhu + +[Link](https://programs.sigchi.org/uist/2024/program/content/170873) + +Abstract: This paper presents TouchpadAnyWear, a novel family of textile-integrated force sensors capable of multi-modal touch input, encompassing micro-gesture detection, two-dimensional (2D) continuous input, and force-sensitive strokes. This thin (< 1.5 mm) and conformal device features high spatial resolution sensing and motion artifact tolerance through its unique capacitive sensor architecture. The sensor consists of a knitted textile compressive core, sandwiched by stretchable silver electrodes, and conductive textile shielding layers on both sides. With a high-density sensor pixel array (25/cm²), TouchpadAnyWear can detect touch input locations and sizes with millimeter-scale spatial resolution and a wide range of force inputs (0.05 N to 20 N). The incorporation of miniature polymer domes, referred to as "poly-islands", onto the knitted textile locally stiffens the sensing areas, thereby reducing motion artifacts during deformation. These poly-islands also provide passive tactile feedback to users, allowing for eyes-free localization of the active sensing pixels. Design choices and sensor performance are evaluated using in-depth mechanical characterization. Demonstrations include an 8-by-8 grid sensor as a miniature high-resolution touchpad and a T-shaped sensor for thumb-to-finger micro-gesture input. User evaluations validate the effectiveness and usability of TouchpadAnyWear in daily interaction contexts, such as tapping, forceful pressing, swiping, 2D cursor control, and 2D stroke-based gestures. This paper further discusses potential applications and explorations for TouchpadAnyWear in wearable smart devices, gaming, and augmented reality devices. + + + + +## Storytime +### Story-Driven: Exploring the Impact of Providing Real-time Context Information on Automated Storytelling +Authors: Jan Henry Belz, Lina Weilke, Anton Winter, Philipp Hallgarten, Enrico Rukzio, Tobias Grosse-Puppendahl + +[Link](https://programs.sigchi.org/uist/2024/program/content/170763) + +Abstract: Stories have long captivated the human imagination with narratives that enrich our lives. Traditional storytelling methods are often static and not designed to adapt to the listener’s environment, which is full of dynamic changes. For instance, people often listen to stories in the form of podcasts or audiobooks while traveling in a car. Yet, conventional in-car storytelling systems do not embrace the adaptive potential of this space. The advent of generative AI is the key to creating content that is not just personalized but also responsive to the changing parameters of the environment. We introduce a novel system for interactive, real-time story narration that leverages environment and user context in correspondence with estimated arrival times to adjust the generated story continuously. Through two comprehensive real-world studies with a total of 30 participants in a vehicle, we assess the user experience, level of immersion, and perception of the environment provided by the prototype.
Participants' feedback shows a significant improvement over traditional storytelling and highlights the importance of context information for generative storytelling systems. + + + +### Lumina: A Software Tool for Fostering Creativity in Designing Chinese Shadow Puppets +Authors: Zhihao Yao, Yao Lu, Qirui Sun, Shiqing Lyu, Hanxuan Li, Xing-Dong Yang, Xuezhu Wang, Guanhong Liu, Haipeng Mi + +[Link](https://programs.sigchi.org/uist/2024/program/content/170765) + +Abstract: Shadow puppetry, a culturally rich storytelling art, faces challenges transitioning to the digital realm. Creators in the early design phase struggle with crafting intricate patterns, textures, and basic animations while adhering to stylistic conventions - hindering creativity, especially for novices. This paper presents Lumina, a tool to facilitate the early Chinese shadow puppet design stage. Lumina provides contour templates, animations, scene editing tools, and machine-generated traditional puppet patterns. These features liberate creators from tedious tasks, allowing focus on the creative process. Developed based on a formative study with puppet creators, the web-based Lumina enables wide dissemination. An evaluation with 18 participants demonstrated Lumina's effectiveness and ease of use, with participants successfully creating designs spanning traditional themes to contemporary and science-fiction concepts. + + + +### PortalInk: 2.5D Visual Storytelling with SVG Parallax and Waypoint Transitions +Authors: Tongyu Zhou, Joshua Yang, Vivian Chan, Ji Won Chung, Jeff Huang + +[Link](https://programs.sigchi.org/uist/2024/program/content/170783) + +Abstract: Efforts to expand the authoring of visual stories beyond the 2D canvas have commonly mapped flat imagery to 3D scenes or objects. This translation requires spatial reasoning, as artists must think in two spaces. We propose PortalInk, a tool for artists to craft and export 2.5D graphical stories while remaining in 2D space by using SVG transitions. This is achieved via a parallax effect that generates a sense of depth that can be further explored using pan and zoom interactions. Any canvas position can be saved and linked to in a closed drawn stroke, or "portal," allowing the artist to create spatially discontinuous, or even infinitely looping visual trajectories. We provide three case studies and a gallery to demonstrate how artists can naturally incorporate these interactions to craft immersive comics, as well as re-purpose them to support use cases beyond drawing such as animation, slide-based presentations, web design, and digital journalism. + + + +### DrawTalking: Building Interactive Worlds by Sketching and Speaking +Authors: Karl Rosenberg, Rubaiat Habib Kazi, Li-Yi Wei, Haijun Xia, Ken Perlin + +[Link](https://programs.sigchi.org/uist/2024/program/content/170730) + +Abstract: We introduce DrawTalking, an approach to building and controlling interactive worlds by sketching and speaking while telling stories. It emphasizes user control and flexibility, and gives programming-like capability without requiring code. An early open-ended study with our prototype shows that the mechanics resonate and are applicable to many creative-exploratory use cases, with the potential to inspire and inform research in future natural interfaces for creative exploration and authoring. 
+ + + +### Patchview: LLM-powered Worldbuilding with Generative Dust and Magnet Visualization +Authors: John Chung, Max Kreminski + +[Link](https://programs.sigchi.org/uist/2024/program/content/170729) + +Abstract: Large language models (LLMs) can help writers build story worlds by generating world elements, such as factions, characters, and locations. However, making sense of many generated elements can be overwhelming. Moreover, if the user wants to precisely control aspects of generated elements that are difficult to specify verbally, prompting alone may be insufficient. We introduce Patchview, a customizable LLM-powered system that visually aids worldbuilding by allowing users to interact with story concepts and elements through the physical metaphor of magnets and dust. Elements in Patchview are visually dragged closer to concepts with high relevance, facilitating sensemaking. The user can also steer the generation with verbally elusive concepts by indicating the desired position of the element between concepts. When the user disagrees with the LLM's visualization and generation, they can correct those by repositioning the element. These corrections can be used to align the LLM's future behaviors to the user's perception. With a user study, we show that Patchview supports the sensemaking of world elements and steering of element generation, facilitating exploration during the worldbuilding process. Patchview provides insights on how customizable visual representation can help sensemake, steer, and align generative AI model behaviors with the user's intentions. + + + +### An Interactive System for Supporting Creative Exploration of Cinematic Composition Designs +Authors: Rui He, Huaxin Wei, Ying Cao + +[Link](https://programs.sigchi.org/uist/2024/program/content/170806) + +Abstract: Designing cinematic compositions, which involves moving cameras through a scene, is essential yet challenging in filmmaking. Machinima filmmaking provides real-time virtual environments for exploring different compositions flexibly and efficiently. However, producing high-quality cinematic compositions in such environments still requires significant cinematography skills and creativity. This paper presents Cinemassist, a tool designed to support and enhance this creative process by generating a variety of cinematic composition proposals at both keyframe and scene levels, which users can incorporate into their workflows and achieve more creative results. At the crux of our system is a deep generative model trained on real movie data, which can generate plausible, diverse camera poses conditioned on 3D animations and additional input semantics. Our model enables an interactive cinematic composition design workflow where users can co-design with the model by being inspired by model-generated suggestions while having control over the generation process. Our user study and expert rating find Cinemassist can facilitate the design process for users of different backgrounds and enhance the design quality especially for users with animation expertise, demonstrating its potential as an invaluable tool in the context of digital filmmaking.
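Every entry in this generated listing follows the same few-line layout: a `## <session>` heading, a `### <title>` heading, an optional award tag such as `BEST_PAPER`, an `Authors:` line, a `[Link](...)` line pointing at programs.sigchi.org, and a single `Abstract:` paragraph. As an illustrative aside rather than part of the generated file, the sketch below reads those entries back into structured records; the `parse_program` helper, its regexes, and the output field names are assumptions chosen for this example, and the path comes from the diff.

```python
import re

def parse_program(path="sigchi2/UIST_2024_program.md"):
    """Recover (session, title, authors, link, abstract) records (illustrative sketch)."""
    with open(path, encoding="utf-8") as f:
        text = f.read()

    papers = []
    for session_chunk in text.split("\n## ")[1:]:        # one chunk per "## <session>"
        session_name, _, session_body = session_chunk.partition("\n")
        for entry in session_body.split("\n### ")[1:]:   # one chunk per "### <title>"
            title, _, body = entry.partition("\n")
            authors = re.search(r"^Authors: (.+)$", body, re.MULTILINE)
            link = re.search(r"\[Link\]\((.+?)\)", body)
            # Assumes single-paragraph abstracts: capture up to the first blank line.
            abstract = re.search(r"Abstract: (.*?)(?:\n\s*\n|\Z)", body, re.DOTALL)
            papers.append({
                "session": session_name.strip(),
                "title": title.strip(),
                "authors": [a.strip() for a in authors.group(1).split(",")] if authors else [],
                "link": link.group(1) if link else None,
                "abstract": " ".join(abstract.group(1).split()) if abstract else None,
            })
    return papers

if __name__ == "__main__":
    for paper in parse_program():
        print(f'{paper["session"]} | {paper["title"]}')
```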
+ + + + +## Beyond mobile +### picoRing: battery-free rings for subtle thumb-to-index input +Authors: Ryo Takahashi, Eric Whitmire, Roger Boldu, Shiu Ng, Wolf Kienzle, Hrvoje Benko + +[Link](https://programs.sigchi.org/uist/2024/program/content/170844) + +Abstract: Smart rings for subtle, reliable finger input offer an attractive path for ubiquitous interaction with wearable computing platforms. +However, compared to ordinary rings worn for cultural or fashion reasons, smart rings are much bulkier and less comfortable, largely due to the space required for a battery, which also limits the space available for sensors. +This paper presents picoRing, a flexible sensing architecture that enables a variety of battery-free smart rings paired with a wristband. +By inductively connecting a wristband-based sensitive reader coil with a ring-based fully-passive sensor coil, picoRing enables the wristband to stably detect the passive response from the ring via a weak inductive coupling. +We demonstrate four different rings that support thumb-to-finger interactions like pressing, sliding, or scrolling. +When users perform these interactions, the corresponding ring converts each input into a unique passive response through a network of passive switches. +Combining the coil-based sensitive readout with the fully-passive ring design enables a tiny ring that weighs as little as 1.5 g and achieves a 13 cm stable readout despite finger bending, and proximity to metal. + + + +### WatchLink: Enhancing Smartwatches with Sensor Add-Ons via ECG Interface +Authors: Anandghan Waghmare, Ishan Chatterjee, Vikram Iyer, Shwetak Patel + +[Link](https://programs.sigchi.org/uist/2024/program/content/170782) + +Abstract: We introduce a low-power communication method that lets smartwatches leverage existing electrocardiogram (ECG) hardware as a data communication interface. Our unique approach enables the connection of external, inexpensive, and low-power "add-on" sensors to the smartwatch, expanding its functionalities. These sensors cater to specialized user needs beyond those offered by pre-built sensor suites, at a fraction of the cost and power of traditional communication protocols, including Bluetooth Low Energy. To demonstrate the feasibility of our approach, we conduct a series of exploratory and evaluative tests to characterize the ECG interface as a communication channel on commercial smartwatches. We design a simple transmission scheme using commodity components, demonstrating cost and power benefits. Further, we build and test a suite of add-on sensors, including UV light, body temperature, buttons, and breath alcohol, all of which achieved testing objectives at low material cost and power usage. This research paves the way for personalized and user-centric wearables by offering a cost-effective solution to expand their functionalities. + + + + +### PrISM-Observer: Intervention Agent to Help Users Perform Everyday Procedures Sensed using a Smartwatch +Authors: Riku Arakawa, Hiromu Yakura, Mayank Goel + +[Link](https://programs.sigchi.org/uist/2024/program/content/170914) + +Abstract: We routinely perform procedures (such as cooking) that include a set of atomic steps. Often, inadvertent omission or misordering of a single step can lead to serious consequences, especially for those experiencing cognitive challenges such as dementia. This paper introduces PrISM-Observer, a smartwatch-based, context-aware, real-time intervention system designed to support daily tasks by preventing errors. 
Unlike traditional systems that require users to seek out information, the agent observes user actions and intervenes proactively. This capability is enabled by the agent's ability to continuously update its belief in the user's behavior in real-time through multimodal sensing and forecast optimal intervention moments and methods. We first validated the steps-tracking performance of our framework through evaluations across three datasets with different complexities. Then, we implemented a real-time agent system using a smartwatch and conducted a user study in a cooking task scenario. The system generated helpful interventions, and we gained positive feedback from the participants. The general applicability of PrISM-Observer to daily tasks promises broad applications, for instance, including support for users requiring more involved interventions, such as people with dementia or post-surgical patients. + + + + +## Validation in AI/ML +### Natural Expression of a Machine Learning Model's Uncertainty Through Verbal and Non-Verbal Behavior of Intelligent Virtual Agents +Authors: Susanne Schmidt, Tim Rolff, Henrik Voigt, Micha Offe, Frank Steinicke + +[Link](https://programs.sigchi.org/uist/2024/program/content/170826) + +Abstract: Uncertainty cues are inherent in natural human interaction, as they signal to communication partners how much they can rely on conveyed information. Humans subconsciously provide such signals both verbally (e.g., through expressions such as "maybe" or "I think") and non-verbally (e.g., by diverting their gaze). In contrast, artificial intelligence (AI)-based services and machine learning (ML) models such as ChatGPT usually do not disclose the reliability of answers to their users. +In this paper, we explore the potential of combining ML models as powerful information sources with human means of expressing uncertainty to contextualize the information. We present a comprehensive pipeline that comprises (1) the human-centered collection of (non-)verbal uncertainty cues, (2) the transfer of cues to virtual agent videos, (3) the annotation of videos for perceived uncertainty, and (4) the subsequent training of a custom ML model that can generate uncertainty cues in virtual agent behavior. In a final step (5), the trained ML model is evaluated in terms of both fidelity and generalizability of the generated (non-)verbal uncertainty behavior. + + + +### Who Validates the Validators? Aligning LLM-Assisted Evaluation of LLM Outputs with Human Preferences +Authors: Shreya Shankar, J.D. Zamfirescu-Pereira, Bjoern Hartmann, Aditya Parameswaran, Ian Arawjo + +[Link](https://programs.sigchi.org/uist/2024/program/content/170954) + +Abstract: Due to the cumbersome nature of human evaluation and limitations of code-based evaluation, Large Language Models (LLMs) are increasingly being used to assist humans in evaluating LLM outputs. Yet LLM-generated evaluators simply inherit all the problems of the LLMs they evaluate, requiring further human validation. We present a mixed-initiative approach to “validate the validators”— aligning LLM-generated evaluation functions (be it prompts or code) with human requirements. Our interface, EvalGen, provides automated assistance to users in generating evaluation criteria and implementing assertions. While generating candidate implementations (Python functions, LLM grader prompts), EvalGen asks humans to grade a subset of LLM outputs; this feedback is used to select implementations that better align with user grades. 
A qualitative study finds overall support for EvalGen but underscores the subjectivity and iterative nature of alignment. In particular, we identify a phenomenon we dub criteria drift: users need criteria to grade outputs, but grading outputs helps users define criteria. What is more, some criteria appear dependent on the specific LLM outputs observed (rather than independent and definable a priori), raising serious questions for approaches that assume the independence of evaluation from observation of model outputs. We present our interface and implementation details, a comparison of our algorithm with a baseline approach, and implications for the design of future LLM evaluation assistants. + + + +### LlamaTouch: A Faithful and Scalable Testbed for Mobile UI Task Automation +Authors: Li Zhang, Shihe Wang, Xianqing Jia, Zhihan Zheng, Yunhe Yan, Longxi Gao, Yuanchun Li, Mengwei Xu + +[Link](https://programs.sigchi.org/uist/2024/program/content/170831) + +Abstract: The emergent large language/multimodal models facilitate the evolution of mobile agents, especially in mobile UI task automation. However, existing evaluation approaches, which rely on human validation or established datasets to compare agent-predicted actions with predefined action sequences, are unscalable and unfaithful. To overcome these limitations, this paper presents LlamaTouch, a testbed for on-device mobile UI task execution and faithful, scalable task evaluation. By observing that the task execution process only transfers UI states, LlamaTouch employs a novel evaluation approach that only assesses whether an agent traverses all manually annotated, essential application/system states. LlamaTouch comprises three key techniques: (1) On-device task execution that enables mobile agents to interact with realistic mobile environments for task execution. (2) Fine-grained UI component annotation that merges pixel-level screenshots and textual screen hierarchies to explicitly identify and precisely annotate essential UI components with a rich set of designed annotation primitives. (3) A multi-level application state matching algorithm that utilizes exact and fuzzy matching to accurately detect critical information in each screen, even with unpredictable UI layout/content dynamics. LlamaTouch currently incorporates four mobile agents and 496 tasks, encompassing both tasks in the widely-used datasets and our self-constructed ones to cover more diverse mobile applications. Evaluation results demonstrate LlamaTouch’s high faithfulness of evaluation in real-world mobile environments and its better scalability than human validation. LlamaTouch also enables easy task annotation and integration of new mobile agents. Code and dataset are publicly available at https://github.com/LlamaTouch/LlamaTouch. + + + +### Clarify: Improving Model Robustness With Natural Language Corrections +Authors: Yoonho Lee, Michelle Lam, Helena Vasconcelos, Michael Bernstein, Chelsea Finn + +[Link](https://programs.sigchi.org/uist/2024/program/content/170784) + +Abstract: The standard way to teach models is by feeding them lots of data. However, this approach often teaches models incorrect ideas because they pick up on misleading signals in the data. To prevent such misconceptions, we must necessarily provide additional information beyond the training data. Prior methods incorporate additional instance-level supervision, such as labels for misleading features or additional labels for debiased data. 
However, such strategies require a large amount of labeler effort. We hypothesize that people are good at providing textual feedback at the concept level, a capability that existing teaching frameworks do not leverage. We propose Clarify, a novel interface and method for interactively correcting model misconceptions. Through Clarify, users need only provide a short text description of a model's consistent failure patterns. Then, in an entirely automated way, we use such descriptions to improve the training process. Clarify is the first end-to-end system for user model correction. Our user studies show that non-expert users can successfully describe model misconceptions via Clarify, leading to increased worst-case performance in two datasets. We additionally conduct a case study on a large-scale image dataset, ImageNet, using Clarify to find and rectify 31 novel hard subpopulations. + + + +### "The Data Says Otherwise" – Towards Automated Fact-checking and Communication of Data Claims +Authors: Yu Fu, Shunan Guo, Jane Hoffswell, Victor S. Bursztyn, Ryan Rossi, John Stasko + +[Link](https://programs.sigchi.org/uist/2024/program/content/170762) + +Abstract: Fact-checking data claims requires data evidence retrieval and analysis, which can become tedious and intractable when done manually. This work presents Aletheia, an automated fact-checking prototype designed to facilitate data claims verification and enhance data evidence communication. For verification, we utilize a pre-trained LLM to parse the semantics for evidence retrieval. To effectively communicate the data evidence, we design representations in two forms: data tables and visualizations, tailored to various data fact types. Additionally, we design interactions that showcase a real-world application of these techniques. We evaluate the performance of two core NLP tasks with a curated dataset comprising 400 data claims and compare the two representation forms regarding viewers’ assessment time, confidence, and preference via a user study with 20 participants. The evaluation offers insights into the feasibility and bottlenecks of using LLMs for data fact-checking tasks, potential advantages and disadvantages of using visualizations over data tables, and design recommendations for presenting data evidence. + + + + +## A11y +### ProgramAlly: Creating Custom Visual Access Programs via Multi-Modal End-User Programming +Authors: Jaylin Herskovitz, Andi Xu, Rahaf Alharbi, Anhong Guo + +[Link](https://programs.sigchi.org/uist/2024/program/content/170960) + +Abstract: Existing visual assistive technologies are built for simple and common use cases, and have few avenues for blind people to customize their functionalities. Drawing from prior work on DIY assistive technology, this paper investigates end-user programming as a means for users to create and customize visual access programs to meet their unique needs. We introduce ProgramAlly, a system for creating custom filters for visual information, e.g., 'find NUMBER on BUS', leveraging three end-user programming approaches: block programming, natural language, and programming by example. To implement ProgramAlly, we designed a representation of visual filtering tasks based on scenarios encountered by blind people, and integrated a set of on-device and cloud models for generating and running these programs. 
In user studies with 12 blind adults, we found that participants preferred different programming modalities depending on the task, and envisioned using visual access programs to address unique accessibility challenges that are otherwise difficult with existing applications. Through ProgramAlly, we present an exploration of how blind end-users can create visual access programs to customize and control their experiences. + + + +### Accessible Gesture Typing on Smartphones for People with Low Vision +Authors: Dan Zhang, Zhi Li, Vikas Ashok, William H Seiple, IV Ramakrishnan, Xiaojun Bi + +[Link](https://programs.sigchi.org/uist/2024/program/content/170887) + +Abstract: While gesture typing is widely adopted on touchscreen keyboards, its support for low vision users is limited. We have designed and implemented two keyboard prototypes, layout-magnified and key-magnified keyboards, to enable gesture typing for people with low vision. Both keyboards facilitate uninterrupted access to all keys while the screen magnifier is active, allowing people with low vision to input text with one continuous stroke. Furthermore, we have created a kinematics-based decoding algorithm to accommodate the typing behavior of people with low vision. This algorithm can decode the gesture input even if the gesture trace deviates from a pre-defined word template, and the starting position of the gesture is far from the starting letter of the target word. Our user study showed that the key-magnified keyboard achieved 5.28 words per minute, 27.5% faster than a conventional gesture typing keyboard with voice feedback. + + + +### AccessTeleopKit: A Toolkit for Creating Accessible Web-Based Interfaces for Tele-Operating an Assistive Robot +Authors: Vinitha Ranganeni, Varad Dhat, Noah Ponto, Maya Cakmak + +[Link](https://programs.sigchi.org/uist/2024/program/content/170825) + +Abstract: Mobile manipulator robots, which can move around and physically interact with their environments, can empower people with motor limitations to independently carry out many activities of daily living. While many interfaces have been developed for tele-operating complex robots, most of them are not accessible to people with severe motor limitations. Further, most interfaces are rigid with limited configurations and are not readily available to download and use. To address these barriers, we developed AccessTeleopKit: an open-source toolkit for creating custom and accessible robot tele-operation interfaces based on cursor-and-click input for the Stretch 3 mobile-manipulator. With AccessTeleopKit users can add, remove, and rearrange components such as buttons and camera views, and select between a variety of control modes. We describe the participatory and iterative design process that led to the current implementation of AccessTeleopKit, involving three long-term deployments of the robot in the home of a quadriplegic user. We demonstrate how AccessTeleopKit allowed the user to create different interfaces for different tasks and the diversity of tasks it allowed the user to carry out. We also present two studies involving six additional users with severe motor limitations, demonstrating the power of AccessTeleopKit in creating custom interfaces for different user needs and preferences. 
+ + + +### Memory Reviver: Supporting Photo-Collection Reminiscence for People with Visual Impairment via a Proactive Chatbot +Authors: Shuchang Xu, Chang Chen, Zichen LIU, Xiaofu Jin, Linping Yuan, Yukang Yan, Huamin Qu + +[Link](https://programs.sigchi.org/uist/2024/program/content/170852) + +Abstract: Reminiscing with photo collections offers significant psychological benefits but poses challenges for people with visual impairment (PVI). Their current reliance on sighted help restricts the flexibility of this activity. In response, we explored using a chatbot in a preliminary study. We identified two primary challenges that hinder effective reminiscence with a chatbot: the scattering of information and a lack of proactive guidance. To address these limitations, we present Memory Reviver, a proactive chatbot that helps PVI reminisce with a photo collection through natural language communication. Memory Reviver incorporates two novel features: (1) a Memory Tree, which uses a hierarchical structure to organize the information in a photo collection; and (2) a Proactive Strategy, which actively delivers information to users at proper conversation rounds. Evaluation with twelve PVI demonstrated that Memory Reviver effectively facilitated engaging reminiscence, enhanced understanding of photo collections, and delivered natural conversational experiences. Based on our findings, we distill implications for supporting photo reminiscence and designing chatbots for PVI. + + + +### VizAbility: Enhancing Chart Accessibility with LLM-based Conversational Interaction +Authors: Joshua Gorniak, Yoon Kim, Donglai Wei, Nam Wook Kim + +[Link](https://programs.sigchi.org/uist/2024/program/content/171009) + +Abstract: Traditional accessibility methods like alternative text and data tables typically underrepresent data visualization's full potential. Keyboard-based chart navigation has emerged as a potential solution, yet efficient data exploration remains challenging. We present VizAbility, a novel system that enriches chart content navigation with conversational interaction, enabling users to use natural language for querying visual data trends. VizAbility adapts to the user's navigation context for improved response accuracy and facilitates verbal command-based chart navigation. Furthermore, it can address queries for contextual information, designed to address the needs of visually impaired users. We designed a large language model (LLM)-based pipeline to address these user queries, leveraging chart data & encoding, user context, and external web knowledge. We conducted both qualitative and quantitative studies to evaluate VizAbility's multimodal approach. We discuss further opportunities based on the results, including improved benchmark testing, incorporation of vision models, and integration with visualization workflows. + + + +### Computational Trichromacy Reconstruction: Empowering the Color-Vision Deficient to Recognize Colors Using Augmented Reality +Authors: Yuhao Zhu, Ethan Chen, Colin Hascup, Yukang Yan, Gaurav Sharma + +[Link](https://programs.sigchi.org/uist/2024/program/content/170991) + +Abstract: We propose an assistive technology that helps individuals with Color Vision Deficiencies (CVD) to recognize/name colors. +A dichromat's color perception is a reduced two-dimensional (2D) subset of a normal +trichromat's three dimensional color (3D) perception, leading to confusion when visual stimuli that appear identical to the dichromat are referred to by different color names. 
+Using our proposed system, CVD individuals can interactively induce distinct perceptual changes to originally confusing colors via a computational color space transformation. +By combining their original 2D percepts for colors with the discriminative changes, a three-dimensional color space is reconstructed, where the dichromat can learn to resolve color name confusions and accurately recognize colors. +Our system is implemented as an Augmented Reality (AR) interface on smartphones, where users interactively control the rotation through swipe gestures and observe the induced color shifts in the camera view or in a displayed image. Through psychophysical experiments and a longitudinal user study, we demonstrate that such rotational color shifts have discriminative power (initially confusing colors become distinct under rotation) and exhibit structured perceptual shifts dichromats can learn with modest training. The AR App is also evaluated in two real-world scenarios (building with LEGO blocks and interpreting artistic works); users all report a positive experience in using the App to recognize object colors that they otherwise could not. + + + + +## Contextual Augmentations +### StreetNav: Leveraging Street Cameras to Support Precise Outdoor Navigation for Blind Pedestrians +Authors: Gaurav Jain, Basel Hindi, Zihao Zhang, Koushik Srinivasula, Mingyu Xie, Mahshid Ghasemi, Daniel Weiner, Sophie Ana Paris, Xin Yi Therese Xu, Michael Malcolm, Mehmet Kerem Turkcan, Javad Ghaderi, Zoran Kostic, Gil Zussman, Brian Smith + +[Link](https://programs.sigchi.org/uist/2024/program/content/171003) + +Abstract: Blind and low-vision (BLV) people rely on GPS-based systems for outdoor navigation. GPS's inaccuracy, however, causes them to veer off track, run into obstacles, and struggle to reach precise destinations. While prior work has made precise navigation possible indoors via hardware installations, enabling this outdoors remains a challenge. Interestingly, many outdoor environments are already instrumented with hardware such as street cameras. In this work, we explore the idea of repurposing existing street cameras for outdoor navigation. Our community-driven approach considers both technical and sociotechnical concerns through engagements with various stakeholders: BLV users, residents, business owners, and Community Board leadership. The resulting system, StreetNav, processes a camera's video feed using computer vision and gives BLV pedestrians real-time navigation assistance. Our evaluations show that StreetNav guides users more precisely than GPS, but its technical performance is sensitive to environmental occlusions and distance from the camera. We discuss future implications for deploying such systems at scale. + + + +### WorldScribe: Towards Context-Aware Live Visual Descriptions +BEST_PAPER + +Authors: Ruei-Che Chang, Yuxuan Liu, Anhong Guo + +[Link](https://programs.sigchi.org/uist/2024/program/content/170940) + +Abstract: Automated live visual descriptions can aid blind people in understanding their surroundings with autonomy and independence. However, providing descriptions that are rich, contextual, and just-in-time has been a long-standing challenge in accessibility. In this work, we develop WorldScribe, a system that generates automated live real-world visual descriptions that are customizable and adaptive to users' contexts: (i) WorldScribe's descriptions are tailored to users' intents and prioritized based on semantic relevance.
(ii) WorldScribe is adaptive to visual contexts, e.g., providing consecutively succinct descriptions for dynamic scenes, while presenting longer and detailed ones for stable settings. (iii) WorldScribe is adaptive to sound contexts, e.g., increasing volume in noisy environments, or pausing when conversations start. Powered by a suite of vision, language, and sound recognition models, WorldScribe introduces a description generation pipeline that balances the tradeoffs between their richness and latency to support real-time use. The design of WorldScribe is informed by prior work on providing visual descriptions and a formative study with blind participants. Our user study and subsequent pipeline evaluation show that WorldScribe can provide real-time and fairly accurate visual descriptions to facilitate environment understanding that is adaptive and customized to users' contexts. Finally, we discuss the implications and further steps toward making live visual descriptions more context-aware and humanized. + + + +### CookAR: Affordance Augmentations in Wearable AR to Support Kitchen Tool Interactions for People with Low Vision +Authors: Jaewook Lee, Andrew Tjahjadi, Jiho Kim, Junpu Yu, Minji Park, Jiawen Zhang, Jon Froehlich, Yapeng Tian, Yuhang Zhao + +[Link](https://programs.sigchi.org/uist/2024/program/content/170874) + +Abstract: Cooking is a central activity of daily living, supporting independence as well as mental and physical health. However, prior work has highlighted key barriers for people with low vision (LV) to cook, particularly around safely interacting with tools, such as sharp knives or hot pans. Drawing on recent advancements in computer vision (CV), we present CookAR, a head-mounted AR system with real-time object affordance augmentations to support safe and efficient interactions with kitchen tools. To design and implement CookAR, we collected and annotated the first egocentric dataset of kitchen tool affordances, fine-tuned an affordance segmentation model, and developed an AR system with a stereo camera to generate visual augmentations. To validate CookAR, we conducted a technical evaluation of our fine-tuned model as well as a qualitative lab study with 10 LV participants for suitable augmentation design. Our technical evaluation demonstrates that our model outperforms the baseline on our tool affordance dataset, while our user study indicates a preference for affordance augmentations over the traditional whole object augmentations. + + + +### DesignChecker: Visual Design Support for Blind and Low Vision Web Developers +Authors: Mina Huh, Amy Pavel + +[Link](https://programs.sigchi.org/uist/2024/program/content/170953) + +Abstract: Blind and low vision (BLV) developers create websites to share knowledge and showcase their work. A well-designed website can engage audiences and deliver information effectively, yet it remains challenging for BLV developers to review their web designs. We conducted interviews with BLV developers (N=9) and analyzed 20 websites created by BLV developers. BLV developers created highly accessible websites but wanted to assess the usability of their websites for sighted users and follow the design standards of other websites. They also encountered challenges using screen readers to identify illegible text, misaligned elements, and inharmonious colors. We present DesignChecker, a browser extension that helps BLV developers improve their web designs. 
With DesignChecker, users can assess their current design by comparing it to visual design guidelines, a reference website of their choice, or a set of similar websites. DesignChecker also identifies the specific HTML elements that violate design guidelines and suggests CSS changes for improvements. Our user study participants (N=8) recognized more visual design errors than using their typical workflow and expressed enthusiasm about using DesignChecker in the future. + + + + +## Dynamic Objects & Materials +### MagneDot: Integrated Fabrication and Actuation Methods of Dot-Based Magnetic Shape Displays +Authors: Lingyun Sun, Yitao Fan, Boyu Feng, Yifu Zhang, Deying Pan, Yiwen Ren, Yuyang Zhang, Qi Wang, Ye Tao, Guanyun Wang + +[Link](https://programs.sigchi.org/uist/2024/program/content/170860) + +Abstract: This paper presents MagneDot, a novel method for making interactive magnetic shape displays through an integrated fabrication process. Magnetic soft materials can potentially create fast, responsive morphing structures for interactions. However, novice users and designers typically do not have access to sophisticated equipment and materials or cannot afford heavy labor to create interactive objects based on this material. Modified from an open-source 3D printer, the fabrication system of MagneDot integrates the processes of mold-making, pneumatic extrusion, magnetization, and actuation, using cost-effective materials only. By providing a design tool, MagneDot allows users to generate G-codes for fabricating and actuating displays of various morphing effects. Finally, a series of design examples demonstrate the possibilities of shape displays enabled by MagneDot. + + + +### CARDinality: Interactive Card-shaped Robots with Locomotion and Haptics using Vibration +Authors: Aditya Retnanto, Emilie Faracci, Anup Sathya, Yu-Kai Hung, Ken Nakagaki + +[Link](https://programs.sigchi.org/uist/2024/program/content/170995) + +Abstract: This paper introduces a novel approach to interactive robots by leveraging the form-factor of cards to create thin robots equipped with vibrational capabilities for locomotion and haptic feedback. The system is composed of flat-shaped robots with on-device sensing and wireless control, which offer lightweight portability and scalability. This research introduces a hardware prototype to explore the possibility of ‘vibration-based omni-directional sliding locomotion’. Applications include augmented card playing, educational tools, and assistive technology, which showcase CARDinality’s versatility in tangible interaction. + + + + +### PortaChrome: A Portable Contact Light Source for Integrated Re-Programmable Multi-Color Textures +Authors: Yunyi Zhu, Cedric Honnet, Yixiao Kang, Junyi Zhu, Angelina Zheng, Kyle Heinz, Grace Tang, Luca Musk, Michael Wessely, Stefanie Mueller + +[Link](https://programs.sigchi.org/uist/2024/program/content/170742) + +Abstract: In this paper, we present PortaChrome, a portable light source that can be attached to everyday objects to reprogram the color and texture of surfaces that come in contact with them. When PortaChrome makes contact with objects previously coated with photochromic dye, the UV and RGB LEDs inside PortaChrome create multi-color textures on the objects. In contrast to prior work, which used projectors for the color-change, PortaChrome has a thin and flexible form factor, which allows the color-change process to be integrated into everyday user interaction. 
Because of the close distance between the light source and the photochromic object, PortaChrome creates color textures in less than 4 minutes on average, which is 8 times faster than prior work. We demonstrate PortaChrome with four application examples, including data visualizations on textiles and dynamic designs on wearables. + + + +### Augmented Object Intelligence with XR-Objects +Authors: Mustafa Doga Dogan, Eric Gonzalez, Karan Ahuja, Ruofei Du, Andrea Colaço, Johnny Lee, Mar Gonzalez-Franco, David Kim + +[Link](https://programs.sigchi.org/uist/2024/program/content/170733) + +Abstract: Seamless integration of physical objects as interactive digital entities remains a challenge for spatial computing. This paper explores Augmented Object Intelligence (AOI) in the context of XR, an interaction paradigm that aims to blur the lines between digital and physical by equipping real-world objects with the ability to interact as if they were digital, where every object has the potential to serve as a portal to digital functionalities. Our approach utilizes real-time object segmentation and classification, combined with the power of Multimodal Large Language Models (MLLMs), to facilitate these interactions without the need for object pre-registration. We implement the AOI concept in the form of XR-Objects, an open-source prototype system that provides a platform for users to engage with their physical environment in contextually relevant ways using object-based context menus. This system enables analog objects to not only convey information but also to initiate digital actions, such as querying for details or executing tasks. Our contributions are threefold: (1) we define the AOI concept and detail its advantages over traditional AI assistants, (2) detail the XR-Objects system’s open-source design and implementation, and (3) show its versatility through various use cases and a user study. + + + + +## Generating Visuals +### ShadowMagic: Designing Human-AI Collaborative Support for Comic Professionals’ Shadowing +Authors: Amrita Ganguly, Chuan Yan, John Chung, Tong Sun, YOON KIHEON, Yotam Gingold, Sungsoo Ray Hong + +[Link](https://programs.sigchi.org/uist/2024/program/content/170726) + +Abstract: Shadowing allows artists to convey realistic volume and emotion of characters in comic colorization. While AI technologies have the potential to improve professionals’ shadowing experience, current practice is manual and time-consuming. To understand how we can improve their shadowing experience, we conducted interviews with 5 professionals. We found that professionals’ level of engagement can vary depending on semantics, such as characters’ faces or hair. We also found they spent time on shadow “landscaping”—deciding where to place large shadow regions to create a realistic volumetric presentation while the final results can vary dramatically depending on their “staging” and “attention guiding” needs. We discovered they would accept AI suggestions for less engaging semantic parts or landscaping, while needing the capability to adjust details. Based on our observations, we developed ShadowMagic, which (1) generates AI-driven shadows based on commonly used light directions, (2) enables users to selectively choose results depending on semantics, and (3) allows users to complete shadow areas themselves for further perfection. Through a summative evaluation with 5 professionals, we found that they were significantly more satisfied with our AI-driven results compared to a baseline. 
We also found that ShadowMagic’s “step by step” workflow helps participants more easily adopt AI-driven results. We conclude by providing implications. + + + +### What's the Game, then? Opportunities and Challenges for Runtime Behavior Generation +BEST_PAPER + +Authors: Nicholas Jennings, Han Wang, Isabel Li, James Smith, Bjoern Hartmann + +[Link](https://programs.sigchi.org/uist/2024/program/content/170924) + +Abstract: Procedural content generation (PCG), the process of algorithmically creating game components instead of manually, has been a common tool of game development for decades. Recent advances in large language models (LLMs) enable the generation of game behaviors based on player input at runtime. Such code generation brings with it the possibility of entirely new gameplay interactions that may be difficult to integrate with typical game development workflows. We explore these implications through GROMIT, a novel LLM-based runtime behavior generation system for Unity. When triggered by a player action, GROMIT generates a relevant behavior which is compiled without developer intervention and incorporated into the game. We create three demonstration scenarios with GROMIT to investigate how such a technology might be used in game development. In a system evaluation we find that our implementation is able to produce behaviors that result in significant downstream impacts to gameplay. We then conduct an interview study with n=13 game developers using GROMIT as a probe to elicit their current opinion on runtime behavior generation tools, and enumerate the specific themes curtailing the wider use of such tools. We find that the main themes of concern are quality considerations, community expectations, and fit with developer workflows, and that several of the subthemes are unique to runtime behavior generation specifically. We outline a future work agenda to address these concerns, including the need for additional guardrail systems for behavior generation. + + + +### StyleFactory: Towards Better Style Alignment in Image Creation through Style-Strength-Based Control and Evaluation +Authors: Mingxu Zhou, Dengming Zhang, Weitao You, Ziqi Yu, Yifei Wu, Chenghao Pan, Huiting Liu, Tianyu Lao, Pei Chen + +[Link](https://programs.sigchi.org/uist/2024/program/content/170929) + +Abstract: Generative AI models have been widely used for image creation. However, generating images that are well-aligned with users' personal styles on aesthetic features (e.g., color and texture) can be challenging due to the poor style expression and interpretation between humans and models. Through a formative study, we observed that participants showed a clear subjective perception of the desired style and variations in its strength, which directly inspired us to develop style-strength-based control and evaluation. Building on this, we present StyleFactory, an interactive system that helps users achieve style alignment. Our interface enables users to rank images based on their strengths in the desired style and visualizes the strength distribution of other images in that style from the model's perspective. In this way, users can evaluate the understanding gap between themselves and the model, and define well-aligned personal styles for image creation through targeted iterations. 
Our technical evaluation and user study demonstrate that StyleFactory accurately generates images in specific styles, effectively facilitates style alignment in image creation workflow, stimulates creativity, and enhances the user experience in human-AI interactions. + + + +### AutoSpark: Supporting Automobile Appearance Design Ideation with Kansei Engineering and Generative AI +Authors: Liuqing Chen, Qianzhi Jing, Yixin Tsang, Qianyi Wang, Ruocong Liu, Duowei Xia, Yunzhan Zhou, Lingyun Sun + +[Link](https://programs.sigchi.org/uist/2024/program/content/170878) + +Abstract: Rapid creation of novel product appearance designs that align with consumer emotional requirements poses a significant challenge. Text-to-image models, with their excellent image generation capabilities, have demonstrated potential in providing inspiration to designers. However, designers still encounter issues including aligning emotional needs, expressing design intentions, and comprehending generated outcomes in practical applications. To address these challenges, we introduce AutoSpark, an interactive system that integrates Kansei Engineering and generative AI to provide creativity support for designers in creating automobile appearance designs that meet emotional needs. AutoSpark employs a Kansei Engineering engine powered by generative AI and a semantic network to assist designers in emotional need alignment, design intention expression, and prompt crafting. It also facilitates designers' understanding and iteration of generated results through fine-grained image-image similarity comparisons and text-image relevance assessments. The design-thinking map within its interface aids in managing the design process. Our user study indicates that AutoSpark effectively aids designers in producing designs that are more aligned with emotional needs and of higher quality compared to a baseline system, while also enhancing the designers' experience in the human-AI co-creation process. + + + + +## Movement-based UIs +### Feminist Interaction Techniques: Social Consent Signals to Deter NCIM Screenshots +Authors: Li Qiwei, Francesca Lameiro, Shefali Patel, Cristi Isaula-Reyes, Eytan Adar, Eric Gilbert, Sarita Schoenebeck + +[Link](https://programs.sigchi.org/uist/2024/program/content/170858) + +Abstract: Non-consensual Intimate Media (NCIM) refers to the distribution of sexual or intimate content without consent. NCIM is common and causes significant emotional, financial, and reputational harm. We developed Hands-Off, an interaction technique for messaging applications that deters non-consensual screenshots. Hands-Off requires recipients to perform a hand gesture in the air, above the device, to unlock media—which makes simultaneous screenshotting difficult. A lab study shows that Hands-Off gestures are easy +to perform and reduce non-consensual screenshots by 67%. We conclude by generalizing this approach and introduce the idea of Feminist Interaction Techniques (FIT), interaction techniques that encode feminist values and speak to societal problems, and reflect on FIT’s opportunities and limitations. + + + +### Effects of Computer Mouse Lift-off Distance Settings in Mouse Lifting Action +Authors: Munjeong Kim, Sunjun Kim + +[Link](https://programs.sigchi.org/uist/2024/program/content/170957) + +Abstract: This study investigates the effect of Lift-off Distance (LoD) on a computer mouse, which refers to the height at which a mouse sensor stops tracking when lifted off the surface. 
Although a low LoD is generally preferred to avoid unintentional cursor movement in mouse lifting (=clutching), especially in first-person shooter games, it may reduce tracking stability. +We conducted a psychophysical experiment to measure the perceptible differences between LoD levels and quantitatively measured the unintentional cursor movement error and tracking stability at four levels of LoD while users performed mouse lifting. The results showed a trade-off between movement error and tracking stability at varying levels of LoD. Our findings offer valuable information on optimal LoD settings, which could serve as a guide for choosing a proper mouse device for enthusiastic gamers. + + + +### DisMouse: Disentangling Information from Mouse Movement Data +Authors: Guanhua Zhang, Zhiming Hu, Andreas Bulling + +[Link](https://programs.sigchi.org/uist/2024/program/content/170847) + +Abstract: Mouse movement data contain rich information about users, performed tasks, and user interfaces, but separating the respective components remains challenging and unexplored. As a first step to address this challenge, we propose DisMouse – the first method to disentangle user-specific and user-independent information and stochastic variations from mouse movement data. At the core of our method is an autoencoder trained in a semi-supervised fashion, consisting of a self-supervised denoising diffusion process and a supervised contrastive user identification module. Through evaluations on three datasets, we show that DisMouse 1) captures complementary information of mouse input, hence providing an interpretable framework for modelling mouse movements, 2) can be used to produce refined features, thus enabling various applications such as personalised and variable mouse data generation, and 3) generalises across different datasets. Taken together, our results underline the significant potential of disentangled representation learning for explainable, controllable, and generalised mouse behaviour modelling. + + + +### Wheeler: A Three-Wheeled Input Device for Usable, Efficient, and Versatile Non-Visual Interaction +HONORABLE_MENTION + +Authors: Md Touhidul Islam, Noushad Sojib, Imran Kabir, Ashiqur Rahman Amit, Mohammad Ruhul Amin, Syed Masum Billah + +[Link](https://programs.sigchi.org/uist/2024/program/content/170848) + +Abstract: Blind users rely on keyboards and assistive technologies like screen readers to interact with user interface (UI) elements. In modern applications with complex UI hierarchies, navigating to different UI elements poses a significant accessibility challenge. Users must listen to screen reader audio descriptions and press relevant keyboard keys one at a time. This paper introduces Wheeler, a novel three-wheeled, mouse-shaped stationary input device, to address this issue. Informed by participatory sessions, Wheeler enables blind users to navigate up to three hierarchical levels in an app independently using three wheels instead of navigating just one level at a time using a keyboard. The three wheels also offer versatility, allowing users to repurpose them for other tasks, such as 2D cursor manipulation. A study with 12 blind users indicates a significant reduction (40%) in navigation time compared to using a keyboard. Further, a diary study with our blind co-author highlights Wheeler's additional benefits, such as accessing UI elements with partial metadata and facilitating mixed-ability collaboration. 
+ + + + +## Hacking Perception +### Predicting the Limits: Tailoring Unnoticeable Hand Redirection Offsets in Virtual Reality to Individuals’ Perceptual Boundaries +Authors: Martin Feick, Kora Regitz, Lukas Gehrke, André Zenner, Anthony Tang, Tobias Jungbluth, Maurice Rekrut, Antonio Krüger + +[Link](https://programs.sigchi.org/uist/2024/program/content/171017) + +Abstract: Many illusion and interaction techniques in Virtual Reality (VR) rely on Hand Redirection (HR), which has proved to be effective as long as the introduced offsets between the position of the real and virtual hand do not noticeably disturb the user experience. Yet calibrating HR offsets is a tedious and time-consuming process involving psychophysical experimentation, and the resulting thresholds are known to be affected by many variables---limiting HR's practical utility. As a result, there is a clear need for alternative methods that allow tailoring HR to the perceptual boundaries of individual users. We conducted an experiment with 18 participants combining movement, eye gaze and EEG data to detect HR offsets Below, At, and Above individuals' detection thresholds. Our results suggest that we can distinguish HR At and Above from no HR. Our exploration provides a promising new direction with potentially strong implications for the broad field of VR illusions. + + + +### Modulating Heart Activity and Task Performance using Haptic Heartbeat Feedback: A Study Across Four Body Placements +Authors: Andreia Valente, Dajin Lee, Seungmoon Choi, Mark Billinghurst, Augusto Esteves + +[Link](https://programs.sigchi.org/uist/2024/program/content/170839) + +Abstract: This paper explores the impact of vibrotactile haptic feedback on heart activity when the feedback is provided at four different body locations (chest, wrist, neck, and ankle) and with two feedback rates (50 bpm and 110 bpm). A user study found that the neck placement resulted in higher heart rates and lower heart rate variability, and higher frequencies correlated with increased heart rates and decreased heart rate variability. The chest was preferred in self-reported metrics, and neck placement was perceived as less satisfying, harmonious, and immersive. This research contributes to understanding the interplay between psychological experiences and physiological responses when using haptic biofeedback resembling real body signals. + + + +### Augmented Breathing via Thermal Feedback in the Nose +Authors: Jas Brooks, Alex Mazursky, Janice Hixon, Pedro Lopes + +[Link](https://programs.sigchi.org/uist/2024/program/content/170728) + +Abstract: We propose, engineer, and study a novel method to augment the feeling of breathing—enabling interactive applications to let users feel like they are inhaling more/less air (perceived nasal airflow). We achieve this effect by cooling or heating the nose in sync with the user’s inhalation. Our illusion builds on the physiology of breathing: we perceive our breath predominantly through the cooling of our nasal cavities during inhalation. This is why breathing in a “fresh” cold environment feels easier than in a “stuffy” hot environment, even when the inhaled volume is the same. Our psychophysical study confirmed that our in-nose temperature stimulation significantly influenced breathing perception in both directions: making it feel harder & easier to breathe. Further, we found that ~90% of the trials were described as a change in perceived airflow/breathing, while only ~8% as temperature. 
Following, we engineered a compact device worn across the septum that uses Peltier elements. We illustrate the potential of this augmented breathing in interactive contexts, such as for virtual reality (e.g., rendering ease of breathing crisp air or difficulty breathing with a deteriorated gas mask) and everyday interactions (e.g., in combination with a relaxation application or to alleviate the perceived breathing resistance when wearing a mask). + + + +### Thermal In Motion: Designing Thermal Flow Illusions with Tactile and Thermal Interaction +Authors: Yatharth Singhal, Daniel Honrales, Haokun Wang, Jin Ryong Kim + +[Link](https://programs.sigchi.org/uist/2024/program/content/170896) + +Abstract: This study presents a novel method for creating moving thermal sensations by integrating the thermal referral illusion with tactile motion. We conducted three experiments on human forearms: the first experiment examined the impact of temperature and thermal actuator placement on perceived thermal motion, finding the clearest perception with a centrally positioned actuator under both hot and cold conditions. The second experiment identified the speed thresholds of perceived thermal motion, revealing a wider detectable range in hot conditions (1.8 cm/s to 9.5 cm/s) compared to cold conditions (2.4 cm/s to 5.0 cm/s). Finally, we integrated our approach into virtual reality (VR) to assess its feasibility through two interaction scenarios. Our results shed light on the comprehension of thermal perception and its integration with tactile cues, promising significant advancements in incorporating thermal motion into diverse thermal interfaces for immersive VR experiences. + + + + +## New realities +### SIM2VR: Towards Automated Biomechanical Testing in VR +Authors: Florian Fischer, Aleksi Ikkala, Markus Klar, Arthur Fleig, Miroslav Bachinski, Roderick Murray-Smith, Perttu Hämäläinen, Antti Oulasvirta, Jörg Müller + +[Link](https://programs.sigchi.org/uist/2024/program/content/170989) + +Abstract: Automated biomechanical testing has great potential for the development of VR applications, as initial insights into user behaviour can be gained in silico early in the design process. +In particular, it allows prediction of user movements and ergonomic variables, such as fatigue, prior to conducting user studies. +However, there is a fundamental disconnect between simulators hosting state-of-the-art biomechanical user models and simulators used to develop and run VR applications. +Existing user simulators often struggle to capture the intricacies of real-world VR applications, reducing the ecological validity of user predictions. +In this paper, we introduce SIM2VR, a system that aligns user simulation with a given VR application by establishing a continuous closed loop between the two processes. +This, for the first time, enables training simulated users directly in the same VR application that real users interact with. +We demonstrate that SIM2VR can predict differences in user performance, ergonomics and strategies in a fast-paced, dynamic arcade game. In order to expand the scope of automated biomechanical testing beyond simple visuomotor tasks, advances in cognitive models and reward function design will be needed.
+ + + +### Hands-on, Hands-off: Gaze-Assisted Bimanual 3D Interaction +Authors: Mathias Lystbæk, Thorbjørn Mikkelsen, Roland Krisztandl, Eric Gonzalez, Mar Gonzalez-Franco, Hans Gellersen, Ken Pfeuffer + +[Link](https://programs.sigchi.org/uist/2024/program/content/171002) + +Abstract: Extended Reality (XR) systems with hand-tracking support direct manipulation of objects with both hands. A common interaction in this context is for the non-dominant hand (NDH) to orient an object for input by the dominant hand (DH). We explore bimanual interaction with gaze through three new modes of interaction where the input of the NDH, DH, or both hands is indirect based on Gaze+Pinch. These modes enable a new dynamic interplay between our hands, allowing flexible alternation between and pairing of complementary operations. Through applications, we demonstrate several use cases in the context of 3D modelling, where users exploit occlusion-free, low-effort, and fluid two-handed manipulation. To gain a deeper understanding of each mode, we present a user study on an asymmetric rotate-translate task. Most participants preferred indirect input with both hands for lower physical effort, without a penalty on user performance. Otherwise, they preferred modes where the NDH oriented the object directly, supporting preshaping of the hand, which is more challenging with indirect gestures. The insights gained are of relevance for the design of XR interfaces that aim to leverage eye and hand input in tandem. + + + +### Pro-Tact: Hierarchical Synthesis of Proprioception and Tactile Exploration for Eyes-Free Ray Pointing on Out-of-View VR Menus +Authors: Yeonsu Kim, Jisu Yim, Kyunghwan Kim, Yohan Yun, Geehyuk Lee + +[Link](https://programs.sigchi.org/uist/2024/program/content/170805) + +Abstract: We introduce Pro-Tact, a novel eyes-free pointing technique for interacting with out-of-view (OoV) VR menus. This technique combines rapid rough pointing using proprioception with fine-grain adjustments through tactile exploration, enabling menu interaction without visual attention. Our user study demonstrated that Pro-Tact allows users to select menu items accurately (95% accuracy for 54 items) in an eyes-free manner, with reduced fatigue and sickness compared to eyes-engaged interaction. Additionally, we observed that participants voluntarily interacted with OoV menus eyes-free when Pro-Tact's tactile feedback was provided in practical VR application usage contexts. This research contributes by introducing the novel interaction technique, Pro-Tact, and quantitatively evaluating its benefits in terms of performance, user experience, and user preference in OoV menu interactions. + + + +### GradualReality: Enhancing Physical Object Interaction in Virtual Reality via Interaction State-Aware Blending +Authors: HyunA Seo, Juheon Yi, Rajesh Balan, Youngki Lee + +[Link](https://programs.sigchi.org/uist/2024/program/content/170920) + +Abstract: We present GradualReality, a novel interface enabling a Cross Reality experience that includes gradual interaction with physical objects in a virtual environment and supports both presence and usability. Daily Cross Reality interaction is challenging as the user's physical object interaction state is continuously changing over time, causing their attention to frequently shift between the virtual and physical worlds. As such, presence in the virtual environment and seamless usability for interacting with physical objects should be maintained at a high level. 
To address this issue, we present an Interaction State-Aware Blending approach that (i) balances immersion and interaction capability and (ii) provides a fine-grained, gradual transition between virtual and physical worlds. The key idea includes categorizing the flow of physical object interaction into multiple states and designing novel blending methods that offer optimal presence and sufficient physical awareness at each state. We performed extensive user studies and interviews with a working prototype and demonstrated that GradualReality provides better Cross Reality experiences compared to baselines. + + + +### StegoType: Surface Typing from Egocentric Cameras +Authors: Mark Richardson, Fadi Botros, Yangyang Shi, Pinhao Guo, Bradford Snow, Linguang Zhang, Jingming Dong, Keith Vertanen, Shugao Ma, Robert Wang + +[Link](https://programs.sigchi.org/uist/2024/program/content/170853) + +Abstract: Text input is a critical component of any general purpose computing system, yet efficient and natural text input remains a challenge in AR and VR. Headset based hand-tracking has recently become pervasive among consumer VR devices and affords the opportunity to enable touch typing on virtual keyboards. We present an approach for decoding touch typing on uninstrumented flat surfaces using only egocentric camera-based hand-tracking as input. While egocentric hand-tracking accuracy is limited by issues like self occlusion and image fidelity, we show that a sufficiently diverse training set of hand motions paired with typed text can enable a deep learning model to extract signal from this noisy input. +Furthermore, by carefully designing a closed-loop data collection process, we can train an end-to-end text decoder that accounts for natural sloppy typing on virtual keyboards. +We evaluate our work with a user study (n=18) showing a mean online throughput of 42.4 WPM with an uncorrected error rate (UER) of 7% with our method compared to a physical keyboard baseline of 74.5 WPM at 0.8% UER, showing progress towards unlocking productivity and high throughput use cases in AR/VR. + + + +### Eye-Hand Movement of Objects in Near Space Extended Reality +Authors: Uta Wagner, Andreas Asferg Jacobsen, Tiare Feuchtner, Hans Gellersen, Ken Pfeuffer + +[Link](https://programs.sigchi.org/uist/2024/program/content/170771) + +Abstract: Hand-tracking in Extended Reality (XR) enables moving objects in near space with direct hand gestures, to pick, drag and drop objects in 3D. In this work, we investigate the use of eye-tracking to reduce the effort involved in this interaction. As the eyes naturally look ahead to the target for a drag operation, the principal idea is to map the translation of the object in the image plane to gaze, such that the hand only needs to control the depth component of the operation. We have implemented four techniques that explore two factors: the use of gaze only to move objects in X-Y vs. extra refinement by hand, and the use of hand input in + the Z axis to directly move objects vs. indirectly via a transfer function. We compared all four techniques in a user study (N=24) against baselines of direct and indirect hand input. We detail user performance, effort and experience trade-offs and show that all eye-hand techniques significantly reduce physical effort over direct gestures, pointing toward effortless drag-and-drop for XR environments.
+ + + + +## Prototyping +### ProtoDreamer: A Mixed-prototype Tool Combining Physical Model and Generative AI to Support Conceptual Design +Authors: Hongbo ZHANG, Pei Chen, Xuelong Xie, Chaoyi Lin, Lianyan Liu, Zhuoshu Li, Weitao You, Lingyun Sun + +[Link](https://programs.sigchi.org/uist/2024/program/content/170974) + +Abstract: Prototyping serves as a critical phase in the industrial conceptual design process, enabling exploration of the problem space and identification of solutions. Recent advancements in large-scale generative models have enabled AI to become a co-creator in this process. However, designers often consider generative AI challenging due to the necessity to follow computer-centered interaction rules, diverging from their familiar design materials and languages. Physical prototyping is a commonly used design method, offering unique benefits in the prototyping process, such as intuitive understanding and tangible testing. In this study, we propose ProtoDreamer, a mixed-prototype tool that synergizes generative AI with physical prototypes to support conceptual design. ProtoDreamer allows designers to construct preliminary prototypes using physical materials, while AI recognizes these forms and vocal inputs to generate diverse design alternatives. This tool empowers designers to tangibly interact with prototypes, intuitively convey design intentions to AI, and continuously draw inspiration from the generated artifacts. An evaluation study confirms ProtoDreamer’s utility and strengths in time efficiency, creativity support, defects exposure, and detailed thinking facilitation. + + + +### TorqueCapsules: Fully-Encapsulated Flywheel Actuation Modules for Designing and Prototyping Movement-Based and Kinesthetic Interaction +Authors: Willa Yunqi Yang, Yifan Zou, Jingle Huang, Raouf Abujaber, Ken Nakagaki + +[Link](https://programs.sigchi.org/uist/2024/program/content/170857) + +Abstract: Flywheels are unique, versatile actuators that store and convert kinetic energy to torque, widely utilized in aerospace, robotics, haptics, and more. However, prototyping interaction using flywheels is not trivial due to safety concerns, unintuitive operation, and implementation challenges. +We present TorqueCapsules: self-contained, fully-encapsulated flywheel actuation modules that make the flywheel actuators easy to control, safe to interact with, and quick to reconfigure and customize. By fully encapsulating the actuators with a wireless microcontroller, a battery, and other components, the module can be readily attached, embedded, or stuck to everyday objects, worn on people’s bodies, or combined with other devices. With our custom GUI, both novices and expert users can easily control multiple modules to design and prototype movements and kinesthetic haptics unique to flywheel actuation. We demonstrate various applications, including actuated everyday objects, wearable haptics, and expressive robots. We conducted workshops in which novices and experts employed TorqueCapsules, collecting qualitative feedback and further application examples. + + + +### AniCraft: Crafting Everyday Objects as Physical Proxies for Prototyping 3D Character Animation in Mixed Reality +Authors: Boyu Li, Linping Yuan, Zhe Yan, Qianxi Liu, Yulin Shen, Zeyu Wang + +[Link](https://programs.sigchi.org/uist/2024/program/content/170881) + +Abstract: We introduce AniCraft, a mixed reality system for prototyping 3D character animation using physical proxies crafted from everyday objects.
Unlike existing methods that require specialized equipment to support the use of physical proxies, AniCraft only requires affordable markers, webcams, and daily accessible objects and materials. AniCraft allows creators to prototype character animations through three key stages: selection of virtual characters, fabrication of physical proxies, and manipulation of these proxies to animate the characters. This authoring workflow is underpinned by diverse physical proxies, manipulation types, and mapping strategies, which ease the process of posing virtual characters and mapping user interactions with physical proxies to animated movements of virtual characters. We provide a range of cases and potential applications to demonstrate how diverse physical proxies can inspire user creativity. User experiments show that our system can outperform traditional animation methods for rapid prototyping. Furthermore, we provide insights into the benefits and usage patterns of different materials, which lead to design implications for future research. + + + +### Mul-O: Encouraging Olfactory Innovation in Various Scenarios Through a Task-Oriented Development Platform +Authors: Peizhong Gao, Fan Liu, Di Wen, Yuze Gao, Linxin Zhang, Chikelei Wang, Qiwei Zhang, Yu Zhang, Shao-en Ma, Qi Lu, Haipeng Mi, YINGQING XU + +[Link](https://programs.sigchi.org/uist/2024/program/content/170886) + +Abstract: Olfactory interfaces are pivotal in HCI, yet their development is hindered by limited application scenarios, stifling the discovery of new research opportunities. This challenge primarily stems from existing design tools focusing predominantly on odor display devices and the creation of standalone olfactory experiences, rather than enabling rapid adaptation to various contexts and tasks. Addressing this, we introduce Mul-O, a novel task-oriented development platform crafted to aid semi-professionals in navigating the diverse requirements of potential application scenarios and effectively prototyping ideas. +Mul-O facilitates the swift association and integration of olfactory experiences into functional designs, system integrations, and concept validations. Comprising a web UI for task-oriented development, an API server for seamless third-party integration, and wireless olfactory display hardware, Mul-O significantly enhances the ideation and prototyping process in multisensory tasks. This was verified by a 15-day workshop attended by 30 participants. The workshop produced seven innovative projects, underscoring Mul-O's efficacy in fostering olfactory innovation. + + + + +## Sustainable Interfaces +### Degrade to Function: Towards Eco-friendly Morphing Devices that Function Through Programmed Sequential Degradation +Authors: Qiuyu Lu, Semina Yi, Mengtian Gan, Jihong Huang, Xiao Zhang, Yue Yang, Chenyi Shen, Lining Yao + +[Link](https://programs.sigchi.org/uist/2024/program/content/170959) + +Abstract: While it seems counterintuitive to think of degradation within an operating device as beneficial, one may argue that when rationally designed, the controlled breakdown of materials—physical, chemical, or biological—can be harnessed for specific functions. To apply this principle to the design of morphing devices, we introduce the concept of "Degrade to Function" (DtF). This concept aims to create eco-friendly and self-contained morphing devices that operate through a sequence of environmentally-triggered degradations. 
We explore its design considerations and implementation techniques by identifying environmental conditions and degradation types that can be exploited, evaluating potential materials capable of controlled degradation, suggesting designs for structures that can leverage degradation to achieve various transformations and functions, and developing sequential control approaches that integrate degradation triggers. To demonstrate the viability and versatility of this design strategy, we showcase several application examples across a range of environmental conditions. + + + +### WasteBanned: Supporting Zero Waste Fashion Design Through Linked Edits +Authors: Ruowang Zhang, Stefanie Mueller, Gilbert Bernstein, Adriana Schulz, Mackenzie Leake + +[Link](https://programs.sigchi.org/uist/2024/program/content/170976) + +Abstract: The commonly used cut-and-sew garment construction process, in which 2D fabric panels are cut from sheets of fabric and assembled into 3D garments, contributes to widespread textile waste in the fashion industry. There is often a significant divide between the design of the garment and the layout of the panels. One opportunity for bridging this gap is the emerging study and practice of zero waste fashion design, which involves creating clothing designs with maximum layout efficiency. Enforcing the strict constraints of zero waste sewing is challenging, as edits to one region of the garment necessarily affect neighboring panels. Based on our formative work to understand this emerging area within fashion design, we present WasteBanned, a tool that combines CAM and CAD to help users prioritize efficient material usage, work within these zero waste constraints, and edit existing zero waste garment patterns. Our user evaluation indicates that our tool helps fashion designers edit zero waste patterns to fit different bodies and add stylistic variation, while creating highly efficient fabric layouts. + + + +### HoloChemie - Sustainable Fabrication of Soft Biochemical Holographic Devices for Ubiquitous Sensing +Authors: Sutirtha Roy, Moshfiq-Us-Saleheen Chowdhury, Jurjaan Noim, Richa Pandey, Aditya Shekhar Nittala + +[Link](https://programs.sigchi.org/uist/2024/program/content/170931) + +Abstract: Sustainable fabrication approaches and biomaterials are increasingly being used in HCI to fabricate interactive devices. However, the majority of the work has focused on integrating electronics. This paper takes a sustainable approach to exploring the fabrication of biochemical sensing devices. Firstly, we contribute a set of biochemical formulations for biological and environmental sensing with bio-sourced and environment-friendly substrate materials. Our formulations are based on a combination of enzymes derived from bacteria and fungi, plant extracts and commercially available chemicals to sense both liquid and gaseous analytes: glucose, lactic acid, pH levels and carbon dioxide. Our novel holographic sensing scheme allows for detecting the presence of analytes and enables quantitative estimation of the analyte levels. We present a set of application scenarios that demonstrate the versatility of our approach and discuss the sustainability aspects, its limitations, and the implications for bio-chemical systems in HCI. 
+ + + + +## Sound & Music +### SonoHaptics: An Audio-Haptic Cursor for Gaze-Based Object Selection in XR +Authors: Hyunsung Cho, Naveen Sendhilnathan, Michael Nebeling, Tianyi Wang, Purnima Padmanabhan, Jonathan Browder, David Lindlbauer, Tanya Jonker, Kashyap Todi + +[Link](https://programs.sigchi.org/uist/2024/program/content/170927) + +Abstract: We introduce SonoHaptics, an audio-haptic cursor for gaze-based 3D object selection. SonoHaptics addresses challenges around providing accurate visual feedback during gaze-based selection in Extended Reality (XR), e.g., lack of world-locked displays in no- or limited-display smart glasses and visual inconsistencies. To enable users to distinguish objects without visual feedback, SonoHaptics employs the concept of cross-modal correspondence in human perception to map visual features of objects (color, size, position, material) to audio-haptic properties (pitch, amplitude, direction, timbre). We contribute data-driven models for determining cross-modal mappings of visual features to audio and haptic features, and a computational approach to automatically generate audio-haptic feedback for objects in the user's environment. SonoHaptics provides global feedback that is unique to each object in the scene, and local feedback to amplify differences between nearby objects. Our comparative evaluation shows that SonoHaptics enables accurate object identification and selection in a cluttered scene without visual feedback. + + + +### SonifyAR: Context-Aware Sound Generation in Augmented Reality +Authors: Xia Su, Jon Froehlich, Eunyee Koh, Chang Xiao + +[Link](https://programs.sigchi.org/uist/2024/program/content/170866) + +Abstract: Sound plays a crucial role in enhancing user experience and immersiveness in Augmented Reality (AR). However, current platforms lack support for AR sound authoring due to limited interaction types, challenges in collecting and specifying context information, and difficulty in acquiring matching sound assets. We present SonifyAR, an LLM-based AR sound authoring system that generates context-aware sound effects for AR experiences. SonifyAR expands the current design space of AR sound and implements a Programming by Demonstration (PbD) pipeline to automatically collect contextual information of AR events, including virtual-content-semantics and real-world context. This context information is then processed by a large language model to acquire sound effects with Recommendation, Retrieval, Generation, and Transfer methods. To evaluate the usability and performance of our system, we conducted a user study with eight participants and created five example applications, including an AR-based science experiment, and an assistive application for low-vision AR users. + + + +### Auptimize: Optimal Placement of Spatial Audio Cues for Extended Reality +Authors: Hyunsung Cho, Alexander Wang, Divya Kartik, Emily Xie, Yukang Yan, David Lindlbauer + +[Link](https://programs.sigchi.org/uist/2024/program/content/170952) + +Abstract: Spatial audio in Extended Reality (XR) provides users with better awareness of where virtual elements are placed, and efficiently guides them to events such as notifications, system alerts from different windows, or approaching avatars. Humans, however, are inaccurate in localizing sound cues, especially with multiple sources due to limitations in human auditory perception such as angular discrimination error and front-back confusion. 
This decreases the efficiency of XR interfaces because users misidentify from which XR element a sound is coming. To address this, we propose Auptimize, a novel computational approach for placing XR sound sources, which mitigates such localization errors by utilizing the ventriloquist effect. Auptimize disentangles the sound source locations from the visual elements and relocates the sound sources to optimal positions for unambiguous identification of sound cues, avoiding errors due to inter-source proximity and front-back confusion. Our evaluation shows that Auptimize decreases spatial audio-based source identification errors compared to playing sound cues at the paired visual-sound locations. We demonstrate the applicability of Auptimize for diverse spatial audio-based interactive XR scenarios. + + + +### EarHover: Mid-Air Gesture Recognition for Hearables Using Sound Leakage Signals +BEST_PAPER + +Authors: Shunta Suzuki, Takashi Amesaka, Hiroki Watanabe, Buntarou Shizuki, Yuta Sugiura + +[Link](https://programs.sigchi.org/uist/2024/program/content/170787) + +Abstract: We introduce EarHover, an innovative system that enables mid-air gesture input for hearables. Mid-air gesture input, which eliminates the need to touch the device and thus helps to keep hands and the device clean, has been known to have high demand based on previous surveys. However, existing mid-air gesture input methods for hearables have been limited to adding cameras or infrared sensors. By focusing on the sound leakage phenomenon unique to hearables, we have realized mid-air gesture recognition using a speaker and an external microphone that are highly compatible with hearables. The signal leaked to the outside of the device due to sound leakage can be measured by an external microphone, which detects the differences in reflection characteristics caused by the hand's speed and shape during mid-air gestures. +Among 27 types of gestures, we determined the seven most suitable gestures for EarHover in terms of signal discrimination and user acceptability. We then evaluated the gesture detection and classification performance of two prototype devices (in-ear type/open-ear type) for real-world application scenarios. + + + +### Towards Music-Aware Virtual Assistants +Authors: Alexander Wang, David Lindlbauer, Chris Donahue + +[Link](https://programs.sigchi.org/uist/2024/program/content/170955) + +Abstract: We propose a system for modifying spoken notifications in a manner that is sensitive to the music a user is listening to. Spoken notifications provide convenient access to rich information without the need for a screen. Virtual assistants see prevalent use in hands-free settings such as driving or exercising, activities where users also regularly enjoy listening to music. In such settings, virtual assistants will temporarily mute a user's music to improve intelligibility. However, users may perceive these interruptions as intrusive, negatively impacting their music-listening experience. To address this challenge, we propose the concept of music-aware virtual assistants, where speech notifications are modified to resemble a voice singing in harmony with the user's music. We contribute a system that processes user music and notification text to produce a blended mix, replacing original song lyrics with the notification content. 
In a user study comparing musical assistants to standard virtual assistants, participants expressed that musical assistants fit better with music, reduced intrusiveness, and provided a more delightful listening experience overall. + + + + + +## Learning to Learn +### Patterns of Hypertext-Augmented Sensemaking +Authors: Siyi Zhu, Robert Haisfield, Brendan Langen, Joel Chan + +[Link](https://programs.sigchi.org/uist/2024/program/content/170882) + +Abstract: The early days of HCI were marked by bold visions of hypertext as a transformative medium for augmented sensemaking, exemplified in systems like Memex, Xanadu, and NoteCards. Today, however, hypertext is often disconnected from discussions of the future of sensemaking. In this paper, we investigate how the recent resurgence in hypertext "tools for thought" might point to new directions for hypertext-augmented sensemaking. Drawing on detailed analyses of guided tours with 23 scholars, we describe hypertext-augmented use patterns for dealing with the core problem of revisiting and reusing existing/past ideas during scholarly sensemaking. We then discuss how these use patterns validate and extend existing knowledge of hypertext design patterns for sensemaking, and point to new design opportunities for augmented sensemaking. + + + +### Augmented Physics: Creating Interactive and Embedded Physics Simulations from Static Textbook Diagrams +BEST_PAPER + +Authors: Aditya Gunturu, Yi Wen, Nandi Zhang, Jarin Thundathil, Rubaiat Habib Kazi, Ryo Suzuki + +[Link](https://programs.sigchi.org/uist/2024/program/content/170907) + +Abstract: We introduce Augmented Physics, a machine learning-integrated authoring tool designed for creating embedded interactive physics simulations from static textbook diagrams. Leveraging recent advancements in computer vision, such as Segment Anything and Multi-modal LLMs, our web-based system enables users to semi-automatically extract diagrams from physics textbooks and generate interactive simulations based on the extracted content. These interactive diagrams are seamlessly integrated into scanned textbook pages, facilitating interactive and personalized learning experiences across various physics concepts, such as optics, circuits, and kinematics. Drawing from an elicitation study with seven physics instructors, we explore four key augmentation strategies: 1) augmented experiments, 2) animated diagrams, 3) bi-directional binding, and 4) parameter visualization. We evaluate our system through a technical evaluation, a usability study (N=12), and expert interviews (N=12). Study findings suggest that our system can facilitate more engaging and personalized learning experiences in physics education. + + + +### Qlarify: Recursively Expandable Abstracts for Dynamic Information Retrieval over Scientific Papers +Authors: Raymond Fok, Joseph Chee Chang, Tal August, Amy Zhang, Daniel Weld + +[Link](https://programs.sigchi.org/uist/2024/program/content/170964) + +Abstract: Navigating the vast scientific literature often starts with browsing a paper’s abstract. However, when a reader seeks additional information not present in the abstract, they face a costly cognitive chasm during their dive into the full text. To bridge this gap, we introduce recursively expandable abstracts, a novel interaction paradigm that dynamically expands abstracts by progressively incorporating additional information from the papers’ full text.
This lightweight interaction allows scholars to specify their information needs by quickly brushing over the abstract or selecting AI-suggested expandable entities. Relevant information is synthesized using a retrieval-augmented generation approach, presented as a fluid, threaded expansion of the abstract, and made efficiently verifiable via attribution to relevant source-passages in the paper. Through a series of user studies, we demonstrate the utility of recursively expandable abstracts and identify future opportunities to support low-effort and just-in-time exploration of long-form information contexts through LLM-powered interactions. + + + +### LessonPlanner: Assisting Novice Teachers to Prepare Pedagogy-Driven Lesson Plans with Large Language Models +Authors: Haoxiang Fan, Guanzheng Chen, Xingbo Wang, Zhenhui Peng + +[Link](https://programs.sigchi.org/uist/2024/program/content/170883) + +Abstract: Preparing a lesson plan, e.g., a detailed road map with strategies and materials for instructing a 90-minute class, is beneficial yet challenging for novice teachers. Large language models (LLMs) can ease this process by generating adaptive content for lesson plans, which would otherwise require teachers to create from scratch or search existing resources. In this work, we first conduct a formative study with six novice teachers to understand their needs for support of preparing lesson plans with LLMs. Then, we develop LessonPlanner that assists users to interactively construct lesson plans with adaptive LLM-generated content based on Gagne's nine events. Our within-subjects study (N=12) shows that compared to the baseline ChatGPT interface, LessonPlanner can significantly improve the quality of outcome lesson plans and ease users' workload in the preparation process. Our expert interviews (N=6) further demonstrate LessonPlanner's usefulness in suggesting effective teaching strategies and meaningful educational resources. We discuss concerns on and design considerations for supporting teaching activities with LLMs. + + + + +## Hot Interfaces +### Fiery Hands: Designing Thermal Glove through Thermal and Tactile Integration for Virtual Object Manipulation +Authors: Haokun Wang, Yatharth Singhal, Hyunjae Gil, Jin Ryong Kim + +[Link](https://programs.sigchi.org/uist/2024/program/content/170880) + +Abstract: We present a novel approach to render thermal and tactile feedback to the palm and fingertips through thermal and tactile integration. Our approach minimizes the obstruction of the palm and inner side of the fingers and enables virtual object manipulation while providing localized and global thermal feedback. By leveraging thermal actuators positioned strategically on the outer palm and back of the fingers in interplay with tactile actuators, our approach exploits thermal referral and tactile masking phenomena. Through a series of user studies, we validate the perception of localized thermal sensations across the palm and fingers, showcasing the ability to generate diverse thermal patterns. Furthermore, we demonstrate the efficacy of our approach in VR applications, replicating diverse thermal interactions with virtual objects. This work represents significant progress in thermal interactions within VR, offering enhanced sensory immersion at an optimal energy cost. 
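+
+The Fiery Hands abstract above describes producing localized thermal sensations by combining thermal actuators on the outer palm and back of the fingers with tactile actuators, exploiting thermal referral and tactile masking. As a loose, hypothetical illustration of the distance-weighted actuation such an approach implies, the Python sketch below spreads a target thermal intensity across assumed actuator positions while a tactile actuator would mark the target location. The actuator layout, falloff, and function names are invented for this sketch and are not the authors' design.
+
+```python
+import math
+
+# Hypothetical thermal-actuator layout (cm) on the back of the fingers / outer palm;
+# positions and names are invented for this sketch, not taken from the paper.
+THERMAL_ACTUATORS = {
+    "index_back": (2.0, 8.0),
+    "middle_back": (4.0, 8.5),
+    "outer_palm": (7.0, 3.0),
+}
+
+def referral_weights(target, actuators=THERMAL_ACTUATORS, falloff=1.5):
+    """Distance-weighted drive levels so that combined thermal and tactile cues
+    are perceived near `target` -- a crude stand-in for thermal referral."""
+    raw = {name: 1.0 / (1.0 + math.dist(target, pos)) ** falloff
+           for name, pos in actuators.items()}
+    total = sum(raw.values())
+    return {name: weight / total for name, weight in raw.items()}
+
+# A tactile actuator would pulse at the target while the thermal actuators share the load.
+print(referral_weights(target=(3.0, 5.0)))
+```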
+ + + +### DexteriSync: A Hand Thermal I/O Exoskeleton for Morphing Finger Dexterity Experience +Authors: Ximing Shen, Youichi Kamiyama, Kouta Minamizawa, Jun Nishida + +[Link](https://programs.sigchi.org/uist/2024/program/content/170898) + +Abstract: Skin temperature is an important physiological factor for human hand dexterity. Leveraging this feature, we engineered an exoskeleton, called DexteriSync, that can dynamically adjust the user's finger dexterity and induce different thermal perceptions by modulating finger skin temperature. This exoskeleton comprises flexible silicone-copper tube segments, 3D-printed finger sockets, a 3D-printed palm base, a pump system, and a water temperature control with a storage unit. By realising an embodied experience of compromised dexterity, DexteriSync can help product designers understand the lived experience of compromised hand dexterity, such as that of the elderly and/or neurodivergent users, when designing daily necessities for them. We validated DexteriSync via a technical evaluation and two user studies, demonstrating that it can change skin temperature, dexterity, and thermal perception. An exploratory session with design students and an autistic individual with compromised dexterity demonstrated that the exoskeleton provided a more realistic experience than video education and allowed them to gain higher confidence in their designs. The results advocated for the efficacy of experiencing embodied compromised finger dexterity, which can promote an understanding of the related physical challenges and lead to a more persuasive design for assistive tools. + + + +### Flip-Pelt: Motor-Driven Peltier Elements for Rapid Thermal Stimulation and Congruent Pressure Feedback in Virtual Reality +Authors: Seongjun Kang, Gwangbin Kim, Seokhyun Hwang, Jeongju Park, Ahmed Elsharkawy, SeungJun Kim + +[Link](https://programs.sigchi.org/uist/2024/program/content/170885) + +Abstract: This study introduces "Flip-Pelt," a motor-driven Peltier device designed to provide rapid thermal stimulation and congruent pressure feedback in virtual reality (VR) environments. Our system incorporates eight motor-driven Peltier elements, allowing for the flipping of preheated or cooled elements to the opposite side. In evaluating the Flip-Pelt device, we assess users' ability to distinguish between heat/cold sources by their patterns and stiffness, and its impact on enhancing haptic experiences in VR content that involves contact with various thermal sources. Our findings demonstrate that rapid thermal stimulation and congruent pressure feedback provided by Flip-Pelt enhance the recognition accuracy of thermal patterns and the stiffness of virtual objects. These features also improve haptic experiences in VR scenarios through their temporal congruency between tactile and thermal stimuli. Additionally, we discuss the scalability of the Flip-Pelt system to other body parts by proposing design prototypes. + + + +### Hydroptical Thermal Feedback: Spatial Thermal Feedback Using Visible Lights and Water +Authors: Sosuke Ichihashi, Masahiko Inami, Hsin-Ni Ho, Noura Howell + +[Link](https://programs.sigchi.org/uist/2024/program/content/170722) + +Abstract: We control the temperature of materials in everyday interactions, recognizing temperature's important influence on our bodies, minds, and experiences. However, thermal feedback is an under-explored modality in human-computer interaction partly due to its limited temporal (slow) and spatial (small-area and non-moving) capabilities.
We introduce hydroptical thermal feedback, a spatial thermal feedback method that works by applying visible lights on body parts in water. Through physical measurements and psychophysical experiments, our results show: (1) Humans perceive thermal sensations when visible lights are cast on the skin under water, and perceived warmth is greater for lights with shorter wavelengths, (2) temporal capabilities, (3) apparent motion (spatial) of warmth and coolness sensations, and (4) hydroptical thermal feedback can support the perceptual illusion that the water itself is warmer. We propose applications, including virtual reality (VR), shared water experiences, and therapies. Overall, this paper contributes hydroptical thermal feedback as a novel method, empirical results demonstrating its unique capabilities, proposed applications, and design recommendations for using hydroptical thermal feedback. Our method introduces controlled, spatial thermal perceptions to water experiences. + + + + +## FABulous +### Facilitating the Parametric Definition of Geometric Properties in Programming-Based CAD +Authors: J Gonzalez Avila, Thomas Pietrzak, Audrey Girouard, Géry Casiez + +[Link](https://programs.sigchi.org/uist/2024/program/content/170736) + +Abstract: Parametric Computer-aided design (CAD) enables the creation of reusable models by integrating variables into geometric properties, facilitating customization without a complete redesign. However, creating parametric designs in programming-based CAD presents significant challenges. Users define models in a code editor using a programming language, with the application generating a visual representation in a viewport. This process involves complex programming and arithmetic expressions to describe geometric properties, linking various object properties to create parametric designs. Unfortunately, these applications lack assistance, making the process unnecessarily demanding. We propose a solution that allows users to retrieve parametric expressions from the visual representation for reuse in the code, streamlining the design process. We demonstrated this concept through a proof-of-concept implemented in the programming-based CAD application, OpenSCAD, and conducted an experiment with 11 users. Our findings suggest that this solution could significantly reduce design errors, improve interactivity and engagement in the design process, and lower the entry barrier for newcomers by reducing the mathematical skills typically required in programming-based CAD applications. + + + +### Rhapso: Automatically Embedding Fiber Materials into 3D Prints for Enhanced Interactivity +Authors: Daniel Ashbrook, Wei-Ju Lin, Nicholas Bentley, Diana Soponar, Zeyu Yan, Valkyrie Savage, Lung-Pan Cheng, Huaishu Peng, Hyunyoung Kim + +[Link](https://programs.sigchi.org/uist/2024/program/content/170936) + +Abstract: We introduce Rhapso, a 3D printing system designed to embed a diverse range of continuous fiber materials within 3D objects during the printing process. This approach enables integrating properties like tensile strength, force storage and transmission, or aesthetic and tactile characteristics, directly into low-cost thermoplastic 3D prints. These functional objects can have intricate actuation, self-assembly, and sensing capabilities with little to no manual intervention. To achieve this, we modify a low-cost Fused Filament Fabrication (FFF) 3D printer, adding a stepper motor-controlled fiber spool mechanism on a gear ring above the print bed.
In addition to hardware, we provide parsing software for precise fiber placement, which generates Gcode for printer operation. To illustrate the versatility of our system, we present applications that showcase its extensive design potential. Additionally, we offer comprehensive documentation and open designs, empowering others to replicate our system and explore its possibilities. + + + + +### Speed-Modulated Ironing: High-Resolution Shade and Texture Gradients in Single-Material 3D Printing +Authors: Mehmet Ozdemir, Marwa AlAlawi, Mustafa Doga Dogan, Jose Martinez Castro, Stefanie Mueller, Zjenja Doubrovski + +[Link](https://programs.sigchi.org/uist/2024/program/content/170731) + +Abstract: We present Speed-Modulated Ironing, a new fabrication method for programming visual and tactile properties in single-material 3D printing. We use one nozzle to 3D print and a second nozzle to reheat printed areas at varying speeds, controlling the material's temperature-response. The rapid adjustments of speed allow for fine-grained reheating, enabling high-resolution color and texture variations. We implemented our method in a tool that allows users to assign desired properties to 3D models and creates corresponding 3D printing instructions. We demonstrate our method with three temperature-responsive materials: a foaming filament, a filament with wood fibers, and a filament with cork particles. These filaments respond to temperature by changing color, roughness, transparency, and gloss. Our technical evaluation reveals the capabilities of our method in achieving sufficient resolution and color shade range that allows surface details such as small text, photos, and QR codes on 3D-printed objects. Finally, we provide application examples demonstrating the new design capabilities enabled by Speed-Modulated Ironing. + + + +### TRAvel Slicer: Continuous Extrusion Toolpaths for 3D Printing +Authors: Jaime Gould, Camila Friedman-Gerlicz, Leah Buechley + +[Link](https://programs.sigchi.org/uist/2024/program/content/170996) + +Abstract: In this paper we present Travel Reduction Algorithm (TRAvel) Slicer, which minimizes travel movements in 3D printing. Conventional slicing software generates toolpaths with many travel movements--movements without material extrusion. Some 3D printers are incapable of starting and stopping extrusion and it is difficult to impossible to control the extrusion of many materials. This makes toolpaths with travel movements unsuitable for a wide range of printers and materials. + +We developed the open-source TRAvel Slicer to enable the printing of complex 3D models on a wider range of printers and in a wider range of materials than is currently possible. TRAvel Slicer minimizes two different kinds of travel movements--what we term Inner- and Outer-Model travel. We minimize Inner-Model travel (travel within the 3D model) by generating space-filling Fermat spirals for each contiguous planar region of the model. We minimize Outer-Model travel (travels outside of the 3D model) by ordering the printing of different branches of the model, thus limiting transitions between branches. We present our algorithm and software and then demonstrate how: 1) TRAvel Slicer makes it possible to generate high-quality prints from a metal-clay material, CeraMetal, that is functionally unprintable using an off-the-shelf slicer. 2) TRAvel Slicer dramatically increases the printing efficiency of traditional plastic 3D printing compared to an off-the-shelf slicer. 
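+
+The TRAvel Slicer abstract above hinges on the notion of travel movements, i.e., moves without material extrusion. As a rough, generic illustration (not the TRAvel Slicer algorithm), the Python sketch below walks absolute-mode G-code and totals the XY distance of non-extruding G0/G1 moves; the sample G-code is invented for the example.
+
+```python
+import math
+import re
+
+def travel_distance(gcode_lines):
+    """Sum the XY distance of G0/G1 moves that do not extrude (E does not increase),
+    assuming absolute positioning for both the axes and the extruder."""
+    x = y = e = 0.0
+    travel = 0.0
+    for line in gcode_lines:
+        if not re.match(r"^(G0|G1)\b", line):
+            continue
+        words = dict(re.findall(r"([XYE])(-?\d+\.?\d*)", line))
+        nx, ny, ne = (float(words.get(k, v)) for k, v in (("X", x), ("Y", y), ("E", e)))
+        if ne <= e:  # no material deposited -> this is a travel move
+            travel += math.hypot(nx - x, ny - y)
+        x, y, e = nx, ny, ne
+    return travel
+
+demo = ["G1 X0 Y0 E0", "G1 X10 Y0 E1.2", "G0 X10 Y20", "G1 X30 Y20 E2.5"]
+print(f"travel distance: {travel_distance(demo):.1f} mm")  # only the G0 hop counts
+```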
+ + + +### Understanding and Supporting Debugging Workflows in CAD +Authors: Felix Hähnlein, Gilbert Bernstein, Adriana Schulz + +[Link](https://programs.sigchi.org/uist/2024/program/content/170944) + +Abstract: One of the core promises of parametric Computer-Aided Design (CAD) is that users can easily edit their model at any point in time. +However, due to the ambiguity of changing references to intermediate, updated geometry, parametric edits can lead to reference errors which are difficult to fix in practice. +We claim that debugging reference errors remains challenging because CAD systems do not provide users with tools to understand where the error happened and how to fix it. +To address these challenges, we prototype a graphical debugging tool, DeCAD, which helps comparing CAD model states both across operations and across edits. +In a qualitative lab study, we use DeCAD as a probe to understand specific challenges that users face and what workflows they employ to overcome them. +We conclude with design implications for future debugging tool developers. + + + + + +## Haptics +### LoopBot: Representing Continuous Haptics of Grounded Objects in Room-scale VR +Authors: Tetsushi Ikeda, Kazuyuki Fujita, Kumpei Ogawa, Kazuki Takashima, Yoshifumi Kitamura + +[Link](https://programs.sigchi.org/uist/2024/program/content/171016) + +Abstract: In room-scale virtual reality, providing continuous haptic feedback from touching grounded objects, such as walls and handrails, has been challenging due to the user's walking range and the required force. In this study, we propose LoopBot, a novel technique to provide continuous haptic feedback from grounded objects using only a single user-following robot. Specifically, LoopBot is equipped with a loop-shaped haptic prop attached to an omnidirectional robot that scrolls to cancel out the robot's displacement, giving the user the haptic sensation that the prop is actually fixed in place, or ``grounded.'' We first introduce the interaction design space of LoopBot and, as one of its promising interaction scenarios, implement a prototype for the experience of walking while grasping handrails. A performance evaluation shows that scrolling the prop cancels $77.5\%$ of the robot's running speed on average. A preliminary user test ($N=10$) also shows that the subjective realism of the experience and the sense of the virtual handrails being grounded were significantly higher than when the prop was not scrolled. Based on these findings, we discuss possible further development of LoopBot. + + + +### JetUnit: Rendering Diverse Force Feedback in Virtual Reality Using Water Jets +Authors: Zining Zhang, Jiasheng Li, Zeyu Yan, Jun Nishida, Huaishu Peng + +[Link](https://programs.sigchi.org/uist/2024/program/content/170767) + +Abstract: We propose JetUnit, a water-based VR haptic system designed to produce force feedback with a wide spectrum of intensities and frequencies through water jets. The key challenge in designing this system lies in optimizing parameters to enable the haptic device to generate force feedback that closely replicates the most intense force produced by direct water jets while ensuring the user remains dry. In this paper, we present the key design parameters of the JetUnit wearable device determined through a set of quantitative experiments and a perception study. We further conducted a user study to assess the impact of integrating our haptic solutions into virtual reality experiences. 
The results revealed that, by adhering to the design principles of JetUnit, the water-based haptic system is capable of delivering diverse force feedback sensations, significantly enhancing the immersive experience in virtual reality. + + + +### Selfrionette: A Fingertip Force-Input Controller for Continuous Full-Body Avatar Manipulation and Diverse Haptic Interactions +Authors: Takeru Hashimoto, Yutaro Hirao + +[Link](https://programs.sigchi.org/uist/2024/program/content/170833) + +Abstract: We propose Selfrionette, a controller that uses fingertip force input to drive avatar movements in virtual reality (VR). +This system enables users to interact with virtual objects and walk in VR using only fingertip force, overcoming physical and spatial constraints. Additionally, by fixing users' fingers, it provides users with counterforces equivalent to the applied force, allowing for diverse and wide dynamic range haptic feedback by adjusting the relationship between force input and virtual movement. +To evaluate the effectiveness of the proposed method, this paper focuses on hand interaction as a first step. +In User Study 1, we measured usability and embodiment during reaching tasks under Selfrionette, body tracking, and finger tracking conditions. +In User Study 2, we investigated whether users could perceive haptic properties such as weight, friction, and compliance under the same conditions as User Study 1. +Selfrionette was found to be comparable to body tracking in realism of haptic interaction, enabling embodied avatar experiences even in limited spatial conditions. + + + +### SpinShot: Optimizing Both Physical and Perceived Force Feedback of Flywheel-Based, Directional Impact Handheld Devices +Authors: Chia-An Fan, En-Huei Wu, Chia-Yu Cheng, Yu-Cheng Chang, Alvaro Lopez, Yu Chen, Chia-Chen Chi, Yi-Sheng Chan, Ching-Yi Tsai, Mike Chen + +[Link](https://programs.sigchi.org/uist/2024/program/content/170972) + +Abstract: Real-world impact, such as hitting a tennis ball and a baseball, generates instantaneous, directional impact forces. However, current ungrounded force feedback technologies, such as air jets and propellers, can only generate directional impulses that are 10x-10,000x weaker. We present SpinShot, a flywheel-based device with a solenoid-actuated stopper capable of generating directional impulse of 22Nm in 1ms, which is more than 10x stronger than prior ungrounded directional technologies. Furthermore, we present a novel force design that reverses the flywheel immediately after the initial impact, to significantly increase the perceived magnitude. We conducted a series of two formative, perceptual studies (n=16, 18), followed by a summative user experience study (n=16) that compared SpinShot vs. moving mass (solenoid) and vs. air jets in a VR baseball hitting game. Results showed that SpinShot significantly improved realism, immersion, magnitude (p < .01) compared to both baselines, but significantly reduced comfort vs. air jets primarily due to the 2.9x device weight. Overall, SpinShot was preferred by 63-75% of the participants. 
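+
+To put the flywheel figures in the SpinShot abstract above into perspective, here is a back-of-the-envelope Python sketch of the torque released when a small spinning disc is stopped in about a millisecond. The disc mass, radius, and speed are guessed values chosen only to land near the magnitude quoted above; they are not the SpinShot hardware specification.
+
+```python
+import math
+
+# Guessed flywheel parameters (not from the paper).
+mass_kg = 0.10       # disc mass
+radius_m = 0.03      # disc radius
+rpm = 5000.0         # spin speed before the stopper engages
+stop_time_s = 0.001  # abrupt stop in roughly 1 ms
+
+inertia = 0.5 * mass_kg * radius_m ** 2   # solid disc: I = 1/2 * m * r^2
+omega = rpm * 2.0 * math.pi / 60.0        # angular velocity in rad/s
+torque = inertia * omega / stop_time_s    # tau = I * d(omega)/dt
+
+print(f"I = {inertia:.2e} kg*m^2, omega = {omega:.0f} rad/s, peak torque ~ {torque:.1f} N*m")
+```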
+ + + + +## Vision-based UIs +### Vision-Based Hand Gesture Customization from a Single Demonstration +Authors: Soroush Shahi, Vimal Mollyn, Cori Tymoszek Park, Runchang Kang, Asaf Liberman, Oron Levy, Jun Gong, Abdelkareem Bedri, Gierad Laput + +[Link](https://programs.sigchi.org/uist/2024/program/content/170938) + +Abstract: Hand gesture recognition is becoming a more prevalent mode of human-computer interaction, especially as cameras proliferate across everyday devices. Despite continued progress in this field, gesture customization is often underexplored. Customization is crucial since it enables users to define and demonstrate gestures that are more natural, memorable, and accessible. However, customization requires efficient usage of user-provided data. We introduce a method that enables users to easily design bespoke gestures with a monocular camera from one demonstration. We employ transformers and meta-learning techniques to address few-shot learning challenges. Unlike prior work, our method supports any combination of one-handed, two-handed, static, and dynamic gestures, including different viewpoints, and the ability to handle irrelevant hand movements. We implement three real-world applications using our customization method, conduct a user study, and achieve up to 94\% average recognition accuracy from one demonstration. Our work provides a viable path for vision-based gesture customization, laying the foundation for future advancements in this domain. + + + +### VirtualNexus: Enhancing 360-Degree Video AR/VR Collaboration with Environment Cutouts and Virtual Replicas +Authors: Xincheng Huang, Michael Yin, Ziyi Xia, Robert Xiao + +[Link](https://programs.sigchi.org/uist/2024/program/content/170877) + +Abstract: Asymmetric AR/VR collaboration systems bring a remote VR user to a local AR user’s physical environment, allowing them to communicate and work within a shared virtual/physical space. Such systems often display the remote environment through 3D reconstructions or 360° videos. While 360° cameras stream an environment in higher quality, they lack spatial information, making them less interactable. We present VirtualNexus, an AR/VR collaboration system that enhances 360° video AR/VR collaboration with environment cutouts and virtual replicas. VR users can define cutouts of the remote environment to interact with as a world-in-miniature, and their interactions are synchronized to the local AR perspective. Furthermore, AR users can rapidly scan and share 3D virtual replicas of physical objects using neural rendering. We demonstrated our system’s utility through 3 example applications and evaluated our system in a dyadic usability test. VirtualNexus extends the interaction space of 360° telepresence systems, offering improved physical presence, versatility, and clarity in interactions. + + + +### Personal Time-Lapse +Authors: Nhan Tran, Ethan Yang, Angelique Taylor, Abe Davis + +[Link](https://programs.sigchi.org/uist/2024/program/content/170932) + +Abstract: Our bodies are constantly in motion—from the bending of arms and legs to the less conscious movement of breathing, our precise shape and location change constantly. This can make subtler developments (e.g., the growth of hair, or the healing of a wound) difficult to observe. Our work focuses on helping users record and visualize this type of subtle, longer-term change. 
We present a mobile tool that combines custom 3D tracking with interactive visual feedback and computational imaging to capture personal time-lapse, which approximates longer-term video of the subject (typically, part of the capturing user’s body) under a fixed viewpoint, body pose, and lighting condition. These personal time-lapses offer a powerful and detailed way to track visual changes of the subject over time. We begin with a formative study that examines what makes personal time-lapse so difficult to capture. Building on our findings, we motivate the design of our capture tool, evaluate this design with users, and demonstrate its effectiveness in a variety of challenging examples. + + + +### Chromaticity Gradient Mapping for Interactive Control of Color Contrast in Images and Video +Authors: Ruyu Yan, Jiatian Sun, Abe Davis + +[Link](https://programs.sigchi.org/uist/2024/program/content/170867) + +Abstract: We present a novel perceptually-motivated interactive tool for using color contrast to enhance details represented in the lightness channel of images and video. Our method lets users adjust the perceived contrast of different details by manipulating local chromaticity while preserving the original lightness of individual pixels. Inspired by the use of similar chromaticity mappings in painting, our tool effectively offers contrast along a user-selected gradient of chromaticities as additional bandwidth for representing and enhancing different details in an image. We provide an interface for our tool that closely resembles the familiar design of tonal contrast curve controls that are available in most professional image editing software. We show that our tool is effective for enhancing the perceived contrast of details without altering lightness in an image and present many examples of effects that can be achieved with our method on both images and video. + + + + +## Future of Typing +### OptiBasePen: Mobile Base+Pen Input on Passive Surfaces by Sensing Relative Base Motion Plus Close-Range Pen Position +Authors: Andreas Fender, Mohamed Kari + +[Link](https://programs.sigchi.org/uist/2024/program/content/170879) + +Abstract: Digital pen input devices based on absolute pen position sensing, such as Wacom Pens, support high-fidelity pen input. However, they require specialized sensing surfaces like drawing tablets, which can have a large desk footprint, constrain the possible input area, and limit mobility. In contrast, digital pens with integrated relative sensing enable mobile use on passive surfaces, but suffer from motion artifacts or require surface contact at all times, deviating from natural pen affordances. We present OptiBasePen, a device for mobile pen input on ordinary surfaces. Our prototype consists of two parts: the "base" on which the hand rests and the pen for fine-grained input. The base features a high-precision mouse sensor to sense its own relative motion, and two infrared image sensors to track the absolute pen tip position within the base's frame of reference. This enables pen input on ordinary surfaces without external cameras while also avoiding drift from pen micro-movements. In this work, we present our prototype as well as the general base+pen concept, which combines relative and absolute sensing. 
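+
+The OptiBasePen abstract above combines relative sensing (the base's mouse sensor) with absolute sensing of the pen tip in the base's frame of reference. The Python sketch below shows the generic 2D coordinate-frame composition such a setup implies: dead-reckon the base pose from per-frame motion deltas, then transform the locally sensed pen tip into surface coordinates. The variable names and the planar simplification are assumptions for illustration, not the authors' pipeline.
+
+```python
+import math
+
+def integrate_pen_path(base_deltas, pen_local_positions):
+    """Compose base dead-reckoning (dx, dy, dtheta per frame, in the base's own frame)
+    with the pen tip sensed in the base's local frame to get surface coordinates."""
+    bx = by = theta = 0.0
+    path = []
+    for (dx, dy, dtheta), (px, py) in zip(base_deltas, pen_local_positions):
+        # Advance the base pose using the relative motion reported by its sensor.
+        bx += dx * math.cos(theta) - dy * math.sin(theta)
+        by += dx * math.sin(theta) + dy * math.cos(theta)
+        theta += dtheta
+        # Transform the locally tracked pen tip into the surface frame.
+        gx = bx + px * math.cos(theta) - py * math.sin(theta)
+        gy = by + px * math.sin(theta) + py * math.cos(theta)
+        path.append((round(gx, 3), round(gy, 3)))
+    return path
+
+print(integrate_pen_path([(1.0, 0.0, 0.0), (1.0, 0.0, 0.1)], [(0.5, 0.2), (0.5, 0.2)]))
+```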
+ + + +### Palmrest+: Expanding Laptop Input Space with Shear Force on Palm-Resting Area +Authors: Jisu Yim, Seoyeon Bae, Taejun Kim, Sunbum Kim, Geehyuk Lee + +[Link](https://programs.sigchi.org/uist/2024/program/content/170781) + +Abstract: The palmrest area of laptops has potential as an additional input space, considering its consistent palm contact during keyboard interaction. We propose Palmrest+, leveraging shear force exerted on the palmrest area. We suggest two input techniques: Palmrest Shortcut, for instant shortcut execution, and Palmrest Joystick, for continuous value input. These allow seamless and subtle input amidst keyboard typing. Evaluation of Palmrest Shortcut against conventional keyboard shortcuts revealed faster performance for applying shear force in a unimanual and bimanual manner, with a significant reduction in gaze shifting. Additionally, the assessment of Palmrest Joystick against the laptop touchpad demonstrated comparable performance in selecting one- and two-dimensional targets with low-precision pointing, i.e., for short distances and large target sizes. The maximal hand displacement significantly decreased for both Palmrest Shortcut and Palmrest Joystick compared to conventional methods. These findings verify the feasibility and effectiveness of leveraging the palmrest area as an additional input space on laptops, offering promising enhancements to typing-related user interaction experiences. + + + +### TouchInsight: Uncertainty-aware Rapid Touch and Text Input for Mixed Reality from Egocentric Vision +Authors: Paul Streli, Mark Richardson, Fadi Botros, Shugao Ma, Robert Wang, Christian Holz + +[Link](https://programs.sigchi.org/uist/2024/program/content/170817) + +Abstract: While passive surfaces offer numerous benefits for interaction in mixed reality, reliably detecting touch input solely from head-mounted cameras has been a long-standing challenge. Camera specifics, hand self-occlusion, and rapid movements of both head and fingers introduce considerable uncertainty about the exact location of touch events. Existing methods have thus not been capable of achieving the performance needed for robust interaction. +In this paper, we present a real-time pipeline that detects touch input from all ten fingers on any physical surface, purely based on egocentric hand tracking. Our method TouchInsight comprises a neural network to predict the moment of a touch event, the finger making contact, and the touch location. TouchInsight represents locations through a bivariate Gaussian distribution to account for uncertainties due to sensing inaccuracies, which we resolve through contextual priors to accurately infer intended user input. +We first evaluated our method offline and found that it locates input events with a mean error of 6.3 mm, and accurately detects touch events (F1=0.99) and identifies the finger used (F1=0.96). In an online evaluation, we then demonstrate the effectiveness of our approach for a core application of dexterous touch input: two-handed text entry. In our study, participants typed 37.0 words per minute with an uncorrected error rate of 2.9% on average. + + + +### Can Capacitive Touch Images Enhance Mobile Keyboard Decoding? +Authors: Piyawat Lertvittayakumjorn, Shanqing Cai, Billy Dou, Cedric Ho, Shumin Zhai + +[Link](https://programs.sigchi.org/uist/2024/program/content/170719) + +Abstract: Capacitive touch sensors capture the two-dimensional spatial profile (referred to as a touch heatmap) of a finger's contact with a mobile touchscreen.
However, the research and design of touchscreen mobile keyboards -- one of the most speed and accuracy demanding touch interfaces -- has focused on the location of the touch centroid derived from the touch image heatmap as the input, discarding the rest of the raw spatial signals. In this paper, we investigate whether touch heatmaps can be leveraged to further improve the tap decoding accuracy for mobile touchscreen keyboards. Specifically, we developed and evaluated machine-learning models that interpret user taps by using the centroids and/or the heatmaps as their input and studied the contribution of the heatmaps to model performance. The results show that adding the heatmap into the input feature set led to 21.4% relative reduction of character error rates on average, compared to using the centroid alone. Furthermore, we conducted a live user study with the centroid-based and heatmap-based decoders built into Pixel 6 Pro devices and observed lower error rate, faster typing speed, and higher self-reported satisfaction score based on the heatmap-based decoder than the centroid-based decoder. These findings underline the promise of utilizing touch heatmaps for improving typing experience in mobile keyboards. + + + + +## Bodily Signals +### Empower Real-World BCIs with NIRS-X: An Adaptive Learning Framework that Harnesses Unlabeled Brain Signals +Authors: Liang Wang, Jiayan Zhang, Jinyang Liu, Devon McKeon, David Guy Brizan, Giles Blaney, Robert Jacob + +[Link](https://programs.sigchi.org/uist/2024/program/content/170939) + +Abstract: Brain-Computer Interfaces (BCIs) using functional near-infrared spectroscopy (fNIRS) hold promise for future interactive user interfaces due to their ease of deployment and declining cost. However, they typically require a separate calibration process for each user and task, which can be burdensome. Machine learning helps, but faces a data scarcity problem. Due to inherent inter-user variations in physiological data, it has been typical to create a new annotated training dataset for every new task and user. To reduce dependence on such extensive data collection and labeling, we present an adaptive learning framework, NIRS-X, to harness more easily accessible unlabeled fNIRS data. NIRS-X includes two key components: NIRSiam and NIRSformer. We use the NIRSiam algorithm to extract generalized brain activity representations from unlabeled fNIRS data obtained from previous users and tasks, and then transfer that knowledge to new users and tasks. In conjunction, we design a neural network, NIRSformer, tailored for capturing both local and global, spatial and temporal relationships in multi-channel fNIRS brain input signals. By using unlabeled data from both a previously released fNIRS2MW visual $n$-back dataset and a newly collected fNIRS2MW audio $n$-back dataset, NIRS-X demonstrates its strong adaptation capability to new users and tasks. Results show comparable or superior performance to supervised methods, making NIRS-X promising for real-world fNIRS-based BCIs. 
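+
+The NIRS-X abstract above describes extracting generalized representations from unlabeled fNIRS data (NIRSiam) before transferring them to new users and tasks. The paper's method is not reproduced in this listing; as a hedged stand-in, the Python sketch below shows the symmetric negative cosine-similarity objective common to Siamese-style self-supervised learning, applied to two augmented views of the same multi-channel signal window. The augmentations, window shape, and toy encoder are assumptions, not the NIRSiam algorithm.
+
+```python
+import numpy as np
+
+def augment(window, rng):
+    """Toy augmentation of a multi-channel signal window: channel scaling plus jitter."""
+    scale = rng.uniform(0.9, 1.1, size=(window.shape[0], 1))
+    noise = rng.normal(0.0, 0.05, size=window.shape)
+    return window * scale + noise
+
+def encode(window):
+    """Stand-in 'encoder' (per-channel mean); a real model would be a neural network."""
+    return window.mean(axis=1)
+
+def negative_cosine(p, z):
+    """Negative cosine similarity; z would be treated as a stop-gradient target."""
+    return -float(np.dot(p / np.linalg.norm(p), z / np.linalg.norm(z)))
+
+rng = np.random.default_rng(0)
+window = rng.normal(size=(8, 64))             # 8 channels x 64 samples (assumed shape)
+v1, v2 = augment(window, rng), augment(window, rng)
+loss = 0.5 * (negative_cosine(encode(v1), encode(v2)) +
+              negative_cosine(encode(v2), encode(v1)))
+print(f"symmetric similarity loss: {loss:.3f}")   # near -1 for two views of one window
+```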
+ + + +### Understanding the Effects of Restraining Finger Coactivation in Mid-Air Typing: from a Neuromechanical Perspective +Authors: Hechuan Zhang, Xuewei Liang, Ying Lei, Yanjun Chen, Zhenxuan He, Yu Zhang, Lihan Chen, Hongnan Lin, Teng Han, Feng Tian + +[Link](https://programs.sigchi.org/uist/2024/program/content/170941) + +Abstract: Typing in mid-air is often perceived as intuitive yet presents challenges due to finger coactivation, a neuromechanical phenomenon that involves involuntary finger movements stemming from the lack of physical constraints. Previous studies have examined and addressed the impacts of finger coactivation using algorithmic approaches. Alternatively, this paper explores the neuromechanical effects of finger coactivation on mid-air typing, aiming to deepen our understanding and provide valuable insights to improve these interactions. We utilized a wearable device that restrains finger coactivation as a prop to conduct two mid-air studies, including a rapid finger-tapping task and a ten-finger typing task. The results revealed that restraining coactivation reduced not only mispresses, a classic coactivated error generally considered a harm caused by coactivation, but also, unexpectedly, motor control errors and spelling errors, which are regarded as non-coactivated errors. +Additionally, the study evaluated the neural resources involved in motor execution using functional Near Infrared Spectroscopy (fNIRS), which tracked cortical arousal during mid-air typing. The findings demonstrated decreased activation in the primary motor cortex of the left hemisphere when coactivation was restrained, suggesting a diminished motor execution load. This reduction suggests that a portion of neural resources is conserved, which also potentially aligns with perceived lower mental workload and decreased frustration levels. + + + +### What is Affective Touch Made Of? A Soft Capacitive Sensor Array Reveals the Interplay between Shear, Normal Stress and Individuality +Authors: Devyani McLaren, Jian Gao, Xiulun Yin, Rúbia Reis Guerra, Preeti Vyas, Chrys Morton, Xi Laura Cang, Yizhong Chen, Yiyuan Sun, Ying Li, John Madden, Karon MacLean + +[Link](https://programs.sigchi.org/uist/2024/program/content/171010) + +Abstract: Humans physically express emotion by modulating parameters that register on mammalian skin mechanoreceptors, but are unavailable in current touch-sensing technology. +Greater sensory richness combined with data on affect-expression composition is a prerequisite to estimating affect from touch, with applications including physical human-robot interaction. To examine shear alongside more easily captured normal stresses, we tailored recent capacitive technology to attain performance suitable for affective touch, creating a flexible, reconfigurable and soft 36-taxel array that detects multitouch normal and 2-dimensional shear at ranges of 1.5kPa-43kPa and $\pm$ 0.3-3.8kPa respectively, wirelessly at ~43Hz (1548 taxels/s). In a deep-learning classification of 9 gestures (N=16), inclusion of shear data improved accuracy to 88\%, compared to 80\% with normal stress data alone, confirming shear stress's expressive centrality. +Using this rich data, we analyse the interplay of sensed-touch features, gesture attributes and individual differences, propose affective-touch sensing requirements, and share technical considerations for performance and practicality.
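+
+To make the sensor description in the affective-touch abstract above concrete, the Python sketch below packs one frame of a 6x6 taxel array into normal and shear channels and derives a few frame-level features of the kind a gesture classifier might consume. The 6x6 arrangement, thresholds, and feature choices are illustrative assumptions, not the authors' pipeline; the abstract specifies only the 36-taxel count and the stress ranges.
+
+```python
+import numpy as np
+
+rng = np.random.default_rng(1)
+# One frame from a hypothetical 6x6 arrangement of the 36 taxels: channel 0 holds
+# normal stress (kPa), channels 1-2 hold 2D shear (kPa). Values are random stand-ins.
+frame = np.stack([
+    rng.uniform(1.5, 43.0, (6, 6)),   # normal stress, within the range quoted above
+    rng.uniform(-3.8, 3.8, (6, 6)),   # shear x
+    rng.uniform(-3.8, 3.8, (6, 6)),   # shear y
+])
+
+def frame_features(frame, contact_threshold_kpa=2.0):
+    normal, shear_x, shear_y = frame
+    shear_mag = np.hypot(shear_x, shear_y)
+    return {
+        "contact_taxels": int((normal > contact_threshold_kpa).sum()),
+        "peak_normal_kpa": round(float(normal.max()), 1),
+        "mean_shear_kpa": round(float(shear_mag.mean()), 2),
+        "net_shear_direction_rad": round(float(np.arctan2(shear_y.sum(), shear_x.sum())), 2),
+    }
+
+print(frame_features(frame))
+```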
+ + + +### Exploring the Effects of Sensory Conflicts on Cognitive Fatigue in VR Remappings +HONORABLE_MENTION + +Authors: Tianren Luo, Gaozhang Chen, Yijian Wen, Pengxiang Wang, yachun fan, Teng Han, Feng Tian + +[Link](https://programs.sigchi.org/uist/2024/program/content/171000) + +Abstract: Virtual reality (VR) is found to present significant cognitive challenges due to its immersive nature and frequent sensory conflicts. This study systematically investigates the impact of sensory conflicts induced by VR remapping techniques on cognitive fatigue, and unveils their correlation. We utilized three remapping methods (haptic repositioning, head-turning redirection, and giant resizing) to create different types of sensory conflicts, and measured perceptual thresholds to induce various intensities of the conflicts. Through experiments involving cognitive tasks along with subjective and physiological measures, we found that all three remapping methods influenced the onset and severity of cognitive fatigue, with visual-vestibular conflict having the greatest impact. Interestingly, visual-experiential/memory conflict showed a mitigating effect on cognitive fatigue, emphasizing the role of novel sensory experiences. This study contributes to a deeper understanding of cognitive fatigue under sensory conflicts and provides insights for designing VR experiences that align better with human perceptual and cognitive capabilities. + + + + +## Shared Spaces +### BlendScape: Enabling End-User Customization of Video-Conferencing Environments through Generative AI +HONORABLE_MENTION + +Authors: Shwetha Rajaram, Nels Numan, Bala Kumaravel, Nicolai Marquardt, Andrew Wilson + +[Link](https://programs.sigchi.org/uist/2024/program/content/170854) + +Abstract: Today’s video-conferencing tools support a rich range of professional and social activities, but their generic meeting environments cannot be dynamically adapted to align with distributed collaborators’ needs. To enable end-user customization, we developed BlendScape, a rendering and composition system for video-conferencing participants to tailor environments to their meeting context by leveraging AI image generation techniques. BlendScape supports flexible representations of task spaces by blending users’ physical or digital backgrounds into unified environments and implements multimodal interaction techniques to steer the generation. Through an exploratory study with 15 end-users, we investigated whether and how they would find value in using generative AI to customize video-conferencing environments. Participants envisioned using a system like BlendScape to facilitate collaborative activities in the future, but required further controls to mitigate distracting or unrealistic visual elements. We implemented scenarios to demonstrate BlendScape's expressiveness for supporting environment design strategies from prior work and propose composition techniques to improve the quality of environments. + + + +### MyWebstrates: Webstrates as Local-first Software +Authors: Clemens Klokmose, James Eagan, Peter van Hardenberg + +[Link](https://programs.sigchi.org/uist/2024/program/content/170812) + +Abstract: Webstrates are web substrates, a practical realization of shareable dynamic media under which distributability, shareability, and malleability are fundamental software principles. 
Webstrates blur the distinction between application and document in a way that enables users to share, repurpose, and refit software across a variety of domains, but their reliance on a central server constrains their use, is at odds with personal and collective control of data, and limits applications to the web. We extend the fundamental principles to include interoperability and sovereignty over data and propose MyWebstrates, an implementation of Webstrates on top of a new, lower-level substrate for synchronization built around local-first software principles. MyWebstrates registers itself in the user’s browser and functions as a piece of local software that can selectively synchronise data over sync servers or peer-to-peer connections. We show how MyWebstrates extends Webstrates to enable offline collaborative use, interoperate between Webstrates on non-web technologies such as Unity, and maintain personal and collective sovereignty over data. We demonstrate how this enables new types of applications of Webstrates and discuss limitations of this approach and new challenges that it reveals. + + + +### SituationAdapt: Contextual UI Optimization in Mixed Reality with Situation Awareness via LLM Reasoning +Authors: Zhipeng Li, Christoph Gebhardt, Yves Inglin, Nicolas Steck, Paul Streli, Christian Holz + +[Link](https://programs.sigchi.org/uist/2024/program/content/170856) + +Abstract: Mixed Reality is increasingly used in mobile settings beyond controlled home and office spaces. This mobility introduces the need for user interface layouts that adapt to varying contexts. However, existing adaptive systems are designed only for static environments. In this paper, we introduce SituationAdapt, a system that adjusts Mixed Reality UIs to real-world surroundings by considering environmental and social cues in shared settings. Our system consists of perception, reasoning, and optimization modules for UI adaptation. Our perception module identifies objects and individuals around the user, while our reasoning module leverages a Vision-and-Language Model to assess the placement of interactive UI elements. This ensures that adapted layouts do not obstruct relevant environmental cues or interfere with social norms. Our optimization module then generates Mixed Reality interfaces that account for these considerations as well as temporal constraints. The evaluation of SituationAdapt is two-fold: We first validate our reasoning component’s capability in assessing UI contexts comparable to human expert users. In an online user study, we then established our system’s capability of producing context-aware MR layouts, where it outperformed adaptive methods from previous work. We further demonstrate the versatility and applicability of SituationAdapt with a set of application scenarios. + + + +### Desk2Desk: Optimization-based Mixed Reality Workspace Integration for Remote Side-by-side Collaboration +Authors: Ludwig Sidenmark, Tianyu Zhang, Leen Al Lababidi, Jiannan Li, Tovi Grossman + +[Link](https://programs.sigchi.org/uist/2024/program/content/170830) + +Abstract: Mixed Reality enables hybrid workspaces where physical and virtual monitors are adaptively created and moved to suit the current environment and needs. However, in shared settings, individual users’ workspaces are rarely aligned and can vary significantly in the number of monitors, available physical space, and workspace layout, creating inconsistencies between workspaces, which may cause confusion and reduce collaboration.
We present Desk2Desk, an optimization-based approach for remote collaboration in which the hybrid workspaces of two collaborators are fully integrated to enable immersive side-by-side collaboration. The optimization adjusts each user’s workspace in layout and number of shared monitors and creates a mapping between workspaces to handle inconsistencies between workspaces due to physical constraints (e.g. physical monitors). We show in a user study how our system adaptively merges dissimilar physical workspaces to enable immersive side-by-side collaboration, and demonstrate how an optimization-based approach can effectively address dissimilar physical layouts. + + + + +### SpaceBlender: Creating Context-Rich Collaborative Spaces Through Generative 3D Scene Blending +Authors: Nels Numan, Shwetha Rajaram, Bala Kumaravel, Nicolai Marquardt, Andrew Wilson + +[Link](https://programs.sigchi.org/uist/2024/program/content/170843) + +Abstract: There is increased interest in using generative AI to create 3D spaces for virtual reality (VR) applications. However, today’s models produce artificial environments, falling short of supporting collaborative tasks that benefit from incorporating the user's physical context. To generate environments that support VR telepresence, we introduce SpaceBlender, a novel pipeline that utilizes generative AI techniques to blend users' physical surroundings into unified virtual spaces. This pipeline transforms user-provided 2D images into context-rich 3D environments through an iterative process consisting of depth estimation, mesh alignment, and diffusion-based space completion guided by geometric priors and adaptive text prompts. In a preliminary within-subjects study, where 20 participants performed a collaborative VR affinity diagramming task in pairs, we compared SpaceBlender with a generic virtual environment and a state-of-the-art scene generation framework, evaluating its ability to create virtual spaces suitable for collaboration. Participants appreciated the enhanced familiarity and context provided by SpaceBlender but also noted complexities in the generative environments that could detract from task focus. Drawing on participant feedback, we propose directions for improving the pipeline and discuss the value and design of blended spaces for different scenarios. + + + + +## AI & Automation +### Memolet: Reifying the Reuse of User-AI Conversational Memories +Authors: Ryan Yen, Jian Zhao + +[Link](https://programs.sigchi.org/uist/2024/program/content/170751) + +Abstract: As users engage more frequently with AI conversational agents, conversations may exceed their memory capacity, leading to failures in correctly leveraging certain memories for tailored responses. However, in finding past memories that can be reused or referenced, users need to retrieve relevant information in various conversations and articulate to the AI their intention to reuse these memories. To support this process, we introduce Memolet, an interactive object that reifies memory reuse. Users can directly manipulate Memolet to specify which memories to reuse and how to use them. We developed a system demonstrating Memolet's interaction across various memory reuse stages, including memory extraction, organization, prompt articulation, and generation refinement. We examine the system's usefulness with an N=12 within-subject study and provide design implications for future systems that support user-AI conversational memory reusing. 
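+
+Memolet, described above, lets users select which past conversational memories to reuse and how. As a loose illustration of the retrieval step such a system needs, the Python sketch below scores stored snippets against a query with a simple bag-of-words cosine similarity and assembles the best matches into a prompt preamble. The storage format, scoring, and wording are assumptions for illustration only, not Memolet's implementation.
+
+```python
+from collections import Counter
+import math
+
+MEMORIES = [  # hypothetical snippets saved from earlier conversations
+    "User prefers vegetarian recipes with less than 30 minutes of prep time.",
+    "User is training for a half marathon in October.",
+    "User asked for beginner-friendly explanations of transformers.",
+]
+
+def cosine(a: Counter, b: Counter) -> float:
+    dot = sum(count * b[token] for token, count in a.items())
+    norm = math.sqrt(sum(v * v for v in a.values())) * math.sqrt(sum(v * v for v in b.values()))
+    return dot / norm if norm else 0.0
+
+def select_memories(query: str, k: int = 2):
+    q = Counter(query.lower().split())
+    return sorted(MEMORIES, key=lambda m: cosine(q, Counter(m.lower().split())), reverse=True)[:k]
+
+query = "suggest a quick vegetarian dinner"
+preamble = "Relevant memories:\n" + "\n".join(f"- {m}" for m in select_memories(query))
+print(preamble)
+```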
+ + + +### VIME: Visual Interactive Model Explorer for Identifying Capabilities and Limitations of Machine Learning Models for Sequential Decision-Making +Authors: Anindya Das Antar, Somayeh Molaei, Yan-Ying Chen, Matthew Lee, Nikola Banovic + +[Link](https://programs.sigchi.org/uist/2024/program/content/170861) + +Abstract: Ensuring that Machine Learning (ML) models make correct and meaningful inferences is necessary for the broader adoption of such models into high-stakes decision-making scenarios. Thus, ML model engineers increasingly use eXplainable AI (XAI) tools to investigate the capabilities and limitations of their ML models before deployment. However, explaining sequential ML models, which make a series of decisions at each timestep, remains challenging. We present Visual Interactive Model Explorer (VIME), an XAI toolbox that enables ML model engineers to explain decisions of sequential models in different ``what-if'' scenarios. Our evaluation with 14 ML experts, who investigated two existing sequential ML models using VIME and a baseline XAI toolbox to explore ``what-if'' scenarios, showed that VIME made it easier to identify and explain instances when the models made wrong decisions compared to the baseline. Our work informs the design of future interactive XAI mechanisms for evaluating sequential ML-based decision support systems. + + + +### SERENUS: Alleviating Low-Battery Anxiety Through Real-time, Accurate, and User-Friendly Energy Consumption Prediction of Mobile Applications +Authors: Sera Lee, Dae R. Jeong, Junyoung Choi, Jaeheon Kwak, Seoyun Son, Jean Song, Insik Shin + +[Link](https://programs.sigchi.org/uist/2024/program/content/170937) + +Abstract: Low-battery anxiety has emerged as a result of growing dependence on mobile devices, where the anxiety arises when the battery level runs low. While battery life can be extended through power-efficient hardware and software optimization techniques, low-battery anxiety will remain a phenomenon as long as mobile devices rely on batteries. In this paper, we investigate how an accurate real-time energy consumption prediction at the application-level can improve the user experience in low-battery situations. We present Serenus, a mobile system framework specifically tailored to predict the energy consumption of each mobile application and present the prediction in a user-friendly manner. We conducted user studies using Serenus to verify that highly accurate energy consumption predictions can effectively alleviate low-battery anxiety by assisting users in planning their application usage based on the remaining battery life. We summarize requirements to mitigate users’ anxiety, guiding the design of future mobile system frameworks. + + + + +## Poses as Input +### SolePoser: Real-Time 3D Human Pose Estimation using Insole Pressure Sensors +Authors: Erwin Wu, Rawal Khirodkar, Hideki Koike, Kris Kitani + +[Link](https://programs.sigchi.org/uist/2024/program/content/170905) + +Abstract: We propose SolePoser, a real-time 3D pose estimation system that leverages only a single pair of insole sensors. Unlike conventional methods relying on fixed cameras or bulky wearable sensors, our approach offers minimal and natural setup requirements. The proposed system utilizes pressure and IMU sensors embedded in insoles to capture the body weight's pressure distribution at the feet and its 6 DoF acceleration. This information is used to estimate the 3D full-body joint position by a two-stream transformer network. 
A novel double-cycle consistency loss and a cross-attention module are further introduced to learn the relationship between 3D foot positions and their pressure distributions. +We also introduced two different datasets of sports and daily exercises, offering 908k frames across eight different activities. Our experiments show that our method's performance is on par with top-performing approaches, which utilize more IMUs and even outperform third-person-view camera-based methods in certain scenarios. + + + +### Gait Gestures: Examining Stride and Foot Strike Variation as an Input Method While Walking +Authors: Ching-Yi Tsai, Ryan Yen, Daekun Kim, Daniel Vogel + +[Link](https://programs.sigchi.org/uist/2024/program/content/170926) + +Abstract: Walking is a cyclic pattern of alternating footstep strikes, with each pair of steps forming a stride, and a series of strides forming a gait. We conduct a systematic examination of different kinds of intentional variations from a normal gait that could be used as input actions without interrupting overall walking progress. A design space of 22 candidate Gait Gestures is generated by adapting previous standing foot input actions and identifying new actions possible in a walking context. A formative study (n=25) examines movement easiness, social acceptability, and walking compatibility with foot movement logging to calculate temporal and spatial characteristics. Using a categorization of these results, 7 gestures are selected for a wizard-of-oz prototype demonstrating an AR interface controlled by Gait Gestures for ordering food and audio playback while walking. As a technical proof-of-concept, a gait gesture recognizer is developed and tested using the formative study data. + + + +### EgoTouch: On-Body Touch Input Using AR/VR Headset Cameras +Authors: Vimal Mollyn, Chris Harrison + +[Link](https://programs.sigchi.org/uist/2024/program/content/170875) + +Abstract: In augmented and virtual reality (AR/VR) experiences, a user’s arms and hands can provide a convenient and tactile surface for touch input. Prior work has shown on-body input to have significant speed, accuracy, and ergonomic benefits over in-air interfaces, which are common today. In this work, we demonstrate high accuracy, bare hands (i.e., no special instrumentation of the user) skin input using just an RGB camera, like those already integrated into all modern XR headsets. Our results show this approach can be accurate, and robust across diverse lighting conditions, skin tones, and body motion (e.g., input while walking). Finally, our pipeline also provides rich input metadata including touch force, finger identification, angle of attack, and rotation. We believe these are the requisite technical ingredients to more fully unlock on-skin interfaces that have been well motivated in the HCI literature but have lacked robust and practical methods. + + + +### MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices +Authors: Vasco Xu, Chenfeng Gao, Henry Hoffman, Karan Ahuja + +[Link](https://programs.sigchi.org/uist/2024/program/content/170732) + +Abstract: There has been a continued trend towards minimizing instrumentation for full-body motion capture, going from specialized rooms and equipment, to arrays of worn sensors and recently sparse inertial pose capture methods. 
However, as these techniques migrate towards lower-fidelity IMUs on ubiquitous commodity devices, like phones, watches, and earbuds, challenges arise, including compromised online performance, temporal consistency, and loss of global translation due to sensor noise and drift. Addressing these challenges, we introduce MobilePoser, a real-time system for full-body pose and global translation estimation using any available subset of IMUs already present in these consumer devices. MobilePoser employs a multi-stage deep neural network for kinematic pose estimation followed by a physics-based motion optimizer, achieving state-of-the-art accuracy while remaining lightweight. We conclude with a series of demonstrative applications to illustrate the unique potential of MobilePoser across a variety of fields, such as health and wellness, gaming, and indoor navigation, to name a few. + + + +### Touchscreen-based Hand Tracking for Remote Whiteboard Interaction +Authors: Xinshuang Liu, Yizhong Zhang, Xin Tong + +[Link](https://programs.sigchi.org/uist/2024/program/content/170956) + +Abstract: In whiteboard-based remote communication, the seamless integration of drawn content and hand-screen interactions is essential for an immersive user experience. Previous methods either require bulky device setups for capturing hand gestures or fail to accurately track the hand poses from capacitive images. In this paper, we present a real-time method for precisely tracking the 3D poses of both hands from capacitive video frames. To this end, we develop a deep neural network to identify hands and infer hand joint positions from capacitive frames, and then recover 3D hand poses from the hand-joint positions via a constrained inverse kinematics solver. Additionally, we design a device setup for capturing high-quality hand-screen interaction data and obtain a more accurate synchronized capacitive video and hand pose dataset. Our method improves the accuracy and stability of 3D hand tracking for capacitive frames while maintaining a compact device setup for remote communication. We validate our scheme design and its superior performance on 3D hand pose tracking and demonstrate the effectiveness of our method in whiteboard-based remote communication. + + + +### SeamPose: Repurposing Seams as Capacitive Sensors in a Shirt for Upper-Body Pose Tracking +Authors: Tianhong Yu, Mary Zhang, Peter He, Chi-Jung Lee, Cassidy Cheesman, Saif Mahmud, Ruidong Zhang, Francois Guimbretiere, Cheng Zhang + +[Link](https://programs.sigchi.org/uist/2024/program/content/170739) + +Abstract: Seams are areas of overlapping fabric formed by stitching two or more pieces of fabric together in the cut-and-sew apparel manufacturing process. In SeamPose, we repurposed seams as capacitive sensors in a shirt for continuous upper-body pose estimation. Compared to previous all-textile motion-capturing garments that place the electrodes on the clothing surface, our solution leverages existing seams inside of a shirt by machine-sewing insulated conductive threads over the seams. The unique invisibility and placement of the seams allow the sensing shirt to look and wear like a conventional shirt while providing exciting pose-tracking capabilities. To validate this approach, we implemented a proof-of-concept untethered shirt with 8 capacitive sensing seams.
With a 12-participant user study, our customized deep-learning pipeline accurately estimates the relative (to the pelvis) upper-body 3D joint positions with a mean per joint position error (MPJPE) of 6.0 cm. SeamPose represents a step towards unobtrusive integration of smart clothing for everyday pose estimation. + + + + +## AI as Copilot +### DiscipLink: Unfolding Interdisciplinary Information Seeking Process via Human-AI Co-Exploration +Authors: Chengbo Zheng, Yuanhao Zhang, Zeyu Huang, Chuhan Shi, Minrui Xu, Xiaojuan Ma + +[Link](https://programs.sigchi.org/uist/2024/program/content/170741) + +Abstract: Interdisciplinary studies often require researchers to explore literature in diverse branches of knowledge. Yet, navigating through the highly scattered knowledge from unfamiliar disciplines poses a significant challenge. In this paper, we introduce DiscipLink, a novel interactive system that facilitates collaboration between researchers and large language models (LLMs) in interdisciplinary information seeking (IIS). Based on users' topic of interest, DiscipLink initiates exploratory questions from the perspectives of possible relevant fields of study, and users can further tailor these questions. DiscipLink then supports users in searching and screening papers under selected questions by automatically expanding queries with disciplinary-specific terminologies, extracting themes from retrieved papers, and highlighting the connections between papers and questions. Our evaluation, comprising a within-subject comparative experiment and an open-ended exploratory study, reveals that DiscipLink can effectively support researchers in breaking down disciplinary boundaries and integrating scattered knowledge in diverse fields. The findings underscore the potential of LLM-powered tools in fostering information-seeking practices and bolstering interdisciplinary research. + + + +### Improving Steering and Verification in AI-Assisted Data Analysis with Interactive Task Decomposition +Authors: Majeed Kazemitabaar, Jack Williams, Ian Drosos, Tovi Grossman, Austin Henley, Carina Negreanu, Advait Sarkar + +[Link](https://programs.sigchi.org/uist/2024/program/content/170918) + +Abstract: LLM-powered tools like ChatGPT Data Analysis, have the potential to help users tackle the challenging task of data analysis programming, which requires expertise in data processing, programming, and statistics. However, our formative study (n=15) uncovered serious challenges in verifying AI-generated results and steering the AI (i.e., guiding the AI system to produce the desired output). We developed two contrasting approaches to address these challenges. The first (Stepwise) decomposes the problem into step-by-step subgoals with pairs of editable assumptions and code until task completion, while the second (Phasewise) decomposes the entire problem into three editable, logical phases: structured input/output assumptions, execution plan, and code. A controlled, within-subjects experiment (n=18) compared these systems against a conversational baseline. Users reported significantly greater control with the Stepwise and Phasewise systems, and found intervention, correction, and verification easier, compared to the baseline. The results suggest design guidelines and trade-offs for AI-assisted data analysis tools. 
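To make the Stepwise idea above more concrete, the sketch below models each subgoal as an editable pair of natural-language assumption and code, executed in a shared namespace so later steps can build on earlier results. This is a minimal illustration under assumed names (`Step`, `StepwisePlan`, and the toy pandas example are hypothetical), not the authors' implementation.

```python
from dataclasses import dataclass, field

import pandas as pd


@dataclass
class Step:
    """One editable subgoal: a natural-language assumption paired with code."""
    assumption: str         # what the system believes about the data at this point
    code: str               # the code fragment implementing the subgoal
    verified: bool = False  # set True once the user has inspected and approved it


@dataclass
class StepwisePlan:
    """A sequence of subgoals run in one shared namespace, step by step."""
    steps: list = field(default_factory=list)

    def run(self, env: dict) -> dict:
        for i, step in enumerate(self.steps):
            if not step.verified:
                raise RuntimeError(f"step {i} not verified yet: {step.assumption}")
            exec(step.code, env)  # deliberately transparent: the user saw this code
        return env


# Toy example: two editable steps over a small dataframe.
plan = StepwisePlan(steps=[
    Step("'age' is numeric and may contain missing values",
         "df = df.dropna(subset=['age'])", verified=True),
    Step("the goal is mean age per group",
         "result = df.groupby('group')['age'].mean()", verified=True),
])
env = plan.run({"df": pd.DataFrame({"group": ["a", "a", "b"], "age": [30, None, 40]})})
print(env["result"])
```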
+ + + + +### VizGroup: An AI-assisted Event-driven System for Collaborative Programming Learning Analytics +Authors: Xiaohang Tang, Sam Wong, Kevin Pu, Xi Chen, Yalong Yang, Yan Chen + +[Link](https://programs.sigchi.org/uist/2024/program/content/170725) + +Abstract: Programming instructors often conduct collaborative learning activities, like Peer Instruction, to foster a deeper understanding in students and enhance their engagement with learning. These activities, however, may not always yield productive outcomes due to the diversity of student mental models and their ineffective collaboration. In this work, we introduce VizGroup, an AI-assisted system that enables programming instructors to easily oversee students' real-time collaborative learning behaviors during large programming courses. VizGroup leverages Large Language Models (LLMs) to recommend event specifications for instructors so that they can simultaneously track and receive alerts about key correlation patterns between various collaboration metrics and ongoing coding tasks. We evaluated VizGroup with 12 instructors in a comparison study using a dataset collected from a Peer Instruction activity that was conducted in a large programming lecture. +The results showed that VizGroup helped instructors effectively overview, narrow down, and track nuances throughout students' behaviors. + + + +### Who did it? How User Agency is influenced by Visual Properties of Generated Images +Authors: Johanna Didion, Krzysztof Wolski, Dennis Wittchen, David Coyle, Thomas Leimkühler, Paul Strohmeier + +[Link](https://programs.sigchi.org/uist/2024/program/content/170827) + +Abstract: The increasing proliferation of AI and GenAI requires new interfaces tailored to how their specific affordances and human requirements meet. As GenAI is capable of taking over tasks from users on an unprecedented scale, designing the experience of agency -- if and how users experience control over the process and responsibility over the outcome -- is crucial. As an initial step towards design guidelines for shaping agency, we present a study that explores how features of AI-generated images influence users' experience of agency. We use two measures; temporal binding to implicitly estimate pre-reflective agency and magnitude estimation to assess user judgments of agency. We observe that abstract images lead to more temporal binding than images with semantic meaning. In contrast, the closer an image aligns with what a user might expect, the higher the agency judgment. When comparing the experiment results with objective metrics of image differences, we find that temporal binding results correlate with semantic differences, while agency judgments are better explained by local differences between images. This work contributes towards a future where agency is considered an important design dimension for GenAI interfaces. + + + +### FathomGPT: A Natural Language Interface for Interactively Exploring Ocean Science Data +Authors: Nabin Khanal, Chun Meng Yu, Jui-Cheng Chiu, Anav Chaudhary, Ziyue Zhang, Kakani Katija, Angus Forbes + +[Link](https://programs.sigchi.org/uist/2024/program/content/171001) + +Abstract: We introduce FathomGPT, an open source system for the interactive investigation of ocean science data via a natural language interface. FathomGPT was developed in close collaboration with marine scientists to enable researchers and ocean enthusiasts to explore and analyze the FathomNet image database. 
FathomGPT provides a custom information retrieval pipeline that leverages OpenAI’s large language models to enable: the creation of complex queries to retrieve images, taxonomic information, and scientific measurements; mapping common names and morphological features to scientific names; generating interactive charts on demand; and searching by image or specified patterns within an image. In designing FathomGPT, particular emphasis was placed on enhancing the user's experience by facilitating free-form exploration and optimizing response times. We present an architectural overview and implementation details of FathomGPT, along with a series of ablation studies that demonstrate the effectiveness of our approach to name resolution, fine tuning, and prompt modification. Additionally, we present usage scenarios of interactive data exploration sessions and document feedback from ocean scientists and machine learning experts. + + + +### VRCopilot: Authoring 3D Layouts with Generative AI Models in VR +Authors: Lei Zhang, Jin Pan, Jacob Gettig, Steve Oney, Anhong Guo + +[Link](https://programs.sigchi.org/uist/2024/program/content/170933) + +Abstract: Immersive authoring provides an intuitive medium for users to create 3D scenes via direct manipulation in Virtual Reality (VR). Recent advances in generative AI have enabled the automatic creation of realistic 3D layouts. However, it is unclear how capabilities of generative AI can be used in immersive authoring to support fluid interactions, user agency, and creativity. We introduce VRCopilot, a mixed-initiative system that integrates pre-trained generative AI models into immersive authoring to facilitate human-AI co-creation in VR. VRCopilot presents multimodal interactions to support rapid prototyping and iterations with AI, and intermediate representations such as wireframes to augment user controllability over the created content. Through a series of user studies, we evaluated the potential and challenges in manual, scaffolded, and automatic creation in immersive authoring. We found that scaffolded creation using wireframes enhanced the user agency compared to automatic creation. We also found that manual creation via multimodal specification offers the highest sense of creativity and agency. + + + + +## Body as the interface +### MouthIO: Fabricating Customizable Oral User Interfaces with Integrated Sensing and Actuation +Authors: Yijing Jiang, Julia Kleinau, Till Max Eckroth, Eve Hoggan, Stefanie Mueller, Michael Wessely + +[Link](https://programs.sigchi.org/uist/2024/program/content/170798) + +Abstract: This paper introduces MouthIO, the first customizable intraoral user interface that can be equipped with various sensors and output components. MouthIO consists of an SLA-printed brace that houses a flexible PCB within a bite-proof enclosure positioned between the molar teeth and inner cheeks. Our MouthIO design and fabrication technique enables makers to customize the oral user interfaces in both form and function at low cost. All parts in contact with the oral cavity are made of bio-compatible materials to ensure safety, while the design takes into account both comfort and portability. We demonstrate MouthIO through three application examples ranging from beverage consumption monitoring, health monitoring, to assistive technology. Results from our full-day user study indicate high wearability and social acceptance levels, while our technical evaluation demonstrates the device's ability to withstand adult bite forces. 
+ + + +### Can a Smartwatch Move Your Fingers? Compact and Practical Electrical Muscle Stimulation in a Smartwatch +HONORABLE_MENTION + +Authors: Akifumi Takahashi, Yudai Tanaka, Archit Tamhane, Alan Shen, Shan-Yuan Teng, Pedro Lopes + +[Link](https://programs.sigchi.org/uist/2024/program/content/170990) + +Abstract: Smartwatches gained popularity in the mainstream, making them into today’s de-facto wearables. Despite advancements in sensing, haptics on smartwatches is still restricted to tactile feedback (e.g., vibration). Most smartwatch-sized actuators cannot render strong force-feedback. Simultaneously, electrical muscle stimulation (EMS) promises compact force-feedback but, to actuate fingers requires users to wear many electrodes on their forearms. While forearm electrodes provide good accuracy, they detract EMS from being a practical force-feedback interface. To address this, we propose moving the electrodes to the wrist—conveniently packing them in the backside of a smartwatch. In our first study, we found that by cross-sectionally stimulating the wrist in 1,728 trials, we can actuate thumb extension, index extension & flexion, middle flexion, pinky flexion, and wrist flexion. Following, we engineered a compact EMS that integrates directly into a smartwatch’s wristband (with a custom stimulator, electrodes, demultiplexers, and communication). In our second study, we found that participants could calibrate our device by themselves ~50% faster than with conventional EMS. Furthermore, all participants preferred the experience of this device, especially for its social acceptability & practicality. We believe that our approach opens new applications for smartwatch-based interactions, such as haptic assistance during everyday tasks. + + + +### Power-over-Skin: Full-Body Wearables Powered By Intra-Body RF Energy +Authors: Andy Kong, Daehwa Kim, Chris Harrison + +[Link](https://programs.sigchi.org/uist/2024/program/content/170775) + +Abstract: Powerful computing devices are now small enough to be easily worn on the body. However, batteries pose a major design and user experience obstacle, adding weight and volume, and generally requiring periodic device removal and recharging. In response, we developed Power-over-Skin, an approach using the human body itself to deliver power to many distributed, battery-free, worn devices. We demonstrate power delivery from on-body distances as far as from head-to-toe, with sufficient energy to power microcontrollers capable of sensing and wireless communication. We share results from a study campaign that informed our implementation, as well as experiments that validate our final system. We conclude with several demonstration devices, ranging from input controllers to longitudinal bio-sensors, which highlight the efficacy and potential of our approach. + + + +### HandPad: Make Your Hand an On-the-go Writing Pad via Human Capacitance +Authors: Yu Lu, Dian Ding, Hao Pan, Yijie Li, Juntao Zhou, Yongjian Fu, Yongzhao Zhang, Yi-Chao Chen, Guangtao Xue + +[Link](https://programs.sigchi.org/uist/2024/program/content/170761) + +Abstract: The convenient text input system is a pain point for devices such as AR glasses, and it is difficult for existing solutions to balance portability and efficiency. This paper introduces HandPad, the system that turns the hand into an on-the-go touchscreen, which realizes interaction on the hand via human capacitance. 
HandPad achieves keystroke and handwriting inputs for letters, numbers, and Chinese characters, reducing the dependency on capacitive or pressure sensor arrays. Specifically, the system verifies the feasibility of touch point localization on the hand using the human capacitance model and proposes a handwriting recognition system based on Bi-LSTM and ResNet. The transfer learning-based system only needs a small amount of training data to build a handwriting recognition model for the target user. Experiments in real environments verify the feasibility of HandPad for keystroke (accuracy of 100%) and handwriting recognition for letters (accuracy of 99.1%), numbers (accuracy of 97.6%) and Chinese characters (accuracy of 97.9%). + + + + +## New Vizualizations +### VisCourt: In-Situ Guidance for Interactive Tactic Training in Mixed Reality +Authors: Liqi Cheng, Hanze Jia, Lingyun Yu, Yihong Wu, Shuainan Ye, Dazhen Deng, Hui Zhang, Xiao Xie, Yingcai Wu + +[Link](https://programs.sigchi.org/uist/2024/program/content/170791) + +Abstract: In team sports like basketball, understanding and executing tactics---coordinated plans of movements among players---are crucial yet complex, requiring extensive practice. These tactics require players to develop a keen sense of spatial and situational awareness. Traditional coaching methods, which mainly rely on basketball tactic boards and video instruction, often fail to bridge the gap between theoretical learning and the real-world application of tactics, due to shifts in view perspectives and a lack of direct experience with tactical scenarios. To address this challenge, we introduce VisCourt, a Mixed Reality (MR) tactic training system, in collaboration with a professional basketball team. To set up the MR training environment, we employed semi-automatic methods to simulate realistic 3D tactical scenarios and iteratively designed visual in-situ guidance. This approach enables full-body engagement in interactive training sessions on an actual basketball court and provides immediate feedback, significantly enhancing the learning experience. A user study with athletes and enthusiasts shows the effectiveness and satisfaction with VisCourt in basketball training and offers insights for the design of future SportsXR training systems. + + + +### Block and Detail: Scaffolding Sketch-to-Image Generation +Authors: Vishnu Sarukkai, Lu Yuan, Mia Tang, Maneesh Agrawala, Kayvon Fatahalian + +[Link](https://programs.sigchi.org/uist/2024/program/content/170911) + +Abstract: We introduce a novel sketch-to-image tool that aligns with the iterative refinement process of artists. Our tool lets users sketch blocking strokes to coarsely represent the placement and form of objects and detail strokes to refine their shape and silhouettes. We develop a two-pass algorithm for generating high-fidelity images from such sketches at any point in the iterative process. In the first pass we use a ControlNet to generate an image that strictly follows all the strokes (blocking and detail) and in the second pass we add variation by renoising regions surrounding blocking strokes. We also present a dataset generation scheme that, when used to train a ControlNet architecture, allows regions that do not contain strokes to be interpreted as not-yet-specified regions rather than empty space. We show that this partial-sketch-aware ControlNet can generate coherent elements from partial sketches that only contain a small number of strokes. 
The high-fidelity images produced by our approach serve as scaffolds that can help the user adjust the shape and proportions of objects or add additional elements to the composition. We demonstrate the effectiveness of our approach with a variety of examples and evaluative comparisons. Quantitatively, novice viewers prefer the quality of images from our algorithm over a baseline Scribble ControlNet for 82% of the pairs and find that our images have less distortion in 80% of the pairs. + + + +### EVE: Enabling Anyone to Train Robots using Augmented Reality +Authors: Jun Wang, Chun-Cheng Chang, Jiafei Duan, Dieter Fox, Ranjay Krishna + +[Link](https://programs.sigchi.org/uist/2024/program/content/170803) + +Abstract: The increasing affordability of robot hardware is accelerating the integration of robots into everyday activities. However, training a robot to automate a task requires expensive trajectory data where a trained human annotator moves a physical robot to train it. Consequently, only those with access to robots produce demonstrations to train robots. In this work, we remove this restriction with EVE, an iOS app that enables everyday users to train robots using intuitive augmented reality visualizations, without needing a physical robot. With EVE, users can collect demonstrations by specifying waypoints with their hands, visually inspecting the environment for obstacles, modifying existing waypoints, and verifying collected trajectories. In a user study (N=14, D=30) consisting of three common tabletop tasks, EVE outperformed three state-of-the-art interfaces in success rate and was comparable to kinesthetic teaching—physically moving a physical robot—in completion time, usability, motion intent communication, enjoyment, and preference (mean of p=0.30). EVE allows users to train robots for personalized tasks, such as sorting desk supplies, organizing ingredients, or setting up board games. We conclude by enumerating limitations and design considerations for future AR-based demonstration collection systems for robotics. + + + +### avaTTAR: Table Tennis Stroke Training with On-body and Detached Visualization in Augmented Reality +Authors: Dizhi Ma, Xiyun Hu, Jingyu Shi, Mayank Patel, Rahul Jain, Ziyi Liu, Zhengzhe Zhu, Karthik Ramani + +[Link](https://programs.sigchi.org/uist/2024/program/content/170894) + +Abstract: Table tennis stroke training is a critical aspect of player development. We designed a new augmented reality (AR) system, avaTTAR, for table tennis stroke training. The system provides both “on-body” (first-person view) and “detached” (third-person view) +visual cues, enabling users to visualize target strokes and correct their attempts effectively with this dual-perspective setup. By employing a combination of pose estimation algorithms and IMU sensors, avaTTAR captures and reconstructs the 3D body pose and paddle orientation of users during practice, allowing real-time comparison with expert strokes. Through a user study, we affirm avaTTAR’s capacity to amplify player experience and training results. + + + + +## Big to Small Fab +### Don't Mesh Around: Streamlining Manual-Digital Fabrication Workflows with Domain-Specific 3D Scanning +Authors: Ilan Moyer, Sam Bourgault, Devon Frost, Jennifer Jacobs + +[Link](https://programs.sigchi.org/uist/2024/program/content/170846) + +Abstract: Software-first digital fabrication workflows are often at odds with material-driven approaches to design.
Material-driven design is especially critical in manual ceramics, where the craftsperson shapes the form through hands-on engagement. We present the Craft-Aligned Scanner (CAS), a 3D scanning and clay-3D printing system that enables practitioners to design for digital fabrication through traditional pottery techniques. The CAS augments a pottery wheel that has 3D printing capabilities with a precision distance sensor on a vertically oriented linear axis. By increasing the height of the sensor as the wheel turns, we directly synthesize a 3D spiralized toolpath from the geometry of the object on the wheel, enabling the craftsperson to immediately transition from manual fabrication to 3D printing without leaving the tool. We develop new digital fabrication workflows with CAS to augment scanned forms with functional features and add both procedurally and real-time-generated surface textures. CAS demonstrates how 3D printers can support material-first digital fabrication design without foregoing the expressive possibilities of software-based design. + + + +### E-Joint: Fabrication of Large-Scale Interactive Objects Assembled by 3D Printed Conductive Parts with Copper Plated Joints +Authors: Xiaolong Li, Cheng Yao, Shang Shi, Shuyue Feng, Yujie Zhou, Haoye Dong, Shichao Huang, Xueyan Cai, Kecheng Jin, Fangtian Ying, Guanyun Wang + +[Link](https://programs.sigchi.org/uist/2024/program/content/170987) + +Abstract: The advent of conductive thermoplastic filaments and multi-material 3D printing has made it feasible to create interactive 3D printed objects. Yet, challenges arise due to volume constraints of desktop 3D printers and high resistive characteristics of current conductive materials, making the fabrication of large-scale or highly conductive interactive objects daunting. We propose E-Joint, a novel fabrication pipeline for 3D printed objects utilizing mortise and tenon joint structures combined with a copper plating process. The segmented pieces and joint structures are customized in software along with integrated circuits, and are then electroplated for enhanced conductivity. We designed four distinct electrified joint structures in our experiments and evaluated the practical feasibility and effectiveness of the fabrication pipeline. By constructing three applications with those structures, we verified the usability of E-Joint in making large-scale interactive objects and show a path toward a more integrated future for manufacturing. + + + +### MobiPrint: A Mobile 3D Printer for Environment-Scale Design and Fabrication +Authors: Daniel Campos Zamora, Liang He, Jon Froehlich + +[Link](https://programs.sigchi.org/uist/2024/program/content/170934) + +Abstract: 3D printing is transforming how we customize and create physical objects in engineering, accessibility, and art. However, this technology is still primarily limited to confined working areas and dedicated print beds, thereby detaching design and fabrication from real-world environments and making measuring and scaling objects tedious and labor-intensive. In this paper, we present MobiPrint, a prototype mobile fabrication system that combines elements from robotics, architecture, and Human-Computer Interaction (HCI) to enable environment-scale design and fabrication in ad-hoc indoor environments.
MobiPrint provides a multi-stage fabrication pipeline: first, the robotic 3D printer automatically scans and maps an indoor space; second, a custom design tool converts the map into an interactive CAD canvas for editing and placing models in the physical world; finally, the MobiPrint robot prints the object directly on the ground at the defined location. Through a "proof-by-demonstration" validation, we highlight our system's potential across different applications, including accessibility, home furnishing, floor signage, and art. We also conduct a technical evaluation to assess MobiPrint’s localization accuracy, ground surface adhesion, payload capacity, and mapping speed. We close with a discussion of open challenges and opportunities for the future of contextualized mobile fabrication. + + + +### StructCurves: Interlocking Block-Based Line Structures +Authors: Zezhou Sun, Devin Balkcom, Emily Whiting + +[Link](https://programs.sigchi.org/uist/2024/program/content/171006) + +Abstract: We present a new class of curved block-based line structures whose component chains are flexible when separated, and provably rigid when assembled together into an interlocking double chain. The joints are inspired by traditional zippers, where a binding fabric or mesh connects individual teeth. +Unlike traditional zippers, the joint design produces a rigid interlock with programmable curvature. This allows fairly strong curved structures to be built out of easily stored flexible chains. +In this paper, we introduce a pipeline for generating these curved structures using a novel block design template based on revolute joints. +Mesh embedded in these structures maintains block spacing and assembly order. We evaluate the rigidity of the curved structures through mechanical performance testing and demonstrate several applications. + + + + +## Machine Learning for User Interfaces +### UIClip: A Data-driven Model for Assessing User Interface Design +Authors: Jason Wu, Yi-Hao Peng, Xin Yue Li, Amanda Swearngin, Jeffrey Bigham, Jeffrey Nichols + +[Link](https://programs.sigchi.org/uist/2024/program/content/170950) + +Abstract: User interface (UI) design is a difficult yet important task for ensuring the usability, accessibility, and aesthetic qualities of applications. In our paper, we develop a machine-learned model, UIClip, for assessing the design quality and visual relevance of a UI given its screenshot and natural language description. To train UIClip, we used a combination of automated crawling, synthetic augmentation, and human ratings to construct a large-scale dataset of UIs, collated by description and ranked by design quality. Through training on the dataset, UIClip implicitly learns properties of good and bad designs by (i) assigning a numerical score that represents a UI design's relevance and quality and (ii) providing design suggestions. In an evaluation that compared the outputs of UIClip and other baselines to UIs rated by 12 human designers, we found that UIClip achieved the highest agreement with ground-truth rankings. Finally, we present three example applications that demonstrate how UIClip can facilitate downstream applications that rely on instantaneous assessment of UI design quality: (i) UI code generation, (ii) UI design tips generation, and (iii) quality-aware UI example search. 
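Because UIClip scores a screenshot against a natural-language description, its scoring step can be pictured with the familiar CLIP recipe of comparing image and text embeddings. The sketch below uses the off-the-shelf Hugging Face CLIP checkpoint purely as a stand-in; the checkpoint name, the `design_relevance` helper, and the ranking use case are illustrative assumptions, not the UIClip model or code released by the authors.

```python
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

# Stand-in checkpoint; UIClip itself is trained on UI screenshots and descriptions.
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")


def design_relevance(screenshot_paths, description):
    """Rank candidate UI screenshots by similarity to a natural-language description."""
    images = [Image.open(p).convert("RGB") for p in screenshot_paths]
    inputs = processor(text=[description], images=images,
                       return_tensors="pt", padding=True)
    with torch.no_grad():
        out = model(**inputs)
    # logits_per_image: one similarity score per screenshot for the single description
    scores = out.logits_per_image.squeeze(-1)
    return sorted(zip(screenshot_paths, scores.tolist()),
                  key=lambda pair: pair[1], reverse=True)


# e.g., design_relevance(["login_a.png", "login_b.png"],
#                        "a clean login screen for a banking app")
```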
+ + + +### UICrit: Enhancing Automated Design Evaluation with a UI Critique Dataset +Authors: Peitong Duan, Chin-Yi Cheng, Gang Li, Bjoern Hartmann, Yang Li + +[Link](https://programs.sigchi.org/uist/2024/program/content/170823) + +Abstract: Automated UI evaluation can be beneficial for the design process; for example, to compare different UI designs, or conduct automated heuristic evaluation. LLM-based UI evaluation, in particular, holds the promise of generalizability to a wide variety of UI types and evaluation tasks. However, current LLM-based techniques do not yet match the performance of human evaluators. We hypothesize that automatic evaluation can be improved by collecting a targeted UI feedback dataset and then using this dataset to enhance the performance of general-purpose LLMs. We present a targeted dataset of 3,059 design critiques and quality ratings for 983 mobile UIs, collected from seven designers, each with at least a year of professional design experience. We carried out an in-depth analysis to characterize the dataset's features. We then applied this dataset to achieve a 55% performance gain in LLM-generated UI feedback via various few-shot and visual prompting techniques. We also discuss future applications of this dataset, including training a reward model for generative UI techniques, and fine-tuning a tool-agnostic multi-modal LLM that automates UI evaluation. + + + +### EyeFormer: Predicting Personalized Scanpaths with Transformer-Guided Reinforcement Learning +Authors: Yue Jiang, Zixin Guo, Hamed Rezazadegan Tavakoli, Luis Leiva, Antti Oulasvirta + +[Link](https://programs.sigchi.org/uist/2024/program/content/170925) + +Abstract: From a visual-perception perspective, modern graphical user interfaces (GUIs) comprise a complex graphics-rich two-dimensional visuospatial arrangement of text, images, and interactive objects such as buttons and menus. While existing models can accurately predict regions and objects that are likely to attract attention “on average”, no scanpath model has been capable of predicting scanpaths for an individual. To close this gap, we introduce EyeFormer, which utilizes a Transformer architecture as a policy network to guide a deep reinforcement learning algorithm that predicts gaze locations. Our model offers the unique capability of producing personalized predictions when given a few user scanpath samples. It can predict full scanpath information, including fixation positions and durations, across individuals and various stimulus types. Additionally, we demonstrate applications in GUI layout optimization driven by our model. + + + +### GPTVoiceTasker: Advancing Multi-step Mobile Task Efficiency Through Dynamic Interface Exploration and Learning +Authors: Minh Duc Vu, Han Wang, Jieshan Chen, Zhuang Li, Shengdong Zhao, Zhenchang Xing, Chunyang Chen + +[Link](https://programs.sigchi.org/uist/2024/program/content/170994) + +Abstract: Virtual assistants have the potential to play an important role in helping users achieve different tasks. However, these systems face challenges in their real-world usability, characterized by inefficiency and struggles in grasping user intentions. Leveraging recent advances in Large Language Models (LLMs), we introduce GPTVoiceTasker, a virtual assistant poised to enhance user experiences and task efficiency on mobile devices. GPTVoiceTasker excels at intelligently deciphering user commands and executing relevant device interactions to streamline task completion.
For unprecedented tasks, GPTVoiceTasker utilises the contextual information and on-screen content to continuously explore and execute the tasks. In addition, the system continually learns from historical user commands to automate subsequent task invocations, further enhancing execution efficiency. From our experiments, GPTVoiceTasker achieved 84.5% accuracy in parsing human commands into executable actions and 85.7% accuracy in automating multi-step tasks. In our user study, GPTVoiceTasker boosted task efficiency in real-world scenarios by 34.85%, accompanied by positive participant feedback. We made GPTVoiceTasker open-source, inviting further research into LLMs utilization for diverse tasks through prompt engineering and leveraging user usage data to improve efficiency. + + + +### VisionTasker: Mobile Task Automation Using Vision Based UI Understanding and LLM Task Planning +Authors: Yunpeng Song, Yiheng Bian, Yongtao Tang, Guiyu Ma, Zhongmin Cai + +[Link](https://programs.sigchi.org/uist/2024/program/content/170816) + +Abstract: Mobile task automation is an emerging field that leverages AI to streamline and optimize the execution of routine tasks on mobile devices, thereby enhancing efficiency and productivity. Traditional methods, such as Programming By Demonstration (PBD), are limited due to their dependence on predefined tasks and susceptibility to app updates. Recent advancements have utilized the view hierarchy to collect UI information and employed Large Language Models (LLM) to enhance task automation. However, view hierarchies have accessibility issues and face potential problems like missing object descriptions or misaligned structures. This paper introduces VisionTasker, a two-stage framework combining vision-based UI understanding and LLM task planning, for mobile task automation in a step-by-step manner. VisionTasker firstly converts a UI screenshot into natural language interpretations using a vision-based UI understanding approach, eliminating the need for view hierarchies. Secondly, it adopts a step-by-step task planning method, presenting one interface at a time to the LLM. The LLM then identifies relevant elements within the interface and determines the next action, enhancing accuracy and practicality. Extensive experiments show that VisionTasker outperforms previous methods, providing effective UI representations across four datasets. Additionally, in automating 147 real-world tasks on an Android smartphone, VisionTasker demonstrates advantages over humans in tasks where humans show unfamiliarity and shows significant improvements when integrated with the PBD mechanism. VisionTasker is open-source and available at https://github.com/AkimotoAyako/VisionTasker. + + + + +## Programming UI +### NotePlayer: Engaging Jupyter Notebooks for Dynamic Presentation of Analytical Processes +Authors: Yang Ouyang, Leixian Shen, Yun Wang, Quan Li + +[Link](https://programs.sigchi.org/uist/2024/program/content/170819) + +Abstract: Diverse presentation formats play a pivotal role in effectively conveying code and analytical processes during data analysis. One increasingly popular format is tutorial videos, particularly those based on Jupyter notebooks, which offer an intuitive interpretation of code and vivid explanations of analytical procedures. However, creating such videos requires a diverse skill set and significant manual effort, posing a barrier for many analysts. 
To bridge this gap, we introduce an innovative tool called NotePlayer, which connects notebook cells to video segments and incorporates a computational engine with language models to streamline video creation and editing. Our aim is to make the process more accessible and efficient for analysts. To inform the design of NotePlayer, we conducted a formative study and performed content analysis on a corpus of 38 Jupyter tutorial videos. This helped us identify key patterns and challenges encountered in existing tutorial videos, guiding the development of NotePlayer. Through a combination of a usage scenario and a user study, we validated the effectiveness of NotePlayer. The results show that the tool streamlines the video creation and facilitates the communication process for data analysts. + + + +### Tyche: Making Sense of Property-Based Testing Effectiveness +Authors: Harrison Goldstein, Jeffrey Tao, Zac Hatfield-Dodds, Benjamin Pierce, Andrew Head + +[Link](https://programs.sigchi.org/uist/2024/program/content/170922) + +Abstract: Software developers increasingly rely on automated methods to assess the +correctness of their code. One such method is property-based testing +(PBT), wherein a test harness generates hundreds or thousands of inputs +and checks the outputs of the program on those inputs using parametric +properties. Though powerful, PBT induces a sizable gulf of evaluation: +developers need to put in nontrivial effort to understand how well the +different test inputs exercise the software under test. To bridge this +gulf, we propose Tyche, a user interface that supports sensemaking +around the effectiveness of property-based tests. Guided by a formative +design exploration, our design of Tyche supports developers with +interactive, configurable views of test behavior with tight integrations +into modern developer testing workflow. These views help developers +explore global testing behavior and individual test inputs alike. To +accelerate the development of powerful, interactive PBT tools, we define +a standard for PBT test reporting and integrate it with a widely used +PBT library. A self-guided online usability study revealed that Tyche's +visualizations help developers to more accurately assess software +testing effectiveness. + + + + +### CoLadder: Manipulating Code Generation via Multi-Level Blocks +Authors: Ryan Yen, Jiawen Zhu, Sangho Suh, Haijun Xia, Jian Zhao + +[Link](https://programs.sigchi.org/uist/2024/program/content/171012) + +Abstract: This paper adopted an iterative design process to gain insights into programmers' strategies when using LLMs for programming. We proposed CoLadder, a novel system that supports programmers by facilitating hierarchical task decomposition, direct code segment manipulation, and result evaluation during prompt authoring. A user study with 12 experienced programmers showed that CoLadder is effective in helping programmers externalize their problem-solving intentions flexibly, improving their ability to evaluate and modify code across various abstraction levels, from their task's goal to final code implementation. 
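The hierarchical decomposition CoLadder supports can be pictured as a small tree in which every node carries a natural-language intent and the code segment generated for it. The sketch below is a hypothetical illustration of that structure (the `PromptBlock` class and its `flatten` method are not from the paper); assembling the final program is then just a depth-first walk over the tree.

```python
from dataclasses import dataclass, field


@dataclass
class PromptBlock:
    """One node in the task hierarchy: an intent, its generated code, and sub-blocks."""
    intent: str
    code: str = ""                                 # editable code segment for this block
    children: list = field(default_factory=list)   # finer-grained sub-tasks

    def flatten(self) -> str:
        """Concatenate code depth-first, keeping intents as comments so the
        assembled script stays traceable back to the prompt hierarchy."""
        lines = [f"# {self.intent}"]
        if self.code:
            lines.append(self.code)
        for child in self.children:
            lines.append(child.flatten())
        return "\n".join(lines)


# A toy hierarchy: a goal decomposed into subtasks, each mapped to a short code segment.
task = PromptBlock("Plot monthly revenue", children=[
    PromptBlock("Load the CSV", "import pandas as pd\ndf = pd.read_csv('revenue.csv')"),
    PromptBlock("Aggregate by month", "monthly = df.groupby('month')['revenue'].sum()"),
    PromptBlock("Draw the chart", "monthly.plot(kind='bar')"),
])
print(task.flatten())
```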
+ + + +### SQLucid: Grounding Natural Language Database Queries with Interactive Explanations +Authors: Yuan Tian, Jonathan Kummerfeld, Toby Li, Tianyi Zhang + +[Link](https://programs.sigchi.org/uist/2024/program/content/170951) + +Abstract: Though recent advances in machine learning have led to significant improvements in natural language interfaces for databases, the accuracy and reliability of these systems remain limited, especially in high-stakes domains. This paper introduces SQLucid, a novel user interface that bridges the gap between non-expert users and complex database querying processes. SQLucid addresses existing limitations by integrating visual correspondence, intermediate query results, and editable step-by-step SQL explanations in natural language to facilitate user understanding and engagement. This unique blend of features empowers users to understand and refine SQL queries easily and precisely. Two user studies and one quantitative experiment were conducted to validate SQLucid’s effectiveness, showing significant improvement in task completion accuracy and user confidence compared to existing interfaces. Our code is available at https://github.com/magic-YuanTian/SQLucid. + + + + +## Next Gen Input +### PointerVol: A Laser Pointer for Swept Volumetric Displays +Authors: Unai Javier Fernández, Iosune Sarasate Azcona, Iñigo Ezcurdia, Manuel Lopez-Amo, Ivan Fernández, Asier Marzo + +[Link](https://programs.sigchi.org/uist/2024/program/content/170724) + +Abstract: A laser pointer is a commonly used device that does not require communication with the display system or modifications to the applications; the presenter can simply pick up a pointer and start using it. When a laser pointer is used on a volumetric display, a line rather than a point appears, making it unsuitable for pointing at 3D locations. PointerVol is a modified laser pointer that allows users to point to 3D positions inside a swept volumetric display. We propose two PointerVol implementations, based on timing and distance measurements, and evaluate the pointing performance of each. Finally, we present other features such as multi-user pointing, line patterns, and a multi-finger wearable. PointerVol is a simple device that can help to popularize volumetric displays, or at least to make them more usable for presentations with true-3D content. + + + +### RFTIRTouch: Touch Sensing Device for Dual-sided Transparent Plane Based on Repropagated Frustrated Total Internal Reflection +Authors: Ratchanon Wattanaparinton, Kotaro Kitada, Kentaro Takemura + +[Link](https://programs.sigchi.org/uist/2024/program/content/170876) + +Abstract: Frustrated total internal reflection (FTIR) imaging is widely applied in various touch-sensing systems. However, vision-based touch sensing has structural constraints, and the system size tends to increase. Although a sensing system with reduced thickness has been developed recently using repropagated FTIR (RFTIR), it lacks the property of instant installation anywhere because observation from the side of a transparent medium is required. Therefore, this study proposes an "RFTIRTouch" sensing device to capture RFTIR images from the contact surface. RFTIRTouch detects the touch position on a dual-sided plane using a physics-based estimation and can be retrofitted to existing transparent media with simple calibration. Our evaluation experiments confirm that the touch position can be estimated within an error of approximately 2.1 mm under optimal conditions.
Furthermore, several application examples are implemented to demonstrate the advantages of RFTIRTouch, such as its ability to measure dual sides with a single sensor and waterproof the contact surface. + + + +### IRIS: Wireless Ring for Vision-based Smart Home Interaction +Authors: Maruchi Kim, Antonio Glenn, Bandhav Veluri, Yunseo Lee, Eyoel Gebre, Aditya Bagaria, Shwetak Patel, Shyamnath Gollakota + +[Link](https://programs.sigchi.org/uist/2024/program/content/171018) + +Abstract: Integrating cameras into wireless smart rings has been challenging due to size and power constraints. We introduce IRIS, the first wireless vision-enabled smart ring system for smart home interactions. Equipped with a camera, Bluetooth radio, inertial measurement unit (IMU), and an onboard battery, IRIS meets the small size, weight, and power (SWaP) requirements for ring devices. IRIS is context-aware, adapting its gesture set to the detected device, and can last for 16-24 hours on a single charge. IRIS leverages the scene semantics to achieve instance-level device recognition. In a study involving 23 participants, IRIS consistently outpaced voice commands, with a higher proportion of participants expressing a preference for IRIS over voice commands regarding toggling a device's state, granular control, and social acceptability. Our work pushes the boundary of what is possible with ring form-factor devices, addressing system challenges and opening up novel interaction capabilities. + + + +### Silent Impact: Tracking Tennis Shots from the Passive Arm +Authors: Junyong Park, Saelyne Yang, Sungho Jo + +[Link](https://programs.sigchi.org/uist/2024/program/content/170795) + +Abstract: Wearable technology has transformed sports analytics, offering new dimensions in enhancing player experience. Yet, many solutions involve cumbersome setups that inhibit natural motion. In tennis, existing products require sensors on the racket or dominant arm, causing distractions and discomfort. We propose Silent Impact, a novel and user-friendly system that analyzes tennis shots using a sensor placed on the passive arm. Collecting Inertial Measurement Unit sensor data from 20 recreational tennis players, we developed neural networks that exclusively utilize passive arm data to detect and classify six shots, achieving a classification accuracy of 88.2% and a detection F1 score of 86.0%, comparable to the dominant arm. These models were then incorporated into an end-to-end prototype, which records passive arm motion through a smartwatch and displays a summary of shots on a mobile app. User study (N=10) showed that participants felt less burdened physically and mentally using Silent Impact on the passive arm. Overall, our research establishes the passive arm as an effective, comfortable alternative for tennis shot analysis, advancing user-friendly sports analytics. + + + + +## LLM: New applications +### VoicePilot: Harnessing LLMs as Speech Interfaces for Assistive Robotics +Authors: Akhil Padmanabha, Jessie Yuan, Janavi Gupta, Zulekha Karachiwalla, Carmel Majidi, Henny Admoni, Zackory Erickson + +[Link](https://programs.sigchi.org/uist/2024/program/content/170789) + +Abstract: Physically assistive robots present an opportunity to significantly increase the well-being and independence of individuals with motor impairments or other forms of disability who are unable to complete activities of daily living. 
Speech interfaces, especially ones that utilize Large Language Models (LLMs), can enable individuals to effectively and naturally communicate high-level commands and nuanced preferences to robots. Frameworks for integrating LLMs as interfaces to robots for high level task planning and code generation have been proposed, but fail to incorporate human-centric considerations which are essential while developing assistive interfaces. In this work, we present a framework for incorporating LLMs as speech interfaces for physically assistive robots, constructed iteratively with 3 stages of testing involving a feeding robot, culminating in an evaluation with 11 older adults at an independent living facility. We use both quantitative and qualitative data from the final study to validate our framework and additionally provide design guidelines for using LLMs as speech interfaces for assistive robots. Videos, code, and supporting files are located on our project website: https://sites.google.com/andrew.cmu.edu/voicepilot/ + + + +### ComPeer: A Generative Conversational Agent for Proactive Peer Support +Authors: Tianjian Liu, Hongzheng Zhao, Yuheng Liu, Xingbo Wang, Zhenhui Peng + +[Link](https://programs.sigchi.org/uist/2024/program/content/170845) + +Abstract: Conversational Agents (CAs) acting as peer supporters have been widely studied and demonstrated to be beneficial for people's mental health. However, previous peer support CAs either are user-initiated or follow predefined rules to initiate the conversations, which may discourage users from engaging and building relationships with the CAs for long-term benefits. In this paper, we develop ComPeer, a generative CA that can proactively offer adaptive peer support to users. ComPeer leverages large language models to detect and reflect significant events in the dialogue, enabling it to strategically plan the timing and content of proactive care. In addition, ComPeer incorporates peer support strategies, conversation history, and its persona into the generative messages. Our one-week between-subjects study (N=24) demonstrates ComPeer's strength in providing peer support over time and boosting users' engagement compared to a baseline user-initiated CA. We report users' interaction patterns with ComPeer and discuss implications for designing proactive generative agents to promote people's well-being. + + + +### SHAPE-IT: Exploring Text-to-Shape-Display for Generative Shape-Changing Behaviors with LLMs +Authors: Wanli Qian, Chenfeng Gao, Anup Sathya, Ryo Suzuki, Ken Nakagaki + +[Link](https://programs.sigchi.org/uist/2024/program/content/170820) + +Abstract: This paper introduces text-to-shape-display, a novel approach to generating dynamic shape changes in pin-based shape displays through natural language commands. By leveraging large language models (LLMs) and AI-chaining, our approach allows users to author shape-changing behaviors on demand through text prompts without programming. We describe the foundational aspects necessary for such a system, including the identification of key generative elements (primitive, animation, and interaction) and design requirements to enhance user interaction, based on formative exploration and iterative design processes. Based on these insights, we develop SHAPE-IT, an LLM-based authoring tool for a 24 x 24 shape display, which translates the user's textual command into executable code and allows for quick exploration through a web-based control interface.
We evaluate the effectiveness of SHAPE-IT in two ways: 1) performance evaluation and 2) user evaluation (N=10). The study conclusions highlight the ability to facilitate rapid ideation of a wide range of shape-changing behaviors with AI. However, the findings also expose accuracy-related challenges and limitations, prompting further exploration into refining the framework for leveraging AI to better suit the unique requirements of shape-changing systems. + + + +### WaitGPT: Monitoring and Steering Conversational LLM Agent in Data Analysis with On-the-Fly Code Visualization +Authors: Liwenhan Xie, Chengbo Zheng, Haijun Xia, Huamin Qu, Chen Zhu-Tian + +[Link](https://programs.sigchi.org/uist/2024/program/content/170828) + +Abstract: Large language models (LLMs) support data analysis through conversational user interfaces, as exemplified in OpenAI's ChatGPT (formally known as Advanced Data Analysis or Code Interpreter). Essentially, LLMs produce code for accomplishing diverse analysis tasks. However, presenting raw code can obscure the logic and hinder user verification. To empower users with enhanced comprehension and augmented control over analysis conducted by LLMs, we propose a novel approach to transform LLM-generated code into an interactive visual representation. In the approach, users are provided with a clear, step-by-step visualization of the LLM-generated code in real time, allowing them to understand, verify, and modify individual data operations in the analysis. Our design decisions are informed by a formative study (N=8) probing into user practice and challenges. We further developed a prototype named WaitGPT and conducted a user study (N=12) to evaluate its usability and effectiveness. The findings from the user study reveal that WaitGPT facilitates monitoring and steering of data analysis performed by LLMs, enabling participants to enhance error detection and increase their overall confidence in the results. + + + + +## Break Q&A: Haptics +### LoopBot: Representing Continuous Haptics of Grounded Objects in Room-scale VR +Authors: Tetsushi Ikeda, Kazuyuki Fujita, Kumpei Ogawa, Kazuki Takashima, Yoshifumi Kitamura + +[Link](https://programs.sigchi.org/uist/2024/program/content/171016) + +Abstract: In room-scale virtual reality, providing continuous haptic feedback from touching grounded objects, such as walls and handrails, has been challenging due to the user's walking range and the required force. In this study, we propose LoopBot, a novel technique to provide continuous haptic feedback from grounded objects using only a single user-following robot. Specifically, LoopBot is equipped with a loop-shaped haptic prop attached to an omnidirectional robot that scrolls to cancel out the robot's displacement, giving the user the haptic sensation that the prop is actually fixed in place, or “grounded.” We first introduce the interaction design space of LoopBot and, as one of its promising interaction scenarios, implement a prototype for the experience of walking while grasping handrails. A performance evaluation shows that scrolling the prop cancels 77.5% of the robot's running speed on average. A preliminary user test (N=10) also shows that the subjective realism of the experience and the sense of the virtual handrails being grounded were significantly higher than when the prop was not scrolled. Based on these findings, we discuss possible further development of LoopBot.
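The control idea at the heart of LoopBot, scrolling the loop prop against the robot's own motion so the surface under the user's hand appears world-fixed, reduces to a simple velocity relationship. The sketch below illustrates it with hypothetical function names (not the authors' controller); the 77.5% average cancellation ratio reported above is used only as an example parameter.

```python
def prop_scroll_velocity(robot_velocity_mps: float, cancellation: float = 0.775) -> float:
    """Velocity (m/s) at which the loop prop should scroll so that the surface under
    the user's hand stays approximately world-fixed while the robot moves.

    Ideally the prop scrolls at exactly -robot_velocity; the paper reports that the
    prototype cancels about 77.5% of the robot's speed on average, which the
    `cancellation` factor models here.
    """
    return -cancellation * robot_velocity_mps


def residual_hand_speed(robot_velocity_mps: float, cancellation: float = 0.775) -> float:
    """Apparent speed of the prop surface in the world frame (what the hand still feels)."""
    return robot_velocity_mps + prop_scroll_velocity(robot_velocity_mps, cancellation)


# Example: if the robot follows the user at 0.4 m/s, about 0.09 m/s of drift remains,
# i.e., 22.5% of the robot speed is still perceptible at the hand.
print(residual_hand_speed(0.4))
```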
+ + + +### JetUnit: Rendering Diverse Force Feedback in Virtual Reality Using Water Jets +Authors: Zining Zhang, Jiasheng Li, Zeyu Yan, Jun Nishida, Huaishu Peng + +[Link](https://programs.sigchi.org/uist/2024/program/content/170767) + +Abstract: We propose JetUnit, a water-based VR haptic system designed to produce force feedback with a wide spectrum of intensities and frequencies through water jets. The key challenge in designing this system lies in optimizing parameters to enable the haptic device to generate force feedback that closely replicates the most intense force produced by direct water jets while ensuring the user remains dry. In this paper, we present the key design parameters of the JetUnit wearable device determined through a set of quantitative experiments and a perception study. We further conducted a user study to assess the impact of integrating our haptic solutions into virtual reality experiences. The results revealed that, by adhering to the design principles of JetUnit, the water-based haptic system is capable of delivering diverse force feedback sensations, significantly enhancing the immersive experience in virtual reality. + + + +### Selfrionette: A Fingertip Force-Input Controller for Continuous Full-Body Avatar Manipulation and Diverse Haptic Interactions +Authors: Takeru Hashimoto, Yutaro Hirao + +[Link](https://programs.sigchi.org/uist/2024/program/content/170833) + +Abstract: We propose Selfrionette, a controller that uses fingertip force input to drive avatar movements in virtual reality (VR). +This system enables users to interact with virtual objects and walk in VR using only fingertip force, overcoming physical and spatial constraints. Additionally, by fixing users' fingers, it provides users with counterforces equivalent to the applied force, allowing for diverse and wide dynamic range haptic feedback by adjusting the relationship between force input and virtual movement. +To evaluate the effectiveness of the proposed method, this paper focuses on hand interaction as a first step. +In User Study 1, we measured usability and embodiment during reaching tasks under Selfrionette, body tracking, and finger tracking conditions. +In User Study 2, we investigated whether users could perceive haptic properties such as weight, friction, and compliance under the same conditions as User Study 1. +Selfrionette was found to be comparable to body tracking in realism of haptic interaction, enabling embodied avatar experiences even in limited spatial conditions. + + + +### SpinShot: Optimizing Both Physical and Perceived Force Feedback of Flywheel-Based, Directional Impact Handheld Devices +Authors: Chia-An Fan, En-Huei Wu, Chia-Yu Cheng, Yu-Cheng Chang, Alvaro Lopez, Yu Chen, Chia-Chen Chi, Yi-Sheng Chan, Ching-Yi Tsai, Mike Chen + +[Link](https://programs.sigchi.org/uist/2024/program/content/170972) + +Abstract: Real-world impact, such as hitting a tennis ball and a baseball, generates instantaneous, directional impact forces. However, current ungrounded force feedback technologies, such as air jets and propellers, can only generate directional impulses that are 10x-10,000x weaker. We present SpinShot, a flywheel-based device with a solenoid-actuated stopper capable of generating directional impulse of 22Nm in 1ms, which is more than 10x stronger than prior ungrounded directional technologies. Furthermore, we present a novel force design that reverses the flywheel immediately after the initial impact, to significantly increase the perceived magnitude. 
We conducted a series of two formative, perceptual studies (n=16, 18), followed by a summative user experience study (n=16) that compared SpinShot vs. moving mass (solenoid) and vs. air jets in a VR baseball hitting game. Results showed that SpinShot significantly improved realism, immersion, magnitude (p < .01) compared to both baselines, but significantly reduced comfort vs. air jets primarily due to the 2.9x device weight. Overall, SpinShot was preferred by 63-75% of the participants. + + + + +## Break Q&A: Body as the interface +### MouthIO: Fabricating Customizable Oral User Interfaces with Integrated Sensing and Actuation +Authors: Yijing Jiang, Julia Kleinau, Till Max Eckroth, Eve Hoggan, Stefanie Mueller, Michael Wessely + +[Link](https://programs.sigchi.org/uist/2024/program/content/170798) + +Abstract: This paper introduces MouthIO, the first customizable intraoral user interface that can be equipped with various sensors and output components. MouthIO consists of an SLA-printed brace that houses a flexible PCB within a bite-proof enclosure positioned between the molar teeth and inner cheeks. Our MouthIO design and fabrication technique enables makers to customize the oral user interfaces in both form and function at low cost. All parts in contact with the oral cavity are made of bio-compatible materials to ensure safety, while the design takes into account both comfort and portability. We demonstrate MouthIO through three application examples ranging from beverage consumption monitoring, health monitoring, to assistive technology. Results from our full-day user study indicate high wearability and social acceptance levels, while our technical evaluation demonstrates the device's ability to withstand adult bite forces. + + + +### Can a Smartwatch Move Your Fingers? Compact and Practical Electrical Muscle Stimulation in a Smartwatch +HONORABLE_MENTION + +Authors: Akifumi Takahashi, Yudai Tanaka, Archit Tamhane, Alan Shen, Shan-Yuan Teng, Pedro Lopes + +[Link](https://programs.sigchi.org/uist/2024/program/content/170990) + +Abstract: Smartwatches gained popularity in the mainstream, making them into today’s de-facto wearables. Despite advancements in sensing, haptics on smartwatches is still restricted to tactile feedback (e.g., vibration). Most smartwatch-sized actuators cannot render strong force-feedback. Simultaneously, electrical muscle stimulation (EMS) promises compact force-feedback but, to actuate fingers requires users to wear many electrodes on their forearms. While forearm electrodes provide good accuracy, they detract EMS from being a practical force-feedback interface. To address this, we propose moving the electrodes to the wrist—conveniently packing them in the backside of a smartwatch. In our first study, we found that by cross-sectionally stimulating the wrist in 1,728 trials, we can actuate thumb extension, index extension & flexion, middle flexion, pinky flexion, and wrist flexion. Following, we engineered a compact EMS that integrates directly into a smartwatch’s wristband (with a custom stimulator, electrodes, demultiplexers, and communication). In our second study, we found that participants could calibrate our device by themselves ~50% faster than with conventional EMS. Furthermore, all participants preferred the experience of this device, especially for its social acceptability & practicality. We believe that our approach opens new applications for smartwatch-based interactions, such as haptic assistance during everyday tasks. 
+ + + +### Power-over-Skin: Full-Body Wearables Powered By Intra-Body RF Energy +Authors: Andy Kong, Daehwa Kim, Chris Harrison + +[Link](https://programs.sigchi.org/uist/2024/program/content/170775) + +Abstract: Powerful computing devices are now small enough to be easily worn on the body. However, batteries pose a major design and user experience obstacle, adding weight and volume, and generally requiring periodic device removal and recharging. In response, we developed Power-over-Skin, an approach using the human body itself to deliver power to many distributed, battery-free, worn devices. We demonstrate power delivery from on-body distances as far as from head-to-toe, with sufficient energy to power microcontrollers capable of sensing and wireless communication. We share results from a study campaign that informed our implementation, as well as experiments that validate our final system. We conclude with several demonstration devices, ranging from input controllers to longitudinal bio-sensors, which highlight the efficacy and potential of our approach. + + + +### HandPad: Make Your Hand an On-the-go Writing Pad via Human Capacitance +Authors: Yu Lu, Dian Ding, Hao Pan, Yijie Li, Juntao Zhou, Yongjian Fu, Yongzhao Zhang, Yi-Chao Chen, Guangtao Xue + +[Link](https://programs.sigchi.org/uist/2024/program/content/170761) + +Abstract: The convenient text input system is a pain point for devices such as AR glasses, and it is difficult for existing solutions to balance portability and efficiency. This paper introduces HandPad, the system that turns the hand into an on-the-go touchscreen, which realizes interaction on the hand via human capacitance. HandPad achieves keystroke and handwriting inputs for letters, numbers, and Chinese characters, reducing the dependency on capacitive or pressure sensor arrays. Specifically, the system verifies the feasibility of touch point localization on the hand using the human capacitance model and proposes a handwriting recognition system based on Bi-LSTM and ResNet. The transfer learning-based system only needs a small amount of training data to build a handwriting recognition model for the target user. Experiments in real environments verify the feasibility of HandPad for keystroke (accuracy of 100%) and handwriting recognition for letters (accuracy of 99.1%), numbers (accuracy of 97.6%) and Chinese characters (accuracy of 97.9%). + + + + +## Break Q&A: Vision-based UIs +### Vision-Based Hand Gesture Customization from a Single Demonstration +Authors: Soroush Shahi, Vimal Mollyn, Cori Tymoszek Park, Runchang Kang, Asaf Liberman, Oron Levy, Jun Gong, Abdelkareem Bedri, Gierad Laput + +[Link](https://programs.sigchi.org/uist/2024/program/content/170938) + +Abstract: Hand gesture recognition is becoming a more prevalent mode of human-computer interaction, especially as cameras proliferate across everyday devices. Despite continued progress in this field, gesture customization is often underexplored. Customization is crucial since it enables users to define and demonstrate gestures that are more natural, memorable, and accessible. However, customization requires efficient usage of user-provided data. We introduce a method that enables users to easily design bespoke gestures with a monocular camera from one demonstration. We employ transformers and meta-learning techniques to address few-shot learning challenges. 
Unlike prior work, our method supports any combination of one-handed, two-handed, static, and dynamic gestures, including different viewpoints, and the ability to handle irrelevant hand movements. We implement three real-world applications using our customization method, conduct a user study, and achieve up to 94% average recognition accuracy from one demonstration. Our work provides a viable path for vision-based gesture customization, laying the foundation for future advancements in this domain.



### VirtualNexus: Enhancing 360-Degree Video AR/VR Collaboration with Environment Cutouts and Virtual Replicas
Authors: Xincheng Huang, Michael Yin, Ziyi Xia, Robert Xiao

[Link](https://programs.sigchi.org/uist/2024/program/content/170877)

Abstract: Asymmetric AR/VR collaboration systems bring a remote VR user to a local AR user’s physical environment, allowing them to communicate and work within a shared virtual/physical space. Such systems often display the remote environment through 3D reconstructions or 360° videos. While 360° cameras stream an environment in higher quality, they lack spatial information, making them less interactable. We present VirtualNexus, an AR/VR collaboration system that enhances 360° video AR/VR collaboration with environment cutouts and virtual replicas. VR users can define cutouts of the remote environment to interact with as a world-in-miniature, and their interactions are synchronized to the local AR perspective. Furthermore, AR users can rapidly scan and share 3D virtual replicas of physical objects using neural rendering. We demonstrated our system’s utility through 3 example applications and evaluated our system in a dyadic usability test. VirtualNexus extends the interaction space of 360° telepresence systems, offering improved physical presence, versatility, and clarity in interactions.



### Personal Time-Lapse
Authors: Nhan Tran, Ethan Yang, Angelique Taylor, Abe Davis

[Link](https://programs.sigchi.org/uist/2024/program/content/170932)

Abstract: Our bodies are constantly in motion—from the bending of arms and legs to the less conscious movement of breathing, our precise shape and location change constantly. This can make subtler developments (e.g., the growth of hair, or the healing of a wound) difficult to observe. Our work focuses on helping users record and visualize this type of subtle, longer-term change. We present a mobile tool that combines custom 3D tracking with interactive visual feedback and computational imaging to capture personal time-lapse, which approximates longer-term video of the subject (typically, part of the capturing user’s body) under a fixed viewpoint, body pose, and lighting condition. These personal time-lapses offer a powerful and detailed way to track visual changes of the subject over time. We begin with a formative study that examines what makes personal time-lapse so difficult to capture. Building on our findings, we motivate the design of our capture tool, evaluate this design with users, and demonstrate its effectiveness in a variety of challenging examples.



### Chromaticity Gradient Mapping for Interactive Control of Color Contrast in Images and Video
Authors: Ruyu Yan, Jiatian Sun, Abe Davis

[Link](https://programs.sigchi.org/uist/2024/program/content/170867)

Abstract: We present a novel perceptually-motivated interactive tool for using color contrast to enhance details represented in the lightness channel of images and video.
Our method lets users adjust the perceived contrast of different details by manipulating local chromaticity while preserving the original lightness of individual pixels. Inspired by the use of similar chromaticity mappings in painting, our tool effectively offers contrast along a user-selected gradient of chromaticities as additional bandwidth for representing and enhancing different details in an image. We provide an interface for our tool that closely resembles the familiar design of tonal contrast curve controls that are available in most professional image editing software. We show that our tool is effective for enhancing the perceived contrast of details without altering lightness in an image and present many examples of effects that can be achieved with our method on both images and video.




## Break Q&A: Next Gen Input
### PointerVol: A Laser Pointer for Swept Volumetric Displays
Authors: Unai Javier Fernández, Iosune Sarasate Azcona, Iñigo Ezcurdia, Manuel Lopez-Amo, Ivan Fernández, Asier Marzo

[Link](https://programs.sigchi.org/uist/2024/program/content/170724)

Abstract: A laser pointer is a commonly used device that does not require communication with the display system or modifications to the applications; the presenter can just take a pointer and start using it. When a laser pointer is used on a volumetric display, a line rather than a point appears, making it unsuitable for pointing at 3D locations. PointerVol is a modified laser pointer that allows users to point to 3D positions inside a swept volumetric display. We propose two PointerVol implementations based on timing and distance measurements, and we evaluate the pointing performance of each. Finally, we present other features such as multi-user pointing, line patterns, and a multi-finger wearable. PointerVol is a simple device that can help to popularize volumetric displays, or at least to make them more usable for presentations with true-3D content.



### RFTIRTouch: Touch Sensing Device for Dual-sided Transparent Plane Based on Repropagated Frustrated Total Internal Reflection
Authors: Ratchanon Wattanaparinton, Kotaro Kitada, Kentaro Takemura

[Link](https://programs.sigchi.org/uist/2024/program/content/170876)

Abstract: Frustrated total internal reflection (FTIR) imaging is widely applied in various touch-sensing systems. However, vision-based touch sensing has structural constraints, and the system size tends to increase. Although a sensing system with reduced thickness has been developed recently using repropagated FTIR (RFTIR), it lacks the property of instant installation anywhere because observation from the side of a transparent medium is required. Therefore, this study proposes an "RFTIRTouch" sensing device to capture RFTIR images from the contact surface. RFTIRTouch detects the touch position on a dual-sided plane using a physics-based estimation and can be retrofitted to existing transparent media with simple calibration. Our evaluation experiments confirm that the touch position can be estimated within an error of approximately 2.1 mm under optimal conditions. Furthermore, several application examples are implemented to demonstrate the advantages of RFTIRTouch, such as its ability to measure dual sides with a single sensor and waterproof the contact surface.
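
The common sensing step behind FTIR-style touch systems like this one is recovering a touch location from the bright blob a fingertip produces in the captured image. The sketch below is not the paper's repropagated-FTIR, dual-sided physics model; it is only a minimal, generic blob-centroid step (threshold plus intensity-weighted centroid) of the kind such pipelines typically start from, shown on a synthetic frame, with all names chosen here purely for illustration.

```python
# Minimal, generic FTIR-style touch localization sketch (not the paper's
# physics-based dual-sided estimator): threshold the camera frame and take
# the intensity-weighted centroid of the bright contact blob.
import numpy as np

def touch_centroid(frame: np.ndarray, threshold: float = 0.5):
    """Return (row, col) of the bright blob's centroid, or None if no touch."""
    mask = frame > threshold
    if not mask.any():
        return None
    rows, cols = np.nonzero(mask)
    weights = frame[rows, cols]
    return (np.average(rows, weights=weights),
            np.average(cols, weights=weights))

# Synthetic 64x64 frame with a Gaussian "touch" blob near (40, 22).
y, x = np.mgrid[0:64, 0:64]
frame = np.exp(-((y - 40) ** 2 + (x - 22) ** 2) / (2 * 3.0 ** 2))
print(touch_centroid(frame))  # approximately (40.0, 22.0)
```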
+ + + +### IRIS: Wireless Ring for Vision-based Smart Home Interaction +Authors: Maruchi Kim, Antonio Glenn, Bandhav Veluri, Yunseo Lee, Eyoel Gebre, Aditya Bagaria, Shwetak Patel, Shyamnath Gollakota + +[Link](https://programs.sigchi.org/uist/2024/program/content/171018) + +Abstract: Integrating cameras into wireless smart rings has been challenging due to size and power constraints. We introduce IRIS, the first wireless vision-enabled smart ring system for smart home interactions. Equipped with a camera, Bluetooth radio, inertial measurement unit (IMU), and an onboard battery, IRIS meets the small size, weight, and power (SWaP) requirements for ring devices. IRIS is context-aware, adapting its gesture set to the detected device, and can last for 16-24 hours on a single charge. IRIS leverages the scene semantics to achieve instance-level device recognition. In a study involving 23 participants, IRIS consistently outpaced voice commands, with a higher proportion of participants expressing a preference for IRIS over voice commands regarding toggling a device's state, granular control, and social acceptability. Our work pushes the boundary of what is possible with ring form-factor devices, addressing system challenges and opening up novel interaction capabilities. + + + +### Silent Impact: Tracking Tennis Shots from the Passive Arm +Authors: Junyong Park, Saelyne Yang, Sungho Jo + +[Link](https://programs.sigchi.org/uist/2024/program/content/170795) + +Abstract: Wearable technology has transformed sports analytics, offering new dimensions in enhancing player experience. Yet, many solutions involve cumbersome setups that inhibit natural motion. In tennis, existing products require sensors on the racket or dominant arm, causing distractions and discomfort. We propose Silent Impact, a novel and user-friendly system that analyzes tennis shots using a sensor placed on the passive arm. Collecting Inertial Measurement Unit sensor data from 20 recreational tennis players, we developed neural networks that exclusively utilize passive arm data to detect and classify six shots, achieving a classification accuracy of 88.2% and a detection F1 score of 86.0%, comparable to the dominant arm. These models were then incorporated into an end-to-end prototype, which records passive arm motion through a smartwatch and displays a summary of shots on a mobile app. User study (N=10) showed that participants felt less burdened physically and mentally using Silent Impact on the passive arm. Overall, our research establishes the passive arm as an effective, comfortable alternative for tennis shot analysis, advancing user-friendly sports analytics. + + + + +## Break Q&A: Future of Typing +### OptiBasePen: Mobile Base+Pen Input on Passive Surfaces by Sensing Relative Base Motion Plus Close-Range Pen Position +Authors: Andreas Fender, Mohamed Kari + +[Link](https://programs.sigchi.org/uist/2024/program/content/170879) + +Abstract: Digital pen input devices based on absolute pen position sensing, such as Wacom Pens, support high-fidelity pen input. However, they require specialized sensing surfaces like drawing tablets, which can have a large desk footprint, constrain the possible input area, and limit mobility. In contrast, digital pens with integrated relative sensing enable mobile use on passive surfaces, but suffer from motion artifacts or require surface contact at all times, deviating from natural pen affordances. We present OptiBasePen, a device for mobile pen input on ordinary surfaces. 
Our prototype consists of two parts: the "base" on which the hand rests and the pen for fine-grained input. The base features a high-precision mouse sensor to sense its own relative motion, and two infrared image sensors to track the absolute pen tip position within the base's frame of reference. This enables pen input on ordinary surfaces without external cameras while also avoiding drift from pen micro-movements. In this work, we present our prototype as well as the general base+pen concept, which combines relative and absolute sensing. + + + +### Palmrest+: Expanding Laptop Input Space with Shear Force on Palm-Resting Area +Authors: Jisu Yim, Seoyeon Bae, Taejun Kim, Sunbum Kim, Geehyuk Lee + +[Link](https://programs.sigchi.org/uist/2024/program/content/170781) + +Abstract: The palmrest area of laptops has the potential as an additional input space, considering its consistent palm contact during keyboard interaction. We propose Palmrest+, leveraging shear force exerted on the palmrest area. We suggest two input techniques: Palmrest Shortcut, for instant shortcut execution, and Palmrest Joystick, for continuous value input. These allow seamless and subtle input amidst keyboard typing. Evaluation of Palmrest Shortcut against conventional keyboard shortcuts revealed faster performance for applying shear force in unimanual and bimanual-manner with a significant reduction in gaze shifting. Additionally, the assessment of Palmrest Joystick against the laptop touchpad demonstrated comparable performance in selecting one- and two- dimensional targets with low-precision pointing, i.e., for short distances and large target sizes. The maximal hand displacement significantly decreased for both Palmrest Shortcut and Palmrest Joystick compared to conventional methods. These findings verify the feasibility and effectiveness of leveraging the palmrest area as an additional input space on laptops, offering promising enhanced typing-related user interaction experiences. + + + +### TouchInsight: Uncertainty-aware Rapid Touch and Text Input for Mixed Reality from Egocentric Vision +Authors: Paul Streli, Mark Richardson, Fadi Botros, Shugao Ma, Robert Wang, Christian Holz + +[Link](https://programs.sigchi.org/uist/2024/program/content/170817) + +Abstract: While passive surfaces offer numerous benefits for interaction in mixed reality, reliably detecting touch input solely from head-mounted cameras has been a long-standing challenge. Camera specifics, hand self-occlusion, and rapid movements of both head and fingers introduce considerable uncertainty about the exact location of touch events. Existing methods have thus not been capable of achieving the performance needed for robust interaction. +In this paper, we present a real-time pipeline that detects touch input from all ten fingers on any physical surface, purely based on egocentric hand tracking. Our method TouchInsight comprises a neural network to predict the moment of a touch event, the finger making contact, and the touch location. TouchInsight represents locations through a bivariate Gaussian distribution to account for uncertainties due to sensing inaccuracies, which we resolve through contextual priors to accurately infer intended user input. +We first evaluated our method offline and found that it locates input events with a mean error of 6.3 mm, and accurately detects touch events (F1=0.99) and identifies the finger used (F1=0.96). 
In an online evaluation, we then demonstrate the effectiveness of our approach for a core application of dexterous touch input: two-handed text entry. In our study, participants typed 37.0 words per minute with an uncorrected error rate of 2.9% on average. + + + +### Can Capacitive Touch Images Enhance Mobile Keyboard Decoding? +Authors: Piyawat Lertvittayakumjorn, Shanqing Cai, Billy Dou, Cedric Ho, Shumin Zhai + +[Link](https://programs.sigchi.org/uist/2024/program/content/170719) + +Abstract: Capacitive touch sensors capture the two-dimensional spatial profile (referred to as a touch heatmap) of a finger's contact with a mobile touchscreen. However, the research and design of touchscreen mobile keyboards -- one of the most speed and accuracy demanding touch interfaces -- has focused on the location of the touch centroid derived from the touch image heatmap as the input, discarding the rest of the raw spatial signals. In this paper, we investigate whether touch heatmaps can be leveraged to further improve the tap decoding accuracy for mobile touchscreen keyboards. Specifically, we developed and evaluated machine-learning models that interpret user taps by using the centroids and/or the heatmaps as their input and studied the contribution of the heatmaps to model performance. The results show that adding the heatmap into the input feature set led to 21.4% relative reduction of character error rates on average, compared to using the centroid alone. Furthermore, we conducted a live user study with the centroid-based and heatmap-based decoders built into Pixel 6 Pro devices and observed lower error rate, faster typing speed, and higher self-reported satisfaction score based on the heatmap-based decoder than the centroid-based decoder. These findings underline the promise of utilizing touch heatmaps for improving typing experience in mobile keyboards. + + + + +## Break Q&A: Storytime +### Story-Driven: Exploring the Impact of Providing Real-time Context Information on Automated Storytelling +Authors: Jan Henry Belz, Lina Weilke, Anton Winter, Philipp Hallgarten, Enrico Rukzio, Tobias Grosse-Puppendahl + +[Link](https://programs.sigchi.org/uist/2024/program/content/170763) + +Abstract: Stories have long captivated the human imagination with narratives that enrich our lives. Traditional storytelling methods are often static and not designed to adapt to the listener’s environment, which is full of dynamic changes. For instance, people often listen to stories in the form of podcasts or audiobooks while traveling in a car. Yet, conventional in-car storytelling systems do not embrace the adaptive potential of this space. The advent of generative AI is the key to creating content that is not just personalized but also responsive to the changing parameters of the environment. We introduce a novel system for interactive, real-time story narration that leverages environment and user context in correspondence with estimated arrival times to adjust the generated story continuously. Through two comprehensive real-world studies with a total of 30 participants in a vehicle, we assess the user experience, level of immersion, and perception of the environment provided by the prototype. Participants' feedback shows a significant improvement over traditional storytelling and highlights the importance of context information for generative storytelling systems. 
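
Story-Driven's central idea, continuously folding trip context (surroundings, weather, estimated arrival time) into each generation request so the narration paces itself to the drive, can be pictured with the small sketch below. The field names, prompt wording, and the idea of sending the string to an LLM are illustrative assumptions about the general pattern, not the authors' actual prompting scheme.

```python
# Toy sketch of context-conditioned story prompting in the spirit of
# Story-Driven: real-time trip context is folded into each generation
# request so the narration can wrap up by the estimated arrival time.
# Field names and prompt wording are illustrative assumptions only.
from dataclasses import dataclass

@dataclass
class TripContext:
    location: str           # e.g., from the vehicle's navigation system
    minutes_to_arrival: int
    weather: str
    story_so_far: str

def build_story_prompt(ctx: TripContext) -> str:
    return (
        "Continue the story below for a listener in a car.\n"
        f"Current surroundings: {ctx.location}; weather: {ctx.weather}.\n"
        f"Bring the narrative arc to a close within about {ctx.minutes_to_arrival} minutes.\n"
        f"Story so far: {ctx.story_so_far}\n"
    )

ctx = TripContext("coastal highway", 12, "light rain",
                  "The courier finally found the lighthouse keeper...")
print(build_story_prompt(ctx))  # this string would be sent to a language model
```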
+ + + +### Lumina: A Software Tool for Fostering Creativity in Designing Chinese Shadow Puppets +Authors: Zhihao Yao, Yao Lu, Qirui Sun, Shiqing Lyu, Hanxuan Li, Xing-Dong Yang, Xuezhu Wang, Guanhong Liu, Haipeng Mi + +[Link](https://programs.sigchi.org/uist/2024/program/content/170765) + +Abstract: Shadow puppetry, a culturally rich storytelling art, faces challenges transitioning to the digital realm. Creators in the early design phase struggle with crafting intricate patterns, textures, and basic animations while adhering to stylistic conventions - hindering creativity, especially for novices. This paper presents Lumina, a tool to facilitate the early Chinese shadow puppet design stage. Lumina provides contour templates, animations, scene editing tools, and machine-generated traditional puppet patterns. These features liberate creators from tedious tasks, allowing focus on the creative process. Developed based on a formative study with puppet creators, the web-based Lumina enables wide dissemination. An evaluation with 18 participants demonstrated Lumina's effectiveness and ease of use, with participants successfully creating designs spanning traditional themes to contemporary and science-fiction concepts. + + + +### PortalInk: 2.5D Visual Storytelling with SVG Parallax and Waypoint Transitions +Authors: Tongyu Zhou, Joshua Yang, Vivian Chan, Ji Won Chung, Jeff Huang + +[Link](https://programs.sigchi.org/uist/2024/program/content/170783) + +Abstract: Efforts to expand the authoring of visual stories beyond the 2D canvas have commonly mapped flat imagery to 3D scenes or objects. This translation requires spatial reasoning, as artists must think in two spaces. We propose PortalInk, a tool for artists to craft and export 2.5D graphical stories while remaining in 2D space by using SVG transitions. This is achieved via a parallax effect that generates a sense of depth that can be further explored using pan and zoom interactions. Any canvas position can be saved and linked to in a closed drawn stroke, or "portal," allowing the artist to create spatially discontinuous, or even infinitely looping visual trajectories. We provide three case studies and a gallery to demonstrate how artists can naturally incorporate these interactions to craft immersive comics, as well as re-purpose them to support use cases beyond drawing such as animation, slide-based presentations, web design, and digital journalism. + + + +### DrawTalking: Building Interactive Worlds by Sketching and Speaking +Authors: Karl Rosenberg, Rubaiat Habib Kazi, Li-Yi Wei, Haijun Xia, Ken Perlin + +[Link](https://programs.sigchi.org/uist/2024/program/content/170730) + +Abstract: We introduce DrawTalking, an approach to building and controlling interactive worlds by sketching and speaking while telling stories. It emphasizes user control and flexibility, and gives programming-like capability without requiring code. An early open-ended study with our prototype shows that the mechanics resonate and are applicable to many creative-exploratory use cases, with the potential to inspire and inform research in future natural interfaces for creative exploration and authoring. + + + +### Patchview: LLM-powered Worldbuilding with Generative Dust and Magnet Visualization +Authors: John Chung, Max Kreminski + +[Link](https://programs.sigchi.org/uist/2024/program/content/170729) + +Abstract: Large language models (LLMs) can help writers build story worlds by generating world elements, such as factions, characters, and locations. 
However, making sense of many generated elements can be overwhelming. Moreover, if the user wants to precisely control aspects of generated elements that are difficult to specify verbally, prompting alone may be insufficient. We introduce Patchview, a customizable LLM-powered system that visually aids worldbuilding by allowing users to interact with story concepts and elements through the physical metaphor of magnets and dust. Elements in Patchview are visually dragged closer to concepts with high relevance, facilitating sensemaking. The user can also steer the generation with verbally elusive concepts by indicating the desired position of the element between concepts. When the user disagrees with the LLM's visualization and generation, they can correct those by repositioning the element. These corrections can be used to align the LLM's future behaviors to the user's perception. With a user study, we show that Patchview supports the sensemaking of world elements and steering of element generation, facilitating exploration during the worldbuilding process. Patchview provides insights on how customizable visual representation can help sensemake, steer, and align generative AI model behaviors with the user's intentions.



### An Interactive System for Supporting Creative Exploration of Cinematic Composition Designs
Authors: Rui He, Huaxin Wei, Ying Cao

[Link](https://programs.sigchi.org/uist/2024/program/content/170806)

Abstract: Designing cinematic compositions, which involves moving cameras through a scene, is essential yet challenging in filmmaking. Machinima filmmaking provides real-time virtual environments for exploring different compositions flexibly and efficiently. However, producing high-quality cinematic compositions in such environments still requires significant cinematography skills and creativity. This paper presents Cinemassist, a tool designed to support and enhance this creative process by generating a variety of cinematic composition proposals at both keyframe and scene levels, which users can incorporate into their workflows and achieve more creative results. At the crux of our system is a deep generative model trained on real movie data, which can generate plausible, diverse camera poses conditioned on 3D animations and additional input semantics. Our model enables an interactive cinematic composition design workflow where users can co-design with the model by being inspired by model-generated suggestions while having control over the generation process. Our user study and expert rating find Cinemassist can facilitate the design process for users of different backgrounds and enhance the design quality, especially for users with animation expertise, demonstrating its potential as an invaluable tool in the context of digital filmmaking.




## Break Q&A: Manipulating Text
### Beyond the Chat: Executable and Verifiable Text-Editing with LLMs
Authors: Philippe Laban, Jesse Vig, Marti Hearst, Caiming Xiong, Chien-Sheng Wu

[Link](https://programs.sigchi.org/uist/2024/program/content/170790)

Abstract: Conversational interfaces powered by Large Language Models (LLMs) have recently become a popular way to obtain feedback during document editing. However, standard chat-based conversational interfaces cannot explicitly surface the editing changes that they suggest. To give the author more control when editing with an LLM, we present InkSync, an editing interface that suggests executable edits directly within the document being edited.
Because LLMs are known to introduce factual errors, InkSync also supports a 3-stage approach to mitigate this risk: Warn authors when a suggested edit introduces new information, help authors Verify the new information's accuracy through external search, and allow a third party to Audit with a posteriori verification via a trace of all auto-generated content.
Two usability studies confirm the effectiveness of InkSync's components when compared to standard LLM-based chat interfaces, leading to more accurate and more efficient editing, and improved user experience.



### ScriptViz: A Visualization Tool to Aid Scriptwriting based on a Large Movie Database
Authors: Anyi Rao, Jean-Peïc Chou, Maneesh Agrawala

[Link](https://programs.sigchi.org/uist/2024/program/content/170838)

Abstract: Scriptwriters usually rely on their mental visualization to create a vivid story by using their imagination to see, feel, and experience the scenes they are writing. Besides mental visualization, they often refer to existing images or scenes in movies and analyze the visual elements to create a certain mood or atmosphere. In this paper, we develop a new tool, ScriptViz, to provide external visualization based on a large movie database for the screenwriting process. It retrieves reference visuals on the fly based on scripts’ text and dialogue from a large movie database. The tool provides two types of control on visual elements that enable writers to 1) see exactly what they want with fixed visual elements and 2) see variances in uncertain elements. User evaluation among 15 scriptwriters shows that ScriptViz is able to present scriptwriters with consistent yet diverse visual possibilities, aligning closely with their scripts and helping their creation.



### SkipWriter: LLM-Powered Abbreviated Writing on Tablets
Authors: Zheer Xu, Shanqing Cai, Mukund Varma T, Subhashini Venugopalan, Shumin Zhai

[Link](https://programs.sigchi.org/uist/2024/program/content/170930)

Abstract: Large Language Models (LLMs) may offer transformative opportunities for text input, especially for physically demanding modalities like handwriting. We studied a form of abbreviated handwriting by designing, developing, and evaluating a prototype, named SkipWriter, that converts handwritten strokes of a variable-length prefix-based abbreviation (e.g., "ho a y" as handwritten strokes) into the intended full phrase (e.g., "how are you" in the digital format) based on the preceding context. SkipWriter consists of an in-production handwriting recognizer and an LLM fine-tuned on this task. With flexible pen input, SkipWriter allows the user to add and revise prefix strokes when predictions do not match the user's intent. A user evaluation demonstrated a 60% reduction in motor movements with an average speed of 25.78 WPM. We also showed that this reduction is close to the ceiling of our model in an offline simulation.



### Bluefish: Composing Diagrams with Declarative Relations
Authors: Josh Pollock, Catherine Mei, Grace Huang, Elliot Evans, Daniel Jackson, Arvind Satyanarayan

[Link](https://programs.sigchi.org/uist/2024/program/content/170824)

Abstract: Diagrams are essential tools for problem-solving and communication as they externalize conceptual structures using spatial relationships. But when picking a diagramming framework, users are faced with a dilemma.
They can either use a highly expressive but low-level toolkit, whose API does not match their domain-specific concepts, or select a high-level typology, which offers a recognizable vocabulary but supports a limited range of diagrams. To address this gap, we introduce Bluefish: a diagramming framework inspired by component-based user interface (UI) libraries. Bluefish lets users create diagrams using relations: declarative, composable, and extensible diagram fragments that relax the concept of a UI component. Unlike a component, a relation does not have sole ownership over its children nor does it need to fully specify their layout. To render diagrams, Bluefish extends a traditional tree-based scenegraph to a compound graph that captures both hierarchical and adjacent relationships between nodes. To evaluate our system, we construct a diverse example gallery covering many domains including mathematics, physics, computer science, and even cooking. We show that Bluefish's relations are effective declarative primitives for diagrams. Bluefish is open source, and we aim to shape it into both a usable tool and a research platform. + + + + +## Break Q&A: Hot Interfaces +### Fiery Hands: Designing Thermal Glove through Thermal and Tactile Integration for Virtual Object Manipulation +Authors: Haokun Wang, Yatharth Singhal, Hyunjae Gil, Jin Ryong Kim + +[Link](https://programs.sigchi.org/uist/2024/program/content/170880) + +Abstract: We present a novel approach to render thermal and tactile feedback to the palm and fingertips through thermal and tactile integration. Our approach minimizes the obstruction of the palm and inner side of the fingers and enables virtual object manipulation while providing localized and global thermal feedback. By leveraging thermal actuators positioned strategically on the outer palm and back of the fingers in interplay with tactile actuators, our approach exploits thermal referral and tactile masking phenomena. Through a series of user studies, we validate the perception of localized thermal sensations across the palm and fingers, showcasing the ability to generate diverse thermal patterns. Furthermore, we demonstrate the efficacy of our approach in VR applications, replicating diverse thermal interactions with virtual objects. This work represents significant progress in thermal interactions within VR, offering enhanced sensory immersion at an optimal energy cost. + + + +### DexteriSync: A Hand Thermal I/O Exoskeleton for Morphing Finger Dexterity Experience +Authors: Ximing Shen, Youichi Kamiyama, Kouta Minamizawa, Jun Nishida + +[Link](https://programs.sigchi.org/uist/2024/program/content/170898) + +Abstract: Skin temperature is an important physiological factor for human hand dexterity. Leveraging this feature, we engineered an exoskeleton, called DexteriSync, that can dynamically adjust the user's finger dexterity and induce different thermal perceptions by modulating finger skin temperature. This exoskeleton comprises flexible silicone-copper tube segments, 3D-printed finger sockets, a 3D-printed palm base, a pump system, and a water temperature control with a storage unit. By realising an embodied experience of compromised dexterity, DexteriSync can help product designers understand the lived experience of compromised hand dexterity, such as that of the elderly and/or neurodivergent users, when designing daily necessities for them. 
We validated DexteriSync via a technical evaluation and two user studies, demonstrating that it can change skin temperature, dexterity, and thermal perception. An exploratory session with design students and an autistic individual with compromised dexterity demonstrated that the exoskeleton provided a more realistic experience than video education and allowed them to gain higher confidence in their designs. The results advocated for the efficacy of experiencing embodied compromised finger dexterity, which can promote an understanding of the related physical challenges and lead to a more persuasive design for assistive tools.



### Flip-Pelt: Motor-Driven Peltier Elements for Rapid Thermal Stimulation and Congruent Pressure Feedback in Virtual Reality
Authors: Seongjun Kang, Gwangbin Kim, Seokhyun Hwang, Jeongju Park, Ahmed Elsharkawy, SeungJun Kim

[Link](https://programs.sigchi.org/uist/2024/program/content/170885)

Abstract: This study introduces "Flip-Pelt," a motor-driven Peltier device designed to provide rapid thermal stimulation and congruent pressure feedback in virtual reality (VR) environments. Our system incorporates eight motor-driven Peltier elements, allowing for the flipping of preheated or cooled elements to the opposite side. In evaluating the Flip-Pelt device, we assess users' ability to distinguish between heat/cold sources by their patterns and stiffness, and its impact on enhancing haptic experiences in VR content that involves contact with various thermal sources. Our findings demonstrate that rapid thermal stimulation and congruent pressure feedback provided by Flip-Pelt enhance the recognition accuracy of thermal patterns and the stiffness of virtual objects. These features also improve haptic experiences in VR scenarios through their temporal congruency between tactile and thermal stimuli. Additionally, we discuss the scalability of the Flip-Pelt system to other body parts by proposing design prototypes.



### Hydroptical Thermal Feedback: Spatial Thermal Feedback Using Visible Lights and Water
Authors: Sosuke Ichihashi, Masahiko Inami, Hsin-Ni Ho, Noura Howell

[Link](https://programs.sigchi.org/uist/2024/program/content/170722)

Abstract: We control the temperature of materials in everyday interactions, recognizing temperature's important influence on our bodies, minds, and experiences. However, thermal feedback is an under-explored modality in human-computer interaction partly due to its limited temporal (slow) and spatial (small-area and non-moving) capabilities. We introduce hydroptical thermal feedback, a spatial thermal feedback method that works by applying visible lights on body parts in water. Through physical measurements and psychophysical experiments, our results show: (1) Humans perceive thermal sensations when visible lights are cast on the skin under water, and perceived warmth is greater for lights with shorter wavelengths, (2) temporal capabilities, (3) apparent motion (spatial) of warmth and coolness sensations, and (4) hydroptical thermal feedback can support the perceptual illusion that the water itself is warmer. We propose applications, including virtual reality (VR), shared water experiences, and therapies. Overall, this paper contributes hydroptical thermal feedback as a novel method, empirical results demonstrating its unique capabilities, proposed applications, and design recommendations for using hydroptical thermal feedback. Our method introduces controlled, spatial thermal perceptions to water experiences.
+ + + + +## Break Q&A: LLM: New applications
### VoicePilot: Harnessing LLMs as Speech Interfaces for Assistive Robotics
Authors: Akhil Padmanabha, Jessie Yuan, Janavi Gupta, Zulekha Karachiwalla, Carmel Majidi, Henny Admoni, Zackory Erickson

[Link](https://programs.sigchi.org/uist/2024/program/content/170789)

Abstract: Physically assistive robots present an opportunity to significantly increase the well-being and independence of individuals with motor impairments or other forms of disability who are unable to complete activities of daily living. Speech interfaces, especially ones that utilize Large Language Models (LLMs), can enable individuals to effectively and naturally communicate high-level commands and nuanced preferences to robots. Frameworks for integrating LLMs as interfaces to robots for high level task planning and code generation have been proposed, but fail to incorporate human-centric considerations which are essential while developing assistive interfaces. In this work, we present a framework for incorporating LLMs as speech interfaces for physically assistive robots, constructed iteratively with 3 stages of testing involving a feeding robot, culminating in an evaluation with 11 older adults at an independent living facility. We use both quantitative and qualitative data from the final study to validate our framework and additionally provide design guidelines for using LLMs as speech interfaces for assistive robots. Videos, code, and supporting files are located on our project website: https://sites.google.com/andrew.cmu.edu/voicepilot/



### ComPeer: A Generative Conversational Agent for Proactive Peer Support
Authors: Tianjian Liu, Hongzheng Zhao, Yuheng Liu, Xingbo Wang, Zhenhui Peng

[Link](https://programs.sigchi.org/uist/2024/program/content/170845)

Abstract: Conversational Agents (CAs) acting as peer supporters have been widely studied and demonstrated beneficial for people's mental health. However, previous peer support CAs either are user-initiated or follow predefined rules to initiate the conversations, which may discourage users from engaging and building relationships with the CAs for long-term benefits. In this paper, we develop ComPeer, a generative CA that can proactively offer adaptive peer support to users. ComPeer leverages large language models to detect and reflect significant events in the dialogue, enabling it to strategically plan the timing and content of proactive care. In addition, ComPeer incorporates peer support strategies, conversation history, and its persona into the generative messages. Our one-week between-subjects study (N=24) demonstrates ComPeer's strength in providing peer support over time and boosting users' engagement compared to a baseline user-initiated CA. We report users' interaction patterns with ComPeer and discuss implications for designing proactive generative agents to promote people's well-being.



### SHAPE-IT: Exploring Text-to-Shape-Display for Generative Shape-Changing Behaviors with LLMs
Authors: Wanli Qian, Chenfeng Gao, Anup Sathya, Ryo Suzuki, Ken Nakagaki

[Link](https://programs.sigchi.org/uist/2024/program/content/170820)

Abstract: This paper introduces text-to-shape-display, a novel approach to generating dynamic shape changes in pin-based shape displays through natural language commands. By leveraging large language models (LLMs) and AI-chaining, our approach allows users to author shape-changing behaviors on demand through text prompts without programming.
We describe the foundational aspects necessary for such a system, including the identification of key generative elements (primitive, animation, and interaction) and design requirements to enhance user interaction, based on formative exploration and iterative design processes. Based on these insights, we develop SHAPE-IT, an LLM-based authoring tool for a 24 x 24 shape display, which translates the user's textual command into executable code and allows for quick exploration through a web-based control interface. We evaluate the effectiveness of SHAPE-IT in two ways: 1) performance evaluation and 2) user evaluation (N=10). The study conclusions highlight the ability to facilitate rapid ideation of a wide range of shape-changing behaviors with AI. However, the findings also expose accuracy-related challenges and limitations, prompting further exploration into refining the framework for leveraging AI to better suit the unique requirements of shape-changing systems.



### WaitGPT: Monitoring and Steering Conversational LLM Agent in Data Analysis with On-the-Fly Code Visualization
Authors: Liwenhan Xie, Chengbo Zheng, Haijun Xia, Huamin Qu, Chen Zhu-Tian

[Link](https://programs.sigchi.org/uist/2024/program/content/170828)

Abstract: Large language models (LLMs) support data analysis through conversational user interfaces, as exemplified in OpenAI's ChatGPT (formerly known as Advanced Data Analysis or Code Interpreter). Essentially, LLMs produce code for accomplishing diverse analysis tasks. However, presenting raw code can obscure the logic and hinder user verification. To empower users with enhanced comprehension and augmented control over analysis conducted by LLMs, we propose a novel approach to transform LLM-generated code into an interactive visual representation. In the approach, users are provided with a clear, step-by-step visualization of the LLM-generated code in real time, allowing them to understand, verify, and modify individual data operations in the analysis. Our design decisions are informed by a formative study (N=8) probing into user practice and challenges. We further developed a prototype named WaitGPT and conducted a user study (N=12) to evaluate its usability and effectiveness. The findings from the user study reveal that WaitGPT facilitates monitoring and steering of data analysis performed by LLMs, enabling participants to enhance error detection and increase their overall confidence in the results.




## Break Q&A: Big to Small Fab
### Don't Mesh Around: Streamlining Manual-Digital Fabrication Workflows with Domain-Specific 3D Scanning
Authors: Ilan Moyer, Sam Bourgault, Devon Frost, Jennifer Jacobs

[Link](https://programs.sigchi.org/uist/2024/program/content/170846)

Abstract: Software-first digital fabrication workflows are often at odds with material-driven approaches to design. Material-driven design is especially critical in manual ceramics, where the craftsperson shapes the form through hands-on engagement. We present the Craft-Aligned Scanner (CAS), a 3D scanning and clay-3D printing system that enables practitioners to design for digital fabrication through traditional pottery techniques. The CAS augments a pottery wheel that has 3D printing capabilities with a precision distance sensor on a vertically oriented linear axis.
By increasing the height of the sensor as the wheel turns, we directly synthesize a 3D spiralized toolpath from the geometry of the object on the wheel, enabling the craftsperson to immediately transition from manual fabrication to 3D printing without leaving the tool. We develop new digital fabrication workflows with CAS to augment scanned forms with functional features and add both procedurally and real-time-generated surface textures. CAS demonstrates how 3D printers can support material-first digital fabrication design without foregoing the expressive possibilities of software-based design.



### E-Joint: Fabrication of Large-Scale Interactive Objects Assembled by 3D Printed Conductive Parts with Copper Plated Joints
Authors: Xiaolong Li, Cheng Yao, Shang Shi, Shuyue Feng, Yujie Zhou, Haoye Dong, Shichao Huang, Xueyan Cai, Kecheng Jin, Fangtian Ying, Guanyun Wang

[Link](https://programs.sigchi.org/uist/2024/program/content/170987)

Abstract: The advent of conductive thermoplastic filaments and multi-material 3D printing has made it feasible to create interactive 3D printed objects. Yet, challenges arise due to volume constraints of desktop 3D printers and high resistive characteristics of current conductive materials, making the fabrication of large-scale or highly conductive interactive objects daunting. We propose E-Joint, a novel fabrication pipeline for 3D printed objects utilizing mortise and tenon joint structures combined with a copper plating process. The segmented pieces and joint structures are customized in software along with integrated circuits. The parts are then electroplated for enhanced conductivity. We designed four distinct electrified joint structures in our experiments and evaluated the practical feasibility and effectiveness of fabricating pipes. By constructing three applications with those structures, we verified the usability of E-Joint in making large-scale interactive objects and show a path to a more integrated future for manufacturing.



### MobiPrint: A Mobile 3D Printer for Environment-Scale Design and Fabrication
Authors: Daniel Campos Zamora, Liang He, Jon Froehlich

[Link](https://programs.sigchi.org/uist/2024/program/content/170934)

Abstract: 3D printing is transforming how we customize and create physical objects in engineering, accessibility, and art. However, this technology is still primarily limited to confined working areas and dedicated print beds, thereby detaching design and fabrication from real-world environments and making measuring and scaling objects tedious and labor-intensive. In this paper, we present MobiPrint, a prototype mobile fabrication system that combines elements from robotics, architecture, and Human-Computer Interaction (HCI) to enable environment-scale design and fabrication in ad-hoc indoor environments. MobiPrint provides a multi-stage fabrication pipeline: first, the robotic 3D printer automatically scans and maps an indoor space; second, a custom design tool converts the map into an interactive CAD canvas for editing and placing models in the physical world; finally, the MobiPrint robot prints the object directly on the ground at the defined location. Through a "proof-by-demonstration" validation, we highlight our system's potential across different applications, including accessibility, home furnishing, floor signage, and art. We also conduct a technical evaluation to assess MobiPrint’s localization accuracy, ground surface adhesion, payload capacity, and mapping speed.
We close with a discussion of open challenges and opportunities for the future of contextualized mobile fabrication. + + + +### StructCurves: Interlocking Block-Based Line Structures +Authors: Zezhou Sun, Devin Balkcom, Emily Whiting + +[Link](https://programs.sigchi.org/uist/2024/program/content/171006) + +Abstract: We present a new class of curved block-based line structures whose component chains are flexible when separated, and provably rigid when assembled together into an interlocking double chain. The joints are inspired by traditional zippers, where a binding fabric or mesh connects individual teeth. +Unlike traditional zippers, the joint design produces a rigid interlock with programmable curvature. This allows fairly strong curved structures to be built out of easily stored flexible chains. +In this paper, we introduce a pipeline for generating these curved structures using a novel block design template based on revolute joints. +Mesh embedded in these structures maintains block spacing and assembly order. We evaluate the rigidity of the curved structures through mechanical performance testing and demonstrate several applications. + + + + +## Break Q&A: Shared Spaces +### BlendScape: Enabling End-User Customization of Video-Conferencing Environments through Generative AI +HONORABLE_MENTION + +Authors: Shwetha Rajaram, Nels Numan, Bala Kumaravel, Nicolai Marquardt, Andrew Wilson + +[Link](https://programs.sigchi.org/uist/2024/program/content/170854) + +Abstract: Today’s video-conferencing tools support a rich range of professional and social activities, but their generic meeting environments cannot be dynamically adapted to align with distributed collaborators’ needs. To enable end-user customization, we developed BlendScape, a rendering and composition system for video-conferencing participants to tailor environments to their meeting context by leveraging AI image generation techniques. BlendScape supports flexible representations of task spaces by blending users’ physical or digital backgrounds into unified environments and implements multimodal interaction techniques to steer the generation. Through an exploratory study with 15 end-users, we investigated whether and how they would find value in using generative AI to customize video-conferencing environments. Participants envisioned using a system like BlendScape to facilitate collaborative activities in the future, but required further controls to mitigate distracting or unrealistic visual elements. We implemented scenarios to demonstrate BlendScape's expressiveness for supporting environment design strategies from prior work and propose composition techniques to improve the quality of environments. + + + +### SpaceBlender: Creating Context-Rich Collaborative Spaces Through Generative 3D Scene Blending +Authors: Nels Numan, Shwetha Rajaram, Bala Kumaravel, Nicolai Marquardt, Andrew Wilson + +[Link](https://programs.sigchi.org/uist/2024/program/content/170843) + +Abstract: There is increased interest in using generative AI to create 3D spaces for virtual reality (VR) applications. However, today’s models produce artificial environments, falling short of supporting collaborative tasks that benefit from incorporating the user's physical context. To generate environments that support VR telepresence, we introduce SpaceBlender, a novel pipeline that utilizes generative AI techniques to blend users' physical surroundings into unified virtual spaces. 
This pipeline transforms user-provided 2D images into context-rich 3D environments through an iterative process consisting of depth estimation, mesh alignment, and diffusion-based space completion guided by geometric priors and adaptive text prompts. In a preliminary within-subjects study, where 20 participants performed a collaborative VR affinity diagramming task in pairs, we compared SpaceBlender with a generic virtual environment and a state-of-the-art scene generation framework, evaluating its ability to create virtual spaces suitable for collaboration. Participants appreciated the enhanced familiarity and context provided by SpaceBlender but also noted complexities in the generative environments that could detract from task focus. Drawing on participant feedback, we propose directions for improving the pipeline and discuss the value and design of blended spaces for different scenarios.



### MyWebstrates: Webstrates as Local-first Software
Authors: Clemens Klokmose, James Eagan, Peter van Hardenberg

[Link](https://programs.sigchi.org/uist/2024/program/content/170812)

Abstract: Webstrates are web substrates, a practical realization of shareable dynamic media under which distributability, shareability, and malleability are fundamental software principles. Webstrates blur the distinction between application and document in a way that enables users to share, repurpose, and refit software across a variety of domains, but its reliance on a central server constrains its use; it is at odds with personal and collective control of data; and limits applications to the web. We extend the fundamental principles to include interoperability and sovereignty over data and propose MyWebstrates, an implementation of Webstrates on top of a new, lower-level substrate for synchronization built around local-first software principles. MyWebstrates registers itself in the user’s browser and functions as a piece of local software that can selectively synchronise data over sync servers or peer-to-peer connections. We show how MyWebstrates extends Webstrates to enable offline collaborative use, interoperate between Webstrates on non-web technologies such as Unity, and maintain personal and collective sovereignty over data. We demonstrate how this enables new types of applications of Webstrates and discuss limitations of this approach and new challenges that it reveals.



### SituationAdapt: Contextual UI Optimization in Mixed Reality with Situation Awareness via LLM Reasoning
Authors: Zhipeng Li, Christoph Gebhardt, Yves Inglin, Nicolas Steck, Paul Streli, Christian Holz

[Link](https://programs.sigchi.org/uist/2024/program/content/170856)

Abstract: Mixed Reality is increasingly used in mobile settings beyond controlled home and office spaces. This mobility introduces the need for user interface layouts that adapt to varying contexts. However, existing adaptive systems are designed only for static environments. In this paper, we introduce SituationAdapt, a system that adjusts Mixed Reality UIs to real-world surroundings by considering environmental and social cues in shared settings. Our system consists of perception, reasoning, and optimization modules for UI adaptation. Our perception module identifies objects and individuals around the user, while our reasoning module leverages a Vision-and-Language Model to assess the placement of interactive UI elements. This ensures that adapted layouts do not obstruct relevant environmental cues or interfere with social norms.
Our optimization module then generates Mixed Reality interfaces that account for these considerations as well as temporal constraints. The evaluation of SituationAdapt is two-fold: We first validate our reasoning component’s capability in assessing UI contexts comparable to human expert users. In an online user study, we then established our system’s capability of producing context-aware MR layouts, where it outperformed adaptive methods from previous work. We further demonstrate the versatility and applicability of SituationAdapt with a set of application scenarios.



### Desk2Desk: Optimization-based Mixed Reality Workspace Integration for Remote Side-by-side Collaboration
Authors: Ludwig Sidenmark, Tianyu Zhang, Leen Al Lababidi, Jiannan Li, Tovi Grossman

[Link](https://programs.sigchi.org/uist/2024/program/content/170830)

Abstract: Mixed Reality enables hybrid workspaces where physical and virtual monitors are adaptively created and moved to suit the current environment and needs. However, in shared settings, individual users’ workspaces are rarely aligned and can vary significantly in the number of monitors, available physical space, and workspace layout, creating inconsistencies between workspaces which may cause confusion and reduce collaboration. We present Desk2Desk, an optimization-based approach for remote collaboration in which the hybrid workspaces of two collaborators are fully integrated to enable immersive side-by-side collaboration. The optimization adjusts each user’s workspace in layout and number of shared monitors and creates a mapping between workspaces to handle inconsistencies between workspaces due to physical constraints (e.g., physical monitors). We show in a user study how our system adaptively merges dissimilar physical workspaces to enable immersive side-by-side collaboration, and demonstrate how an optimization-based approach can effectively address dissimilar physical layouts.




## Break Q&A: Learning to Learn
### Patterns of Hypertext-Augmented Sensemaking
Authors: Siyi Zhu, Robert Haisfield, Brendan Langen, Joel Chan

[Link](https://programs.sigchi.org/uist/2024/program/content/170882)

Abstract: The early days of HCI were marked by bold visions of hypertext as a transformative medium for augmented sensemaking, exemplified in systems like Memex, Xanadu, and NoteCards. Today, however, hypertext is often disconnected from discussions of the future of sensemaking. In this paper, we investigate how the recent resurgence in hypertext "tools for thought" might point to new directions for hypertext-augmented sensemaking. Drawing on detailed analyses of guided tours with 23 scholars, we describe hypertext-augmented use patterns for dealing with the core problem of revisiting and reusing existing/past ideas during scholarly sensemaking. We then discuss how these use patterns validate and extend existing knowledge of hypertext design patterns for sensemaking, and point to new design opportunities for augmented sensemaking.



### Augmented Physics: Creating Interactive and Embedded Physics Simulations from Static Textbook Diagrams
BEST_PAPER

Authors: Aditya Gunturu, Yi Wen, Nandi Zhang, Jarin Thundathil, Rubaiat Habib Kazi, Ryo Suzuki

[Link](https://programs.sigchi.org/uist/2024/program/content/170907)

Abstract: We introduce Augmented Physics, a machine learning-integrated authoring tool designed for creating embedded interactive physics simulations from static textbook diagrams.
Leveraging recent advancements in computer vision, such as Segment Anything and Multi-modal LLMs, our web-based system enables users to semi-automatically extract diagrams from physics textbooks and generate interactive simulations based on the extracted content. These interactive diagrams are seamlessly integrated into scanned textbook pages, facilitating interactive and personalized learning experiences across various physics concepts, such as optics, circuits, and kinematics. Drawing from an elicitation study with seven physics instructors, we explore four key augmentation strategies: 1) augmented experiments, 2) animated diagrams, 3) bi-directional binding, and 4) parameter visualization. We evaluate our system through technical evaluation, a usability study (N=12), and expert interviews (N=12). Study findings suggest that our system can facilitate more engaging and personalized learning experiences in physics education. + + + +### Qlarify: Recursively Expandable Abstracts for Dynamic Information Retrieval over Scientific Papers +Authors: Raymond Fok, Joseph Chee Chang, Tal August, Amy Zhang, Daniel Weld + +[Link](https://programs.sigchi.org/uist/2024/program/content/170964) + +Abstract: Navigating the vast scientific literature often starts with browsing a paper’s abstract. However, when a reader seeks additional information, not present in the abstract, they face a costly cognitive chasm during their dive into the full text. To bridge this gap, we introduce recursively expandable abstracts, a novel interaction paradigm that dynamically expands abstracts by progressively incorporating additional information from the papers’ full text. This lightweight interaction allows scholars to specify their information needs by quickly brushing over the abstract or selecting AI-suggested expandable entities. Relevant information is synthesized using a retrieval-augmented generation approach, presented as a fluid, threaded expansion of the abstract, and made efficiently verifiable via attribution to relevant source-passages in the paper. Through a series of user studies, we demonstrate the utility of recursively expandable abstracts and identify future opportunities to support low-effort and just-in-time exploration of long-form information contexts through LLM-powered interactions. + + + +### LessonPlanner: Assisting Novice Teachers to Prepare Pedagogy-Driven Lesson Plans with Large Language Models +Authors: Haoxiang Fan, Guanzheng Chen, Xingbo Wang, Zhenhui Peng + +[Link](https://programs.sigchi.org/uist/2024/program/content/170883) + +Abstract: Preparing a lesson plan, e.g., a detailed road map with strategies and materials for instructing a 90-minute class, is beneficial yet challenging for novice teachers. Large language models (LLMs) can ease this process by generating adaptive content for lesson plans, which would otherwise require teachers to create from scratch or search existing resources. In this work, we first conduct a formative study with six novice teachers to understand their needs for support of preparing lesson plans with LLMs. Then, we develop LessonPlanner that assists users to interactively construct lesson plans with adaptive LLM-generated content based on Gagne's nine events. Our within-subjects study (N=12) shows that compared to the baseline ChatGPT interface, LessonPlanner can significantly improve the quality of outcome lesson plans and ease users' workload in the preparation process. 
Our expert interviews (N=6) further demonstrate LessonPlanner's usefulness in suggesting effective teaching strategies and meaningful educational resources. We discuss concerns about and design considerations for supporting teaching activities with LLMs.




## Break Q&A: Generating Visuals
### ShadowMagic: Designing Human-AI Collaborative Support for Comic Professionals’ Shadowing
Authors: Amrita Ganguly, Chuan Yan, John Chung, Tong Sun, YOON KIHEON, Yotam Gingold, Sungsoo Ray Hong

[Link](https://programs.sigchi.org/uist/2024/program/content/170726)

Abstract: Shadowing allows artists to convey realistic volume and emotion of characters in comic colorization. While AI technologies have the potential to improve professionals’ shadowing experience, current practice is manual and time-consuming. To understand how we can improve their shadowing experience, we conducted interviews with 5 professionals. We found that professionals’ level of engagement can vary depending on semantics, such as characters’ faces or hair. We also found they spent time on shadow “landscaping”—deciding where to place large shadow regions to create a realistic volumetric presentation while the final results can vary dramatically depending on their “staging” and “attention guiding” needs. We discovered they would accept AI suggestions for less engaging semantic parts or landscaping, while needing the capability to adjust details. Based on our observations, we developed ShadowMagic, which (1) generates AI-driven shadows based on commonly used light directions, (2) enables users to selectively choose results depending on semantics, and (3) allows users to complete shadow areas themselves for further perfection. Through a summative evaluation with 5 professionals, we found that they were significantly more satisfied with our AI-driven results compared to a baseline. We also found that ShadowMagic’s “step by step” workflow helps participants more easily adopt AI-driven results. We conclude by providing implications.



### What's the Game, then? Opportunities and Challenges for Runtime Behavior Generation
BEST_PAPER

Authors: Nicholas Jennings, Han Wang, Isabel Li, James Smith, Bjoern Hartmann

[Link](https://programs.sigchi.org/uist/2024/program/content/170924)

Abstract: Procedural content generation (PCG), the process of creating game components algorithmically instead of manually, has been a common tool of game development for decades. Recent advances in large language models (LLMs) enable the generation of game behaviors based on player input at runtime. Such code generation brings with it the possibility of entirely new gameplay interactions that may be difficult to integrate with typical game development workflows. We explore these implications through GROMIT, a novel LLM-based runtime behavior generation system for Unity. When triggered by a player action, GROMIT generates a relevant behavior which is compiled without developer intervention and incorporated into the game. We create three demonstration scenarios with GROMIT to investigate how such a technology might be used in game development. In a system evaluation we find that our implementation is able to produce behaviors that result in significant downstream impacts to gameplay. We then conduct an interview study with n=13 game developers using GROMIT as a probe to elicit their current opinion on runtime behavior generation tools, and enumerate the specific themes curtailing the wider use of such tools.
We find that the main themes of concern are quality considerations, community expectations, and fit with developer workflows, and that several of the subthemes are unique to runtime behavior generation specifically. We outline a future work agenda to address these concerns, including the need for additional guardrail systems for behavior generation. + + + +### StyleFactory: Towards Better Style Alignment in Image Creation through Style-Strength-Based Control and Evaluation +Authors: Mingxu Zhou, Dengming Zhang, Weitao You, Ziqi Yu, Yifei Wu, Chenghao Pan, Huiting Liu, Tianyu Lao, Pei Chen + +[Link](https://programs.sigchi.org/uist/2024/program/content/170929) + +Abstract: Generative AI models have been widely used for image creation. However, generating images that are well-aligned with users' personal styles on aesthetic features (e.g., color and texture) can be challenging due to the poor style expression and interpretation between humans and models. Through a formative study, we observed that participants showed a clear subjective perception of the desired style and variations in its strength, which directly inspired us to develop style-strength-based control and evaluation. Building on this, we present StyleFactory, an interactive system that helps users achieve style alignment. Our interface enables users to rank images based on their strengths in the desired style and visualizes the strength distribution of other images in that style from the model's perspective. In this way, users can evaluate the understanding gap between themselves and the model, and define well-aligned personal styles for image creation through targeted iterations. Our technical evaluation and user study demonstrate that StyleFactory accurately generates images in specific styles, effectively facilitates style alignment in image creation workflow, stimulates creativity, and enhances the user experience in human-AI interactions. + + + +### AutoSpark: Supporting Automobile Appearance Design Ideation with Kansei Engineering and Generative AI +Authors: Liuqing Chen, Qianzhi Jing, Yixin Tsang, Qianyi Wang, Ruocong Liu, Duowei Xia, Yunzhan Zhou, Lingyun Sun + +[Link](https://programs.sigchi.org/uist/2024/program/content/170878) + +Abstract: Rapid creation of novel product appearance designs that align with consumer emotional requirements poses a significant challenge. Text-to-image models, with their excellent image generation capabilities, have demonstrated potential in providing inspiration to designers. However, designers still encounter issues including aligning emotional needs, expressing design intentions, and comprehending generated outcomes in practical applications. To address these challenges, we introduce AutoSpark, an interactive system that integrates Kansei Engineering and generative AI to provide creativity support for designers in creating automobile appearance designs that meet emotional needs. AutoSpark employs a Kansei Engineering engine powered by generative AI and a semantic network to assist designers in emotional need alignment, design intention expression, and prompt crafting. It also facilitates designers' understanding and iteration of generated results through fine-grained image-image similarity comparisons and text-image relevance assessments. The design-thinking map within its interface aids in managing the design process. 
Our user study indicates that AutoSpark effectively aids designers in producing designs that are more aligned with emotional needs and of higher quality compared to a baseline system, while also enhancing the designers' experience in the human-AI co-creation process.




## Break Q&A: Hacking Perception
### Predicting the Limits: Tailoring Unnoticeable Hand Redirection Offsets in Virtual Reality to Individuals’ Perceptual Boundaries
Authors: Martin Feick, Kora Regitz, Lukas Gehrke, André Zenner, Anthony Tang, Tobias Jungbluth, Maurice Rekrut, Antonio Krüger

[Link](https://programs.sigchi.org/uist/2024/program/content/171017)

Abstract: Many illusion and interaction techniques in Virtual Reality (VR) rely on Hand Redirection (HR), which has proved to be effective as long as the introduced offsets between the position of the real and virtual hand do not noticeably disturb the user experience. Yet calibrating HR offsets is a tedious and time-consuming process involving psychophysical experimentation, and the resulting thresholds are known to be affected by many variables—limiting HR's practical utility. As a result, there is a clear need for alternative methods that allow tailoring HR to the perceptual boundaries of individual users. We conducted an experiment with 18 participants combining movement, eye gaze and EEG data to detect HR offsets Below, At, and Above individuals' detection thresholds. Our results suggest that we can distinguish HR At and Above from no HR. Our exploration provides a promising new direction with potentially strong implications for the broad field of VR illusions.



### Modulating Heart Activity and Task Performance using Haptic Heartbeat Feedback: A Study Across Four Body Placements
Authors: Andreia Valente, Dajin Lee, Seungmoon Choi, Mark Billinghurst, Augusto Esteves

[Link](https://programs.sigchi.org/uist/2024/program/content/170839)

Abstract: This paper explores the impact of vibrotactile haptic feedback on heart activity when the feedback is provided at four different body locations (chest, wrist, neck, and ankle) and with two feedback rates (50 bpm and 110 bpm). A user study found that the neck placement resulted in higher heart rates and lower heart rate variability, and higher frequencies correlated with increased heart rates and decreased heart rate variability. The chest was preferred in self-reported metrics, and neck placement was perceived as less satisfying, harmonious, and immersive. This research contributes to understanding the interplay between psychological experiences and physiological responses when using haptic biofeedback resembling real body signals.



### Augmented Breathing via Thermal Feedback in the Nose
Authors: Jas Brooks, Alex Mazursky, Janice Hixon, Pedro Lopes

[Link](https://programs.sigchi.org/uist/2024/program/content/170728)

Abstract: We propose, engineer, and study a novel method to augment the feeling of breathing—enabling interactive applications to let users feel like they are inhaling more/less air (perceived nasal airflow). We achieve this effect by cooling or heating the nose in sync with the user’s inhalation. Our illusion builds on the physiology of breathing: we perceive our breath predominantly through the cooling of our nasal cavities during inhalation. This is why breathing in a “fresh” cold environment feels easier than in a “stuffy” hot environment, even when the inhaled volume is the same.
Our psychophysical study confirmed that our in-nose temperature stimulation significantly influenced breathing perception in both directions: making it feel harder & easier to breathe. Further, we found that ~90% of the trials were described as a change in perceived airflow/breathing, while only ~8% as temperature. Following, we engineered a compact device worn across the septum that uses Peltier elements. We illustrate the potential of this augmented breathing in interactive contexts, such as for virtual reality (e.g., rendering ease of breathing crisp air or difficulty breathing with a deteriorated gas mask) and everyday interactions (e.g., in combination with a relaxation application or to alleviate the perceived breathing resistance when wearing a mask).



### Thermal In Motion: Designing Thermal Flow Illusions with Tactile and Thermal Interaction
Authors: Yatharth Singhal, Daniel Honrales, Haokun Wang, Jin Ryong Kim

[Link](https://programs.sigchi.org/uist/2024/program/content/170896)

Abstract: This study presents a novel method for creating moving thermal sensations by integrating the thermal referral illusion with tactile motion. Conducted through three experiments on human forearms, the first experiment examined the impact of temperature and thermal actuator placement on perceived thermal motion, finding the clearest perception with a centrally positioned actuator under both hot and cold conditions. The second experiment identified the speed thresholds of perceived thermal motion, revealing a wider detectable range in hot conditions (1.8 cm/s to 9.5 cm/s) compared to cold conditions (2.4 cm/s to 5.0 cm/s). Finally, we integrated our approach into virtual reality (VR) to assess its feasibility through two interaction scenarios. Our results shed light on the comprehension of thermal perception and its integration with tactile cues, promising significant advancements in incorporating thermal motion into diverse thermal interfaces for immersive VR experiences.




## Break Q&A: Beyond mobile
### picoRing: battery-free rings for subtle thumb-to-index input
Authors: Ryo Takahashi, Eric Whitmire, Roger Boldu, Shiu Ng, Wolf Kienzle, Hrvoje Benko

[Link](https://programs.sigchi.org/uist/2024/program/content/170844)

Abstract: Smart rings for subtle, reliable finger input offer an attractive path for ubiquitous interaction with wearable computing platforms.
However, compared to ordinary rings worn for cultural or fashion reasons, smart rings are much bulkier and less comfortable, largely due to the space required for a battery, which also limits the space available for sensors.
This paper presents picoRing, a flexible sensing architecture that enables a variety of battery-free smart rings paired with a wristband.
By inductively connecting a wristband-based sensitive reader coil with a ring-based fully-passive sensor coil, picoRing enables the wristband to stably detect the passive response from the ring via a weak inductive coupling.
We demonstrate four different rings that support thumb-to-finger interactions like pressing, sliding, or scrolling.
When users perform these interactions, the corresponding ring converts each input into a unique passive response through a network of passive switches.
Combining the coil-based sensitive readout with the fully-passive ring design enables a tiny ring that weighs as little as 1.5 g and achieves a 13 cm stable readout despite finger bending and proximity to metal.
+ + + +### WatchLink: Enhancing Smartwatches with Sensor Add-Ons via ECG Interface +Authors: Anandghan Waghmare, Ishan Chatterjee, Vikram Iyer, Shwetak Patel + +[Link](https://programs.sigchi.org/uist/2024/program/content/170782) + +Abstract: We introduce a low-power communication method that lets smartwatches leverage existing electrocardiogram (ECG) hardware as a data communication interface. Our unique approach enables the connection of external, inexpensive, and low-power "add-on" sensors to the smartwatch, expanding its functionalities. These sensors cater to specialized user needs beyond those offered by pre-built sensor suites, at a fraction of the cost and power of traditional communication protocols, including Bluetooth Low Energy. To demonstrate the feasibility of our approach, we conduct a series of exploratory and evaluative tests to characterize the ECG interface as a communication channel on commercial smartwatches. We design a simple transmission scheme using commodity components, demonstrating cost and power benefits. Further, we build and test a suite of add-on sensors, including UV light, body temperature, buttons, and breath alcohol, all of which achieved testing objectives at low material cost and power usage. This research paves the way for personalized and user-centric wearables by offering a cost-effective solution to expand their functionalities. + + + + +### PrISM-Observer: Intervention Agent to Help Users Perform Everyday Procedures Sensed using a Smartwatch +Authors: Riku Arakawa, Hiromu Yakura, Mayank Goel + +[Link](https://programs.sigchi.org/uist/2024/program/content/170914) + +Abstract: We routinely perform procedures (such as cooking) that include a set of atomic steps. Often, inadvertent omission or misordering of a single step can lead to serious consequences, especially for those experiencing cognitive challenges such as dementia. This paper introduces PrISM-Observer, a smartwatch-based, context-aware, real-time intervention system designed to support daily tasks by preventing errors. Unlike traditional systems that require users to seek out information, the agent observes user actions and intervenes proactively. This capability is enabled by the agent's ability to continuously update its belief in the user's behavior in real-time through multimodal sensing and forecast optimal intervention moments and methods. We first validated the steps-tracking performance of our framework through evaluations across three datasets with different complexities. Then, we implemented a real-time agent system using a smartwatch and conducted a user study in a cooking task scenario. The system generated helpful interventions, and we gained positive feedback from the participants. The general applicability of PrISM-Observer to daily tasks promises broad applications, for instance, including support for users requiring more involved interventions, such as people with dementia or post-surgical patients. + + + + +## Break Q&A: New realities +### SIM2VR: Towards Automated Biomechanical Testing in VR +Authors: Florian Fischer, Aleksi Ikkala, Markus Klar, Arthur Fleig, Miroslav Bachinski, Roderick Murray-Smith, Perttu Hämäläinen, Antti Oulasvirta, Jörg Müller + +[Link](https://programs.sigchi.org/uist/2024/program/content/170989) + +Abstract: Automated biomechanical testing has great potential for the development of VR applications, as initial insights into user behaviour can be gained in silico early in the design process. 
In particular, it allows prediction of user movements and ergonomic variables, such as fatigue, prior to conducting user studies.
However, there is a fundamental disconnect between simulators hosting state-of-the-art biomechanical user models and simulators used to develop and run VR applications.
Existing user simulators often struggle to capture the intricacies of real-world VR applications, reducing ecological validity of user predictions.
In this paper, we introduce SIM2VR, a system that aligns user simulation with a given VR application by establishing a continuous closed loop between the two processes.
This, for the first time, enables training simulated users directly in the same VR application that real users interact with.
We demonstrate that SIM2VR can predict differences in user performance, ergonomics and strategies in a fast-paced, dynamic arcade game. In order to expand the scope of automated biomechanical testing beyond simple visuomotor tasks, advances in cognitive models and reward function design will be needed.



### Hands-on, Hands-off: Gaze-Assisted Bimanual 3D Interaction
Authors: Mathias Lystbæk, Thorbjørn Mikkelsen, Roland Krisztandl, Eric Gonzalez, Mar Gonzalez-Franco, Hans Gellersen, Ken Pfeuffer

[Link](https://programs.sigchi.org/uist/2024/program/content/171002)

Abstract: Extended Reality (XR) systems with hand-tracking support direct manipulation of objects with both hands. A common interaction in this context is for the non-dominant hand (NDH) to orient an object for input by the dominant hand (DH). We explore bimanual interaction with gaze through three new modes of interaction where the input of the NDH, DH, or both hands is indirect based on Gaze+Pinch. These modes enable a new dynamic interplay between our hands, allowing flexible alternation between and pairing of complementary operations. Through applications, we demonstrate several use cases in the context of 3D modelling, where users exploit occlusion-free, low-effort, and fluid two-handed manipulation. To gain a deeper understanding of each mode, we present a user study on an asymmetric rotate-translate task. Most participants preferred indirect input with both hands for lower physical effort, without a penalty on user performance. Otherwise, they preferred modes where the NDH oriented the object directly, supporting preshaping of the hand, which is more challenging with indirect gestures. The insights gained are of relevance for the design of XR interfaces that aim to leverage eye and hand input in tandem.



### Pro-Tact: Hierarchical Synthesis of Proprioception and Tactile Exploration for Eyes-Free Ray Pointing on Out-of-View VR Menus
Authors: Yeonsu Kim, Jisu Yim, Kyunghwan Kim, Yohan Yun, Geehyuk Lee

[Link](https://programs.sigchi.org/uist/2024/program/content/170805)

Abstract: We introduce Pro-Tact, a novel eyes-free pointing technique for interacting with out-of-view (OoV) VR menus. This technique combines rapid rough pointing using proprioception with fine-grain adjustments through tactile exploration, enabling menu interaction without visual attention. Our user study demonstrated that Pro-Tact allows users to select menu items accurately (95% accuracy for 54 items) in an eyes-free manner, with reduced fatigue and sickness compared to eyes-engaged interaction. Additionally, we observed that participants voluntarily interacted with OoV menus eyes-free when Pro-Tact's tactile feedback was provided in practical VR application usage contexts.
This research contributes by introducing the novel interaction technique, Pro-Tact, and quantitatively evaluating its benefits in terms of performance, user experience, and user preference in OoV menu interactions.



### GradualReality: Enhancing Physical Object Interaction in Virtual Reality via Interaction State-Aware Blending
Authors: HyunA Seo, Juheon Yi, Rajesh Balan, Youngki Lee

[Link](https://programs.sigchi.org/uist/2024/program/content/170920)

Abstract: We present GradualReality, a novel interface enabling a Cross Reality experience that includes gradual interaction with physical objects in a virtual environment and supports both presence and usability. Daily Cross Reality interaction is challenging as the user's physical object interaction state is continuously changing over time, causing their attention to frequently shift between the virtual and physical worlds. As such, presence in the virtual environment and seamless usability for interacting with physical objects should be maintained at a high level. To address this issue, we present an Interaction State-Aware Blending approach that (i) balances immersion and interaction capability and (ii) provides a fine-grained, gradual transition between virtual and physical worlds. The key idea includes categorizing the flow of physical object interaction into multiple states and designing novel blending methods that offer optimal presence and sufficient physical awareness at each state. We performed extensive user studies and interviews with a working prototype and demonstrated that GradualReality provides better Cross Reality experiences compared to baselines.



### StegoType: Surface Typing from Egocentric Cameras
Authors: Mark Richardson, Fadi Botros, Yangyang Shi, Pinhao Guo, Bradford Snow, Linguang Zhang, Jingming Dong, Keith Vertanen, Shugao Ma, Robert Wang

[Link](https://programs.sigchi.org/uist/2024/program/content/170853)

Abstract: Text input is a critical component of any general purpose computing system, yet efficient and natural text input remains a challenge in AR and VR. Headset based hand-tracking has recently become pervasive among consumer VR devices and affords the opportunity to enable touch typing on virtual keyboards. We present an approach for decoding touch typing on uninstrumented flat surfaces using only egocentric camera-based hand-tracking as input. While egocentric hand-tracking accuracy is limited by issues like self occlusion and image fidelity, we show that a sufficiently diverse training set of hand motions paired with typed text can enable a deep learning model to extract signal from this noisy input.
Furthermore, by carefully designing a closed-loop data collection process, we can train an end-to-end text decoder that accounts for natural sloppy typing on virtual keyboards.
We evaluate our work with a user study (n=18) showing a mean online throughput of 42.4 WPM with an uncorrected error rate (UER) of 7% with our method compared to a physical keyboard baseline of 74.5 WPM at 0.8% UER, showing progress towards unlocking productivity and high throughput use cases in AR/VR.



### Eye-Hand Movement of Objects in Near Space Extended Reality
Authors: Uta Wagner, Andreas Asferg Jacobsen, Tiare Feuchtner, Hans Gellersen, Ken Pfeuffer

[Link](https://programs.sigchi.org/uist/2024/program/content/170771)

Abstract: Hand-tracking in Extended Reality (XR) enables moving objects in near space with direct hand gestures, to pick, drag and drop objects in 3D.
In this work, we investigate the use of eye-tracking to reduce the effort involved in this interaction. As the eyes naturally look ahead to the target for a drag operation, the principal idea is to map the translation of the object in the image plane to gaze, such that the hand only needs to control the depth component of the operation. We have implemented four techniques that explore two factors: the use of gaze only to move objects in X-Y vs. extra refinement by hand, and the use of hand input in the Z axis to directly move objects vs. indirectly via a transfer function. We compared all four techniques in a user study (N=24) against baselines of direct and indirect hand input. We detail user performance, effort and experience trade-offs and show that all eye-hand techniques significantly reduce physical effort over direct gestures, pointing toward effortless drag-and-drop for XR environments.




## Break Q&A: Contextual Augmentations
### StreetNav: Leveraging Street Cameras to Support Precise Outdoor Navigation for Blind Pedestrians
Authors: Gaurav Jain, Basel Hindi, Zihao Zhang, Koushik Srinivasula, Mingyu Xie, Mahshid Ghasemi, Daniel Weiner, Sophie Ana Paris, Xin Yi Therese Xu, Michael Malcolm, Mehmet Kerem Turkcan, Javad Ghaderi, Zoran Kostic, Gil Zussman, Brian Smith

[Link](https://programs.sigchi.org/uist/2024/program/content/171003)

Abstract: Blind and low-vision (BLV) people rely on GPS-based systems for outdoor navigation. GPS's inaccuracy, however, causes them to veer off track, run into obstacles, and struggle to reach precise destinations. While prior work has made precise navigation possible indoors via hardware installations, enabling this outdoors remains a challenge. Interestingly, many outdoor environments are already instrumented with hardware such as street cameras. In this work, we explore the idea of repurposing existing street cameras for outdoor navigation. Our community-driven approach considers both technical and sociotechnical concerns through engagements with various stakeholders: BLV users, residents, business owners, and Community Board leadership. The resulting system, StreetNav, processes a camera's video feed using computer vision and gives BLV pedestrians real-time navigation assistance. Our evaluations show that StreetNav guides users more precisely than GPS, but its technical performance is sensitive to environmental occlusions and distance from the camera. We discuss future implications for deploying such systems at scale.



### WorldScribe: Towards Context-Aware Live Visual Descriptions
BEST_PAPER

Authors: Ruei-Che Chang, Yuxuan Liu, Anhong Guo

[Link](https://programs.sigchi.org/uist/2024/program/content/170940)

Abstract: Automated live visual descriptions can aid blind people in understanding their surroundings with autonomy and independence. However, providing descriptions that are rich, contextual, and just-in-time has been a long-standing challenge in accessibility. In this work, we develop WorldScribe, a system that generates automated live real-world visual descriptions that are customizable and adaptive to users' contexts: (i) WorldScribe's descriptions are tailored to users' intents and prioritized based on semantic relevance. (ii) WorldScribe is adaptive to visual contexts, e.g., providing consecutively succinct descriptions for dynamic scenes, while presenting longer and detailed ones for stable settings.
(iii) WorldScribe is adaptive to sound contexts, e.g., increasing volume in noisy environments, or pausing when conversations start. Powered by a suite of vision, language, and sound recognition models, WorldScribe introduces a description generation pipeline that balances the tradeoffs between their richness and latency to support real-time use. The design of WorldScribe is informed by prior work on providing visual descriptions and a formative study with blind participants. Our user study and subsequent pipeline evaluation show that WorldScribe can provide real-time and fairly accurate visual descriptions to facilitate environment understanding that is adaptive and customized to users' contexts. Finally, we discuss the implications and further steps toward making live visual descriptions more context-aware and humanized. + + + +### CookAR: Affordance Augmentations in Wearable AR to Support Kitchen Tool Interactions for People with Low Vision +Authors: Jaewook Lee, Andrew Tjahjadi, Jiho Kim, Junpu Yu, Minji Park, Jiawen Zhang, Jon Froehlich, Yapeng Tian, Yuhang Zhao + +[Link](https://programs.sigchi.org/uist/2024/program/content/170874) + +Abstract: Cooking is a central activity of daily living, supporting independence as well as mental and physical health. However, prior work has highlighted key barriers for people with low vision (LV) to cook, particularly around safely interacting with tools, such as sharp knives or hot pans. Drawing on recent advancements in computer vision (CV), we present CookAR, a head-mounted AR system with real-time object affordance augmentations to support safe and efficient interactions with kitchen tools. To design and implement CookAR, we collected and annotated the first egocentric dataset of kitchen tool affordances, fine-tuned an affordance segmentation model, and developed an AR system with a stereo camera to generate visual augmentations. To validate CookAR, we conducted a technical evaluation of our fine-tuned model as well as a qualitative lab study with 10 LV participants for suitable augmentation design. Our technical evaluation demonstrates that our model outperforms the baseline on our tool affordance dataset, while our user study indicates a preference for affordance augmentations over the traditional whole object augmentations. + + + +### DesignChecker: Visual Design Support for Blind and Low Vision Web Developers +Authors: Mina Huh, Amy Pavel + +[Link](https://programs.sigchi.org/uist/2024/program/content/170953) + +Abstract: Blind and low vision (BLV) developers create websites to share knowledge and showcase their work. A well-designed website can engage audiences and deliver information effectively, yet it remains challenging for BLV developers to review their web designs. We conducted interviews with BLV developers (N=9) and analyzed 20 websites created by BLV developers. BLV developers created highly accessible websites but wanted to assess the usability of their websites for sighted users and follow the design standards of other websites. They also encountered challenges using screen readers to identify illegible text, misaligned elements, and inharmonious colors. We present DesignChecker, a browser extension that helps BLV developers improve their web designs. With DesignChecker, users can assess their current design by comparing it to visual design guidelines, a reference website of their choice, or a set of similar websites. 
DesignChecker also identifies the specific HTML elements that violate design guidelines and suggests CSS changes for improvements. Our user study participants (N=8) recognized more visual design errors than using their typical workflow and expressed enthusiasm about using DesignChecker in the future.




## Break Q&A: Machine Learning for User Interfaces
### UIClip: A Data-driven Model for Assessing User Interface Design
Authors: Jason Wu, Yi-Hao Peng, Xin Yue Li, Amanda Swearngin, Jeffrey Bigham, Jeffrey Nichols

[Link](https://programs.sigchi.org/uist/2024/program/content/170950)

Abstract: User interface (UI) design is a difficult yet important task for ensuring the usability, accessibility, and aesthetic qualities of applications. In our paper, we develop a machine-learned model, UIClip, for assessing the design quality and visual relevance of a UI given its screenshot and natural language description. To train UIClip, we used a combination of automated crawling, synthetic augmentation, and human ratings to construct a large-scale dataset of UIs, collated by description and ranked by design quality. Through training on the dataset, UIClip implicitly learns properties of good and bad designs by (i) assigning a numerical score that represents a UI design's relevance and quality and (ii) providing design suggestions. In an evaluation that compared the outputs of UIClip and other baselines to UIs rated by 12 human designers, we found that UIClip achieved the highest agreement with ground-truth rankings. Finally, we present three example applications that demonstrate how UIClip can facilitate downstream applications that rely on instantaneous assessment of UI design quality: (i) UI code generation, (ii) UI design tips generation, and (iii) quality-aware UI example search.



### UICrit: Enhancing Automated Design Evaluation with a UI Critique Dataset
Authors: Peitong Duan, Chin-Yi Cheng, Gang Li, Bjoern Hartmann, Yang Li

[Link](https://programs.sigchi.org/uist/2024/program/content/170823)

Abstract: Automated UI evaluation can be beneficial for the design process; for example, to compare different UI designs, or conduct automated heuristic evaluation. LLM-based UI evaluation, in particular, holds the promise of generalizability to a wide variety of UI types and evaluation tasks. However, current LLM-based techniques do not yet match the performance of human evaluators. We hypothesize that automatic evaluation can be improved by collecting a targeted UI feedback dataset and then using this dataset to enhance the performance of general-purpose LLMs. We present a targeted dataset of 3,059 design critiques and quality ratings for 983 mobile UIs, collected from seven designers, each with at least a year of professional design experience. We carried out an in-depth analysis to characterize the dataset's features. We then applied this dataset to achieve a 55% performance gain in LLM-generated UI feedback via various few-shot and visual prompting techniques. We also discuss future applications of this dataset, including training a reward model for generative UI techniques, and fine-tuning a tool-agnostic multi-modal LLM that automates UI evaluation.



### EyeFormer: Predicting Personalized Scanpaths with Transformer-Guided Reinforcement Learning
Authors: Yue Jiang, Zixin Guo, Hamed Rezazadegan Tavakoli, Luis Leiva, Antti Oulasvirta

[Link](https://programs.sigchi.org/uist/2024/program/content/170925)

Abstract: From a visual-perception perspective, modern graphical user interfaces (GUIs) comprise a complex graphics-rich two-dimensional visuospatial arrangement of text, images, and interactive objects such as buttons and menus. While existing models can accurately predict regions and objects that are likely to attract attention “on average”, no scanpath model has been capable of predicting scanpaths for an individual. To close this gap, we introduce EyeFormer, which utilizes a Transformer architecture as a policy network to guide a deep reinforcement learning algorithm that predicts gaze locations. Our model offers the unique capability of producing personalized predictions when given a few user scanpath samples. It can predict full scanpath information, including fixation positions and durations, across individuals and various stimulus types. Additionally, we demonstrate applications in GUI layout optimization driven by our model.



### GPTVoiceTasker: Advancing Multi-step Mobile Task Efficiency Through Dynamic Interface Exploration and Learning
Authors: Minh Duc Vu, Han Wang, Jieshan Chen, Zhuang Li, Shengdong Zhao, Zhenchang Xing, Chunyang Chen

[Link](https://programs.sigchi.org/uist/2024/program/content/170994)

Abstract: Virtual assistants have the potential to play an important role in helping users achieve different tasks. However, these systems face challenges in their real-world usability, characterized by inefficiency and struggles in grasping user intentions. Leveraging recent advances in Large Language Models (LLMs), we introduce GPTVoiceTasker, a virtual assistant poised to enhance user experiences and task efficiency on mobile devices. GPTVoiceTasker excels at intelligently deciphering user commands and executing relevant device interactions to streamline task completion. For unprecedented tasks, GPTVoiceTasker utilises the contextual information and on-screen content to continuously explore and execute the tasks. In addition, the system continually learns from historical user commands to automate subsequent task invocations, further enhancing execution efficiency. From our experiments, GPTVoiceTasker achieved 84.5% accuracy in parsing human commands into executable actions and 85.7% accuracy in automating multi-step tasks. In our user study, GPTVoiceTasker boosted task efficiency in real-world scenarios by 34.85%, accompanied by positive participant feedback. We made GPTVoiceTasker open-source, inviting further research into LLMs utilization for diverse tasks through prompt engineering and leveraging user usage data to improve efficiency.



### VisionTasker: Mobile Task Automation Using Vision Based UI Understanding and LLM Task Planning
Authors: Yunpeng Song, Yiheng Bian, Yongtao Tang, Guiyu Ma, Zhongmin Cai

[Link](https://programs.sigchi.org/uist/2024/program/content/170816)

Abstract: Mobile task automation is an emerging field that leverages AI to streamline and optimize the execution of routine tasks on mobile devices, thereby enhancing efficiency and productivity. Traditional methods, such as Programming By Demonstration (PBD), are limited due to their dependence on predefined tasks and susceptibility to app updates.
Recent advancements have utilized the view hierarchy to collect UI information and employed Large Language Models (LLM) to enhance task automation. However, view hierarchies have accessibility issues and face potential problems like missing object descriptions or misaligned structures. This paper introduces VisionTasker, a two-stage framework combining vision-based UI understanding and LLM task planning, for mobile task automation in a step-by-step manner. VisionTasker firstly converts a UI screenshot into natural language interpretations using a vision-based UI understanding approach, eliminating the need for view hierarchies. Secondly, it adopts a step-by-step task planning method, presenting one interface at a time to the LLM. The LLM then identifies relevant elements within the interface and determines the next action, enhancing accuracy and practicality. Extensive experiments show that VisionTasker outperforms previous methods, providing effective UI representations across four datasets. Additionally, in automating 147 real-world tasks on an Android smartphone, VisionTasker demonstrates advantages over humans in tasks where humans show unfamiliarity and shows significant improvements when integrated with the PBD mechanism. VisionTasker is open-source and available at https://github.com/AkimotoAyako/VisionTasker.




## Break Q&A: Poses as Input
### SolePoser: Real-Time 3D Human Pose Estimation using Insole Pressure Sensors
Authors: Erwin Wu, Rawal Khirodkar, Hideki Koike, Kris Kitani

[Link](https://programs.sigchi.org/uist/2024/program/content/170905)

Abstract: We propose SolePoser, a real-time 3D pose estimation system that leverages only a single pair of insole sensors. Unlike conventional methods relying on fixed cameras or bulky wearable sensors, our approach offers minimal and natural setup requirements. The proposed system utilizes pressure and IMU sensors embedded in insoles to capture the body weight's pressure distribution at the feet and its 6 DoF acceleration. This information is used to estimate the 3D full-body joint position by a two-stream transformer network. A novel double-cycle consistency loss and a cross-attention module are further introduced to learn the relationship between 3D foot positions and their pressure distributions.
We also introduced two different datasets of sports and daily exercises, offering 908k frames across eight different activities. Our experiments show that our method's performance is on par with top-performing approaches, which utilize more IMUs and even outperform third-person-view camera-based methods in certain scenarios.



### Gait Gestures: Examining Stride and Foot Strike Variation as an Input Method While Walking
Authors: Ching-Yi Tsai, Ryan Yen, Daekun Kim, Daniel Vogel

[Link](https://programs.sigchi.org/uist/2024/program/content/170926)

Abstract: Walking is a cyclic pattern of alternating footstep strikes, with each pair of steps forming a stride, and a series of strides forming a gait. We conduct a systematic examination of different kinds of intentional variations from a normal gait that could be used as input actions without interrupting overall walking progress. A design space of 22 candidate Gait Gestures is generated by adapting previous standing foot input actions and identifying new actions possible in a walking context. A formative study (n=25) examines movement easiness, social acceptability, and walking compatibility with foot movement logging to calculate temporal and spatial characteristics.
Using a categorization of these results, 7 gestures are selected for a wizard-of-oz prototype demonstrating an AR interface controlled by Gait Gestures for ordering food and audio playback while walking. As a technical proof-of-concept, a gait gesture recognizer is developed and tested using the formative study data.



### EgoTouch: On-Body Touch Input Using AR/VR Headset Cameras
Authors: Vimal Mollyn, Chris Harrison

[Link](https://programs.sigchi.org/uist/2024/program/content/170875)

Abstract: In augmented and virtual reality (AR/VR) experiences, a user’s arms and hands can provide a convenient and tactile surface for touch input. Prior work has shown on-body input to have significant speed, accuracy, and ergonomic benefits over in-air interfaces, which are common today. In this work, we demonstrate high-accuracy, bare-hands (i.e., no special instrumentation of the user) skin input using just an RGB camera, like those already integrated into all modern XR headsets. Our results show this approach can be accurate and robust across diverse lighting conditions, skin tones, and body motion (e.g., input while walking). Finally, our pipeline also provides rich input metadata including touch force, finger identification, angle of attack, and rotation. We believe these are the requisite technical ingredients to more fully unlock on-skin interfaces that have been well motivated in the HCI literature but have lacked robust and practical methods.



### MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices
Authors: Vasco Xu, Chenfeng Gao, Henry Hoffman, Karan Ahuja

[Link](https://programs.sigchi.org/uist/2024/program/content/170732)

Abstract: There has been a continued trend towards minimizing instrumentation for full-body motion capture, going from specialized rooms and equipment, to arrays of worn sensors and recently sparse inertial pose capture methods. However, as these techniques migrate towards lower-fidelity IMUs on ubiquitous commodity devices, like phones, watches, and earbuds, challenges arise including compromised online performance, temporal consistency, and loss of global translation due to sensor noise and drift. Addressing these challenges, we introduce MobilePoser, a real-time system for full-body pose and global translation estimation using any available subset of IMUs already present in these consumer devices. MobilePoser employs a multi-stage deep neural network for kinematic pose estimation followed by a physics-based motion optimizer, achieving state-of-the-art accuracy while remaining lightweight. We conclude with a series of demonstrative applications to illustrate the unique potential of MobilePoser across a variety of fields, such as health and wellness, gaming, and indoor navigation to name a few.



### Touchscreen-based Hand Tracking for Remote Whiteboard Interaction
Authors: Xinshuang Liu, Yizhong Zhang, Xin Tong

[Link](https://programs.sigchi.org/uist/2024/program/content/170956)

Abstract: In whiteboard-based remote communication, the seamless integration of drawn content and hand-screen interactions is essential for an immersive user experience. Previous methods either require bulky device setups for capturing hand gestures or fail to accurately track the hand poses from capacitive images. In this paper, we present a real-time method for precisely tracking the 3D poses of both hands from capacitive video frames.
To this end, we develop a deep neural network to identify hands and infer hand joint positions from capacitive frames, and then recover 3D hand poses from the hand-joint positions via a constrained inverse kinematic solver. Additionally, we design a device setup for capturing high-quality hand-screen interaction data and obtain a more accurate synchronized capacitive video and hand pose dataset. Our method improves the accuracy and stability of 3D hand tracking for capacitive frames while maintaining a compact device setup for remote communication. We validate our scheme design and its superior performance on 3D hand pose tracking and demonstrate the effectiveness of our method in whiteboard-based remote communication.



### SeamPose: Repurposing Seams as Capacitive Sensors in a Shirt for Upper-Body Pose Tracking
Authors: Tianhong Yu, Mary Zhang, Peter He, Chi-Jung Lee, Cassidy Cheesman, Saif Mahmud, Ruidong Zhang, Francois Guimbretiere, Cheng Zhang

[Link](https://programs.sigchi.org/uist/2024/program/content/170739)

Abstract: Seams are areas of overlapping fabric formed by stitching two or more pieces of fabric together in the cut-and-sew apparel manufacturing process. In SeamPose, we repurposed seams as capacitive sensors in a shirt for continuous upper-body pose estimation. Compared to previous all-textile motion-capturing garments that place the electrodes on the clothing surface, our solution leverages existing seams inside of a shirt by machine-sewing insulated conductive threads over the seams. The unique invisibilities and placements of the seams afford the sensing shirt to look and wear similarly to a conventional shirt while providing exciting pose-tracking capabilities. To validate this approach, we implemented a proof-of-concept untethered shirt with 8 capacitive sensing seams. With a 12-participant user study, our customized deep-learning pipeline accurately estimates the relative (to the pelvis) upper-body 3D joint positions with a mean per joint position error (MPJPE) of 6.0 cm. SeamPose represents a step towards unobtrusive integration of smart clothing for everyday pose estimation.




## Break Q&A: A11y
### ProgramAlly: Creating Custom Visual Access Programs via Multi-Modal End-User Programming
Authors: Jaylin Herskovitz, Andi Xu, Rahaf Alharbi, Anhong Guo

[Link](https://programs.sigchi.org/uist/2024/program/content/170960)

Abstract: Existing visual assistive technologies are built for simple and common use cases, and have few avenues for blind people to customize their functionalities. Drawing from prior work on DIY assistive technology, this paper investigates end-user programming as a means for users to create and customize visual access programs to meet their unique needs. We introduce ProgramAlly, a system for creating custom filters for visual information, e.g., 'find NUMBER on BUS', leveraging three end-user programming approaches: block programming, natural language, and programming by example. To implement ProgramAlly, we designed a representation of visual filtering tasks based on scenarios encountered by blind people, and integrated a set of on-device and cloud models for generating and running these programs. In user studies with 12 blind adults, we found that participants preferred different programming modalities depending on the task, and envisioned using visual access programs to address unique accessibility challenges that are otherwise difficult with existing applications.
Through ProgramAlly, we present an exploration of how blind end-users can create visual access programs to customize and control their experiences. + + + +### Accessible Gesture Typing on Smartphones for People with Low Vision +Authors: Dan Zhang, Zhi Li, Vikas Ashok, William H Seiple, IV Ramakrishnan, Xiaojun Bi + +[Link](https://programs.sigchi.org/uist/2024/program/content/170887) + +Abstract: While gesture typing is widely adopted on touchscreen keyboards, its support for low vision users is limited. We have designed and implemented two keyboard prototypes, layout-magnified and key-magnified keyboards, to enable gesture typing for people with low vision. Both keyboards facilitate uninterrupted access to all keys while the screen magnifier is active, allowing people with low vision to input text with one continuous stroke. Furthermore, we have created a kinematics-based decoding algorithm to accommodate the typing behavior of people with low vision. This algorithm can decode the gesture input even if the gesture trace deviates from a pre-defined word template, and the starting position of the gesture is far from the starting letter of the target word. Our user study showed that the key-magnified keyboard achieved 5.28 words per minute, 27.5% faster than a conventional gesture typing keyboard with voice feedback. + + + +### AccessTeleopKit: A Toolkit for Creating Accessible Web-Based Interfaces for Tele-Operating an Assistive Robot +Authors: Vinitha Ranganeni, Varad Dhat, Noah Ponto, Maya Cakmak + +[Link](https://programs.sigchi.org/uist/2024/program/content/170825) + +Abstract: Mobile manipulator robots, which can move around and physically interact with their environments, can empower people with motor limitations to independently carry out many activities of daily living. While many interfaces have been developed for tele-operating complex robots, most of them are not accessible to people with severe motor limitations. Further, most interfaces are rigid with limited configurations and are not readily available to download and use. To address these barriers, we developed AccessTeleopKit: an open-source toolkit for creating custom and accessible robot tele-operation interfaces based on cursor-and-click input for the Stretch 3 mobile-manipulator. With AccessTeleopKit users can add, remove, and rearrange components such as buttons and camera views, and select between a variety of control modes. We describe the participatory and iterative design process that led to the current implementation of AccessTeleopKit, involving three long-term deployments of the robot in the home of a quadriplegic user. We demonstrate how AccessTeleopKit allowed the user to create different interfaces for different tasks and the diversity of tasks it allowed the user to carry out. We also present two studies involving six additional users with severe motor limitations, demonstrating the power of AccessTeleopKit in creating custom interfaces for different user needs and preferences. + + + +### Memory Reviver: Supporting Photo-Collection Reminiscence for People with Visual Impairment via a Proactive Chatbot +Authors: Shuchang Xu, Chang Chen, Zichen LIU, Xiaofu Jin, Linping Yuan, Yukang Yan, Huamin Qu + +[Link](https://programs.sigchi.org/uist/2024/program/content/170852) + +Abstract: Reminiscing with photo collections offers significant psychological benefits but poses challenges for people with visual impairment (PVI). Their current reliance on sighted help restricts the flexibility of this activity. 
In response, we explored using a chatbot in a preliminary study. We identified two primary challenges that hinder effective reminiscence with a chatbot: the scattering of information and a lack of proactive guidance. To address these limitations, we present Memory Reviver, a proactive chatbot that helps PVI reminisce with a photo collection through natural language communication. Memory Reviver incorporates two novel features: (1) a Memory Tree, which uses a hierarchical structure to organize the information in a photo collection; and (2) a Proactive Strategy, which actively delivers information to users at proper conversation rounds. Evaluation with twelve PVI demonstrated that Memory Reviver effectively facilitated engaging reminiscence, enhanced understanding of photo collections, and delivered natural conversational experiences. Based on our findings, we distill implications for supporting photo reminiscence and designing chatbots for PVI.



### VizAbility: Enhancing Chart Accessibility with LLM-based Conversational Interaction
Authors: Joshua Gorniak, Yoon Kim, Donglai Wei, Nam Wook Kim

[Link](https://programs.sigchi.org/uist/2024/program/content/171009)

Abstract: Traditional accessibility methods like alternative text and data tables typically underrepresent data visualization's full potential. Keyboard-based chart navigation has emerged as a potential solution, yet efficient data exploration remains challenging. We present VizAbility, a novel system that enriches chart content navigation with conversational interaction, enabling users to use natural language for querying visual data trends. VizAbility adapts to the user's navigation context for improved response accuracy and facilitates verbal command-based chart navigation. Furthermore, it can address queries for contextual information, designed to address the needs of visually impaired users. We designed a large language model (LLM)-based pipeline to address these user queries, leveraging chart data & encoding, user context, and external web knowledge. We conducted both qualitative and quantitative studies to evaluate VizAbility's multimodal approach. We discuss further opportunities based on the results, including improved benchmark testing, incorporation of vision models, and integration with visualization workflows.



### Computational Trichromacy Reconstruction: Empowering the Color-Vision Deficient to Recognize Colors Using Augmented Reality
Authors: Yuhao Zhu, Ethan Chen, Colin Hascup, Yukang Yan, Gaurav Sharma

[Link](https://programs.sigchi.org/uist/2024/program/content/170991)

Abstract: We propose an assistive technology that helps individuals with Color Vision Deficiencies (CVD) to recognize/name colors. A dichromat's color perception is a reduced two-dimensional (2D) subset of a normal trichromat's three-dimensional color (3D) perception, leading to confusion when visual stimuli that appear identical to the dichromat are referred to by different color names. Using our proposed system, CVD individuals can interactively induce distinct perceptual changes to originally confusing colors via a computational color space transformation. By combining their original 2D percepts for colors with the discriminative changes, a three-dimensional color space is reconstructed, where the dichromat can learn to resolve color name confusions and accurately recognize colors.
+Our system is implemented as an Augmented Reality (AR) interface on smartphones, where users interactively control the rotation through swipe gestures and observe the induced color shifts in the camera view or in a displayed image. Through psychophysical experiments and a longitudinal user study, we demonstrate that such rotational color shifts have discriminative power (initially confusing colors become distinct under rotation) and exhibit structured perceptual shifts dichromats can learn with modest training. The AR App is also evaluated in two real-world scenarios (building with lego blocks and interpreting artistic works); users all report positive experience in using the App to recognize object colors that they otherwise could not. + + + + +## Break Q&A: Sustainable Interfaces +### Degrade to Function: Towards Eco-friendly Morphing Devices that Function Through Programmed Sequential Degradation +Authors: Qiuyu Lu, Semina Yi, Mengtian Gan, Jihong Huang, Xiao Zhang, Yue Yang, Chenyi Shen, Lining Yao + +[Link](https://programs.sigchi.org/uist/2024/program/content/170959) + +Abstract: While it seems counterintuitive to think of degradation within an operating device as beneficial, one may argue that when rationally designed, the controlled breakdown of materials—physical, chemical, or biological—can be harnessed for specific functions. To apply this principle to the design of morphing devices, we introduce the concept of "Degrade to Function" (DtF). This concept aims to create eco-friendly and self-contained morphing devices that operate through a sequence of environmentally-triggered degradations. We explore its design considerations and implementation techniques by identifying environmental conditions and degradation types that can be exploited, evaluating potential materials capable of controlled degradation, suggesting designs for structures that can leverage degradation to achieve various transformations and functions, and developing sequential control approaches that integrate degradation triggers. To demonstrate the viability and versatility of this design strategy, we showcase several application examples across a range of environmental conditions. + + + +### WasteBanned: Supporting Zero Waste Fashion Design Through Linked Edits +Authors: Ruowang Zhang, Stefanie Mueller, Gilbert Bernstein, Adriana Schulz, Mackenzie Leake + +[Link](https://programs.sigchi.org/uist/2024/program/content/170976) + +Abstract: The commonly used cut-and-sew garment construction process, in which 2D fabric panels are cut from sheets of fabric and assembled into 3D garments, contributes to widespread textile waste in the fashion industry. There is often a significant divide between the design of the garment and the layout of the panels. One opportunity for bridging this gap is the emerging study and practice of zero waste fashion design, which involves creating clothing designs with maximum layout efficiency. Enforcing the strict constraints of zero waste sewing is challenging, as edits to one region of the garment necessarily affect neighboring panels. Based on our formative work to understand this emerging area within fashion design, we present WasteBanned, a tool that combines CAM and CAD to help users prioritize efficient material usage, work within these zero waste constraints, and edit existing zero waste garment patterns. 
Our user evaluation indicates that our tool helps fashion designers edit zero waste patterns to fit different bodies and add stylistic variation, while creating highly efficient fabric layouts. + + + +### HoloChemie - Sustainable Fabrication of Soft Biochemical Holographic Devices for Ubiquitous Sensing +Authors: Sutirtha Roy, Moshfiq-Us-Saleheen Chowdhury, Jurjaan Noim, Richa Pandey, Aditya Shekhar Nittala + +[Link](https://programs.sigchi.org/uist/2024/program/content/170931) + +Abstract: Sustainable fabrication approaches and biomaterials are increasingly being used in HCI to fabricate interactive devices. However, the majority of the work has focused on integrating electronics. This paper takes a sustainable approach to exploring the fabrication of biochemical sensing devices. Firstly, we contribute a set of biochemical formulations for biological and environmental sensing with bio-sourced and environment-friendly substrate materials. Our formulations are based on a combination of enzymes derived from bacteria and fungi, plant extracts and commercially available chemicals to sense both liquid and gaseous analytes: glucose, lactic acid, pH levels and carbon dioxide. Our novel holographic sensing scheme allows for detecting the presence of analytes and enables quantitative estimation of the analyte levels. We present a set of application scenarios that demonstrate the versatility of our approach and discuss the sustainability aspects, its limitations, and the implications for bio-chemical systems in HCI. + + + + +## Break Q&A: FABulous +### Facilitating the Parametric Definition of Geometric Properties in Programming-Based CAD +Authors: J Gonzalez Avila, Thomas Pietrzak, Audrey Girouard, Géry Casiez + +[Link](https://programs.sigchi.org/uist/2024/program/content/170736) + +Abstract: Parametric Computer-aided design (CAD) enables the creation of reusable models by integrating variables into geometric properties, facilitating customization without a complete redesign. However, creating parametric designs in programming-based CAD presents significant challenges. Users define models in a code editor using a programming language, with the application generating a visual representation in a viewport. This process involves complex programming and arithmetic expressions to describe geometric properties, linking various object properties to create parametric designs. Unfortunately, these applications lack assistance, making the process unnecessarily demanding. We propose a solution that allows users to retrieve parametric expressions from the visual representation for reuse in the code, streamlining the design process. We demonstrated this concept through a proof-of-concept implemented in the programming-based CAD application, OpenSCAD, and conducted an experiment with 11 users. 
Our findings suggest that this solution could significantly reduce design errors, improve interactivity and engagement in the design process, and lower the entry barrier for newcomers by reducing the mathematical skills typically required in programming-based CAD applications + + + +### Rhapso: Automatically Embedding Fiber Materials into 3D Prints for Enhanced Interactivity +Authors: Daniel Ashbrook, Wei-Ju Lin, Nicholas Bentley, Diana Soponar, Zeyu Yan, Valkyrie Savage, Lung-Pan Cheng, Huaishu Peng, Hyunyoung Kim + +[Link](https://programs.sigchi.org/uist/2024/program/content/170936) + +Abstract: We introduce Rhapso, a 3D printing system designed to embed a diverse range of continuous fiber materials within 3D objects during the printing process. This approach enables integrating properties like tensile strength, force storage and transmission, or aesthetic and tactile characteristics, directly into low-cost thermoplastic 3D prints. These functional objects can have intricate actuation, self-assembly, and sensing capabilities with little to no manual intervention. To achieve this, we modify a low-cost Fused Filament Fabrication (FFF) 3D printer, adding a stepper motor-controlled fiber spool mechanism on a gear ring above the print bed. In addition to hardware, we provide parsing software for precise fiber placement, which generates Gcode for printer operation. To illustrate the versatility of our system, we present applications that showcase its extensive design potential. Additionally, we offer comprehensive documentation and open designs, empowering others to replicate our system and explore its possibilities. + + + + +### Speed-Modulated Ironing: High-Resolution Shade and Texture Gradients in Single-Material 3D Printing +Authors: Mehmet Ozdemir, Marwa AlAlawi, Mustafa Doga Dogan, Jose Martinez Castro, Stefanie Mueller, Zjenja Doubrovski + +[Link](https://programs.sigchi.org/uist/2024/program/content/170731) + +Abstract: We present Speed-Modulated Ironing, a new fabrication method for programming visual and tactile properties in single-material 3D printing. We use one nozzle to 3D print and a second nozzle to reheat printed areas at varying speeds, controlling the material's temperature-response. The rapid adjustments of speed allow for fine-grained reheating, enabling high-resolution color and texture variations. We implemented our method in a tool that allows users to assign desired properties to 3D models and creates corresponding 3D printing instructions. We demonstrate our method with three temperature-responsive materials: a foaming filament, a filament with wood fibers, and a filament with cork particles. These filaments respond to temperature by changing color, roughness, transparency, and gloss. Our technical evaluation reveals the capabilities of our method in achieving sufficient resolution and color shade range that allows surface details such as small text, photos, and QR codes on 3D-printed objects. Finally, we provide application examples demonstrating the new design capabilities enabled by Speed-Modulated Ironing. + + + +### TRAvel Slicer: Continuous Extrusion Toolpaths for 3D Printing +Authors: Jaime Gould, Camila Friedman-Gerlicz, Leah Buechley + +[Link](https://programs.sigchi.org/uist/2024/program/content/170996) + +Abstract: In this paper we present Travel Reduction Algorithm (TRAvel) Slicer, which minimizes travel movements in 3D printing. Conventional slicing software generates toolpaths with many travel movements--movements without material extrusion. 
Some 3D printers are incapable of starting and stopping extrusion, and it is difficult or impossible to control the extrusion of many materials. This makes toolpaths with travel movements unsuitable for a wide range of printers and materials.

We developed the open-source TRAvel Slicer to enable the printing of complex 3D models on a wider range of printers and in a wider range of materials than is currently possible. TRAvel Slicer minimizes two different kinds of travel movements--what we term Inner- and Outer-Model travel. We minimize Inner-Model travel (travel within the 3D model) by generating space-filling Fermat spirals for each contiguous planar region of the model. We minimize Outer-Model travel (travel outside of the 3D model) by ordering the printing of different branches of the model, thus limiting transitions between branches. We present our algorithm and software and then demonstrate how: 1) TRAvel Slicer makes it possible to generate high-quality prints from a metal-clay material, CeraMetal, that is functionally unprintable using an off-the-shelf slicer. 2) TRAvel Slicer dramatically increases the printing efficiency of traditional plastic 3D printing compared to an off-the-shelf slicer.


### Understanding and Supporting Debugging Workflows in CAD
Authors: Felix Hähnlein, Gilbert Bernstein, Adriana Schulz

[Link](https://programs.sigchi.org/uist/2024/program/content/170944)

Abstract: One of the core promises of parametric Computer-Aided Design (CAD) is that users can easily edit their model at any point in time. However, due to the ambiguity of changing references to intermediate, updated geometry, parametric edits can lead to reference errors which are difficult to fix in practice. We claim that debugging reference errors remains challenging because CAD systems do not provide users with tools to understand where the error happened and how to fix it. To address these challenges, we prototype a graphical debugging tool, DeCAD, which helps compare CAD model states both across operations and across edits. In a qualitative lab study, we use DeCAD as a probe to understand specific challenges that users face and what workflows they employ to overcome them. We conclude with design implications for future debugging tool developers.


## Break Q&A: Programming UI
### NotePlayer: Engaging Jupyter Notebooks for Dynamic Presentation of Analytical Processes
Authors: Yang Ouyang, Leixian Shen, Yun Wang, Quan Li

[Link](https://programs.sigchi.org/uist/2024/program/content/170819)

Abstract: Diverse presentation formats play a pivotal role in effectively conveying code and analytical processes during data analysis. One increasingly popular format is tutorial videos, particularly those based on Jupyter notebooks, which offer an intuitive interpretation of code and vivid explanations of analytical procedures. However, creating such videos requires a diverse skill set and significant manual effort, posing a barrier for many analysts. To bridge this gap, we introduce an innovative tool called NotePlayer, which connects notebook cells to video segments and incorporates a computational engine with language models to streamline video creation and editing. Our aim is to make the process more accessible and efficient for analysts. To inform the design of NotePlayer, we conducted a formative study and performed content analysis on a corpus of 38 Jupyter tutorial videos.
This helped us identify key patterns and challenges encountered in existing tutorial videos, guiding the development of NotePlayer. Through a combination of a usage scenario and a user study, we validated the effectiveness of NotePlayer. The results show that the tool streamlines the video creation and facilitates the communication process for data analysts.


### Tyche: Making Sense of Property-Based Testing Effectiveness
Authors: Harrison Goldstein, Jeffrey Tao, Zac Hatfield-Dodds, Benjamin Pierce, Andrew Head

[Link](https://programs.sigchi.org/uist/2024/program/content/170922)

Abstract: Software developers increasingly rely on automated methods to assess the correctness of their code. One such method is property-based testing (PBT), wherein a test harness generates hundreds or thousands of inputs and checks the outputs of the program on those inputs using parametric properties. Though powerful, PBT induces a sizable gulf of evaluation: developers need to put in nontrivial effort to understand how well the different test inputs exercise the software under test. To bridge this gulf, we propose Tyche, a user interface that supports sensemaking around the effectiveness of property-based tests. Guided by a formative design exploration, our design of Tyche supports developers with interactive, configurable views of test behavior with tight integrations into modern developer testing workflow. These views help developers explore global testing behavior and individual test inputs alike. To accelerate the development of powerful, interactive PBT tools, we define a standard for PBT test reporting and integrate it with a widely used PBT library. A self-guided online usability study revealed that Tyche's visualizations help developers to more accurately assess software testing effectiveness.


### CoLadder: Manipulating Code Generation via Multi-Level Blocks
Authors: Ryan Yen, Jiawen Zhu, Sangho Suh, Haijun Xia, Jian Zhao

[Link](https://programs.sigchi.org/uist/2024/program/content/171012)

Abstract: This paper adopted an iterative design process to gain insights into programmers' strategies when using LLMs for programming. We proposed CoLadder, a novel system that supports programmers by facilitating hierarchical task decomposition, direct code segment manipulation, and result evaluation during prompt authoring. A user study with 12 experienced programmers showed that CoLadder is effective in helping programmers externalize their problem-solving intentions flexibly, improving their ability to evaluate and modify code across various abstraction levels, from their task's goal to final code implementation.


### SQLucid: Grounding Natural Language Database Queries with Interactive Explanations
Authors: Yuan Tian, Jonathan Kummerfeld, Toby Li, Tianyi Zhang

[Link](https://programs.sigchi.org/uist/2024/program/content/170951)

Abstract: Though recent advances in machine learning have led to significant improvements in natural language interfaces for databases, the accuracy and reliability of these systems remain limited, especially in high-stakes domains. This paper introduces SQLucid, a novel user interface that bridges the gap between non-expert users and complex database querying processes. SQLucid addresses existing limitations by integrating visual correspondence, intermediate query results, and editable step-by-step SQL explanations in natural language to facilitate user understanding and engagement.
This unique blend of features empowers users to understand and refine SQL queries easily and precisely. Two user studies and one quantitative experiment were conducted to validate SQLucid’s effectiveness, showing significant improvement in task completion accuracy and user confidence compared to existing interfaces. Our code is available at https://github.com/magic-YuanTian/SQLucid. + + + + +## Break Q&A: AI & Automation +### Memolet: Reifying the Reuse of User-AI Conversational Memories +Authors: Ryan Yen, Jian Zhao + +[Link](https://programs.sigchi.org/uist/2024/program/content/170751) + +Abstract: As users engage more frequently with AI conversational agents, conversations may exceed their memory capacity, leading to failures in correctly leveraging certain memories for tailored responses. However, in finding past memories that can be reused or referenced, users need to retrieve relevant information in various conversations and articulate to the AI their intention to reuse these memories. To support this process, we introduce Memolet, an interactive object that reifies memory reuse. Users can directly manipulate Memolet to specify which memories to reuse and how to use them. We developed a system demonstrating Memolet's interaction across various memory reuse stages, including memory extraction, organization, prompt articulation, and generation refinement. We examine the system's usefulness with an N=12 within-subject study and provide design implications for future systems that support user-AI conversational memory reusing. + + + +### VIME: Visual Interactive Model Explorer for Identifying Capabilities and Limitations of Machine Learning Models for Sequential Decision-Making +Authors: Anindya Das Antar, Somayeh Molaei, Yan-Ying Chen, Matthew Lee, Nikola Banovic + +[Link](https://programs.sigchi.org/uist/2024/program/content/170861) + +Abstract: Ensuring that Machine Learning (ML) models make correct and meaningful inferences is necessary for the broader adoption of such models into high-stakes decision-making scenarios. Thus, ML model engineers increasingly use eXplainable AI (XAI) tools to investigate the capabilities and limitations of their ML models before deployment. However, explaining sequential ML models, which make a series of decisions at each timestep, remains challenging. We present Visual Interactive Model Explorer (VIME), an XAI toolbox that enables ML model engineers to explain decisions of sequential models in different ``what-if'' scenarios. Our evaluation with 14 ML experts, who investigated two existing sequential ML models using VIME and a baseline XAI toolbox to explore ``what-if'' scenarios, showed that VIME made it easier to identify and explain instances when the models made wrong decisions compared to the baseline. Our work informs the design of future interactive XAI mechanisms for evaluating sequential ML-based decision support systems. + + + +### SERENUS: Alleviating Low-Battery Anxiety Through Real-time, Accurate, and User-Friendly Energy Consumption Prediction of Mobile Applications +Authors: Sera Lee, Dae R. Jeong, Junyoung Choi, Jaeheon Kwak, Seoyun Son, Jean Song, Insik Shin + +[Link](https://programs.sigchi.org/uist/2024/program/content/170937) + +Abstract: Low-battery anxiety has emerged as a result of growing dependence on mobile devices, where the anxiety arises when the battery level runs low. 
While battery life can be extended through power-efficient hardware and software optimization techniques, low-battery anxiety will remain a phenomenon as long as mobile devices rely on batteries. In this paper, we investigate how an accurate real-time energy consumption prediction at the application-level can improve the user experience in low-battery situations. We present Serenus, a mobile system framework specifically tailored to predict the energy consumption of each mobile application and present the prediction in a user-friendly manner. We conducted user studies using Serenus to verify that highly accurate energy consumption predictions can effectively alleviate low-battery anxiety by assisting users in planning their application usage based on the remaining battery life. We summarize requirements to mitigate users’ anxiety, guiding the design of future mobile system frameworks. + + + + +## Break Q&A: AI as Copilot +### DiscipLink: Unfolding Interdisciplinary Information Seeking Process via Human-AI Co-Exploration +Authors: Chengbo Zheng, Yuanhao Zhang, Zeyu Huang, Chuhan Shi, Minrui Xu, Xiaojuan Ma + +[Link](https://programs.sigchi.org/uist/2024/program/content/170741) + +Abstract: Interdisciplinary studies often require researchers to explore literature in diverse branches of knowledge. Yet, navigating through the highly scattered knowledge from unfamiliar disciplines poses a significant challenge. In this paper, we introduce DiscipLink, a novel interactive system that facilitates collaboration between researchers and large language models (LLMs) in interdisciplinary information seeking (IIS). Based on users' topic of interest, DiscipLink initiates exploratory questions from the perspectives of possible relevant fields of study, and users can further tailor these questions. DiscipLink then supports users in searching and screening papers under selected questions by automatically expanding queries with disciplinary-specific terminologies, extracting themes from retrieved papers, and highlighting the connections between papers and questions. Our evaluation, comprising a within-subject comparative experiment and an open-ended exploratory study, reveals that DiscipLink can effectively support researchers in breaking down disciplinary boundaries and integrating scattered knowledge in diverse fields. The findings underscore the potential of LLM-powered tools in fostering information-seeking practices and bolstering interdisciplinary research. + + + +### Improving Steering and Verification in AI-Assisted Data Analysis with Interactive Task Decomposition +Authors: Majeed Kazemitabaar, Jack Williams, Ian Drosos, Tovi Grossman, Austin Henley, Carina Negreanu, Advait Sarkar + +[Link](https://programs.sigchi.org/uist/2024/program/content/170918) + +Abstract: LLM-powered tools like ChatGPT Data Analysis, have the potential to help users tackle the challenging task of data analysis programming, which requires expertise in data processing, programming, and statistics. However, our formative study (n=15) uncovered serious challenges in verifying AI-generated results and steering the AI (i.e., guiding the AI system to produce the desired output). We developed two contrasting approaches to address these challenges. 
The first (Stepwise) decomposes the problem into step-by-step subgoals with pairs of editable assumptions and code until task completion, while the second (Phasewise) decomposes the entire problem into three editable, logical phases: structured input/output assumptions, execution plan, and code. A controlled, within-subjects experiment (n=18) compared these systems against a conversational baseline. Users reported significantly greater control with the Stepwise and Phasewise systems, and found intervention, correction, and verification easier, compared to the baseline. The results suggest design guidelines and trade-offs for AI-assisted data analysis tools. + + + + +### VizGroup: An AI-assisted Event-driven System for Collaborative Programming Learning Analytics +Authors: Xiaohang Tang, Sam Wong, Kevin Pu, Xi Chen, Yalong Yang, Yan Chen + +[Link](https://programs.sigchi.org/uist/2024/program/content/170725) + +Abstract: Programming instructors often conduct collaborative learning activities, like Peer Instruction, to foster a deeper understanding in students and enhance their engagement with learning. These activities, however, may not always yield productive outcomes due to the diversity of student mental models and their ineffective collaboration. In this work, we introduce VizGroup, an AI-assisted system that enables programming instructors to easily oversee students' real-time collaborative learning behaviors during large programming courses. VizGroup leverages Large Language Models (LLMs) to recommend event specifications for instructors so that they can simultaneously track and receive alerts about key correlation patterns between various collaboration metrics and ongoing coding tasks. We evaluated VizGroup with 12 instructors in a comparison study using a dataset collected from a Peer Instruction activity that was conducted in a large programming lecture. +The results showed that VizGroup helped instructors effectively overview, narrow down, and track nuances throughout students' behaviors. + + + +### Who did it? How User Agency is influenced by Visual Properties of Generated Images +Authors: Johanna Didion, Krzysztof Wolski, Dennis Wittchen, David Coyle, Thomas Leimkühler, Paul Strohmeier + +[Link](https://programs.sigchi.org/uist/2024/program/content/170827) + +Abstract: The increasing proliferation of AI and GenAI requires new interfaces tailored to how their specific affordances and human requirements meet. As GenAI is capable of taking over tasks from users on an unprecedented scale, designing the experience of agency -- if and how users experience control over the process and responsibility over the outcome -- is crucial. As an initial step towards design guidelines for shaping agency, we present a study that explores how features of AI-generated images influence users' experience of agency. We use two measures; temporal binding to implicitly estimate pre-reflective agency and magnitude estimation to assess user judgments of agency. We observe that abstract images lead to more temporal binding than images with semantic meaning. In contrast, the closer an image aligns with what a user might expect, the higher the agency judgment. When comparing the experiment results with objective metrics of image differences, we find that temporal binding results correlate with semantic differences, while agency judgments are better explained by local differences between images. 
This work contributes towards a future where agency is considered an important design dimension for GenAI interfaces. + + + +### FathomGPT: A Natural Language Interface for Interactively Exploring Ocean Science Data +Authors: Nabin Khanal, Chun Meng Yu, Jui-Cheng Chiu, Anav Chaudhary, Ziyue Zhang, Kakani Katija, Angus Forbes + +[Link](https://programs.sigchi.org/uist/2024/program/content/171001) + +Abstract: We introduce FathomGPT, an open source system for the interactive investigation of ocean science data via a natural language interface. FathomGPT was developed in close collaboration with marine scientists to enable researchers and ocean enthusiasts to explore and analyze the FathomNet image database. FathomGPT provides a custom information retrieval pipeline that leverages OpenAI’s large language models to enable: the creation of complex queries to retrieve images, taxonomic information, and scientific measurements; mapping common names and morphological features to scientific names; generating interactive charts on demand; and searching by image or specified patterns within an image. In designing FathomGPT, particular emphasis was placed on enhancing the user's experience by facilitating free-form exploration and optimizing response times. We present an architectural overview and implementation details of FathomGPT, along with a series of ablation studies that demonstrate the effectiveness of our approach to name resolution, fine tuning, and prompt modification. Additionally, we present usage scenarios of interactive data exploration sessions and document feedback from ocean scientists and machine learning experts. + + + +### VRCopilot: Authoring 3D Layouts with Generative AI Models in VR +Authors: Lei Zhang, Jin Pan, Jacob Gettig, Steve Oney, Anhong Guo + +[Link](https://programs.sigchi.org/uist/2024/program/content/170933) + +Abstract: Immersive authoring provides an intuitive medium for users to create 3D scenes via direct manipulation in Virtual Reality (VR). Recent advances in generative AI have enabled the automatic creation of realistic 3D layouts. However, it is unclear how capabilities of generative AI can be used in immersive authoring to support fluid interactions, user agency, and creativity. We introduce VRCopilot, a mixed-initiative system that integrates pre-trained generative AI models into immersive authoring to facilitate human-AI co-creation in VR. VRCopilot presents multimodal interactions to support rapid prototyping and iterations with AI, and intermediate representations such as wireframes to augment user controllability over the created content. Through a series of user studies, we evaluated the potential and challenges in manual, scaffolded, and automatic creation in immersive authoring. We found that scaffolded creation using wireframes enhanced the user agency compared to automatic creation. We also found that manual creation via multimodal specification offers the highest sense of creativity and agency. + + + + +## Break Q&A: Validation in AI/ML +### Natural Expression of a Machine Learning Model's Uncertainty Through Verbal and Non-Verbal Behavior of Intelligent Virtual Agents +Authors: Susanne Schmidt, Tim Rolff, Henrik Voigt, Micha Offe, Frank Steinicke + +[Link](https://programs.sigchi.org/uist/2024/program/content/170826) + +Abstract: Uncertainty cues are inherent in natural human interaction, as they signal to communication partners how much they can rely on conveyed information. 
Humans subconsciously provide such signals both verbally (e.g., through expressions such as "maybe" or "I think") and non-verbally (e.g., by diverting their gaze). In contrast, artificial intelligence (AI)-based services and machine learning (ML) models such as ChatGPT usually do not disclose the reliability of answers to their users. +In this paper, we explore the potential of combining ML models as powerful information sources with human means of expressing uncertainty to contextualize the information. We present a comprehensive pipeline that comprises (1) the human-centered collection of (non-)verbal uncertainty cues, (2) the transfer of cues to virtual agent videos, (3) the annotation of videos for perceived uncertainty, and (4) the subsequent training of a custom ML model that can generate uncertainty cues in virtual agent behavior. In a final step (5), the trained ML model is evaluated in terms of both fidelity and generalizability of the generated (non-)verbal uncertainty behavior. + + + +### Who Validates the Validators? Aligning LLM-Assisted Evaluation of LLM Outputs with Human Preferences +Authors: Shreya Shankar, J.D. Zamfirescu-Pereira, Bjoern Hartmann, Aditya Parameswaran, Ian Arawjo + +[Link](https://programs.sigchi.org/uist/2024/program/content/170954) + +Abstract: Due to the cumbersome nature of human evaluation and limitations of code-based evaluation, Large Language Models (LLMs) are increasingly being used to assist humans in evaluating LLM outputs. Yet LLM-generated evaluators simply inherit all the problems of the LLMs they evaluate, requiring further human validation. We present a mixed-initiative approach to “validate the validators”— aligning LLM-generated evaluation functions (be it prompts or code) with human requirements. Our interface, EvalGen, provides automated assistance to users in generating evaluation criteria and implementing assertions. While generating candidate implementations (Python functions, LLM grader prompts), EvalGen asks humans to grade a subset of LLM outputs; this feedback is used to select implementations that better align with user grades. A qualitative study finds overall support for EvalGen but underscores the subjectivity and iterative nature of alignment. In particular, we identify a phenomenon we dub criteria drift: users need criteria to grade outputs, but grading outputs helps users define criteria. What is more, some criteria appear dependent on the specific LLM outputs observed (rather than independent and definable a priori), raising serious questions for approaches that assume the independence of evaluation from observation of model outputs. We present our interface and implementation details, a comparison of our algorithm with a baseline approach, and implications for the design of future LLM evaluation assistants. + + + +### LlamaTouch: A Faithful and Scalable Testbed for Mobile UI Task Automation +Authors: Li Zhang, Shihe Wang, Xianqing Jia, Zhihan Zheng, Yunhe Yan, Longxi Gao, Yuanchun Li, Mengwei Xu + +[Link](https://programs.sigchi.org/uist/2024/program/content/170831) + +Abstract: The emergent large language/multimodal models facilitate the evolution of mobile agents, especially in mobile UI task automation. However, existing evaluation approaches, which rely on human validation or established datasets to compare agent-predicted actions with predefined action sequences, are unscalable and unfaithful. 
To overcome these limitations, this paper presents LlamaTouch, a testbed for on-device mobile UI task execution and faithful, scalable task evaluation. By observing that the task execution process only transfers UI states, LlamaTouch employs a novel evaluation approach that only assesses whether an agent traverses all manually annotated, essential application/system states. LlamaTouch comprises three key techniques: (1) On-device task execution that enables mobile agents to interact with realistic mobile environments for task execution. (2) Fine-grained UI component annotation that merges pixel-level screenshots and textual screen hierarchies to explicitly identify and precisely annotate essential UI components with a rich set of designed annotation primitives. (3) A multi-level application state matching algorithm that utilizes exact and fuzzy matching to accurately detect critical information in each screen, even with unpredictable UI layout/content dynamics. LlamaTouch currently incorporates four mobile agents and 496 tasks, encompassing both tasks in the widely-used datasets and our self-constructed ones to cover more diverse mobile applications. Evaluation results demonstrate LlamaTouch’s high faithfulness of evaluation in real-world mobile environments and its better scalability than human validation. LlamaTouch also enables easy task annotation and integration of new mobile agents. Code and dataset are publicly available at https://github.com/LlamaTouch/LlamaTouch. + + + +### Clarify: Improving Model Robustness With Natural Language Corrections +Authors: Yoonho Lee, Michelle Lam, Helena Vasconcelos, Michael Bernstein, Chelsea Finn + +[Link](https://programs.sigchi.org/uist/2024/program/content/170784) + +Abstract: The standard way to teach models is by feeding them lots of data. However, this approach often teaches models incorrect ideas because they pick up on misleading signals in the data. To prevent such misconceptions, we must necessarily provide additional information beyond the training data. Prior methods incorporate additional instance-level supervision, such as labels for misleading features or additional labels for debiased data. However, such strategies require a large amount of labeler effort. We hypothesize that people are good at providing textual feedback at the concept level, a capability that existing teaching frameworks do not leverage. We propose Clarify, a novel interface and method for interactively correcting model misconceptions. Through Clarify, users need only provide a short text description of a model's consistent failure patterns. Then, in an entirely automated way, we use such descriptions to improve the training process. Clarify is the first end-to-end system for user model correction. Our user studies show that non-expert users can successfully describe model misconceptions via Clarify, leading to increased worst-case performance in two datasets. We additionally conduct a case study on a large-scale image dataset, ImageNet, using Clarify to find and rectify 31 novel hard subpopulations. + + + +### "The Data Says Otherwise" – Towards Automated Fact-checking and Communication of Data Claims +Authors: Yu Fu, Shunan Guo, Jane Hoffswell, Victor S. Bursztyn, Ryan Rossi, John Stasko + +[Link](https://programs.sigchi.org/uist/2024/program/content/170762) + +Abstract: Fact-checking data claims requires data evidence retrieval and analysis, which can become tedious and intractable when done manually. 
This work presents Aletheia, an automated fact-checking prototype designed to facilitate data claims verification and enhance data evidence communication. For verification, we utilize a pre-trained LLM to parse the semantics for evidence retrieval. To effectively communicate the data evidence, we design representations in two forms: data tables and visualizations, tailored to various data fact types. Additionally, we design interactions that showcase a real-world application of these techniques. We evaluate the performance of two core NLP tasks with a curated dataset comprising 400 data claims and compare the two representation forms regarding viewers’ assessment time, confidence, and preference via a user study with 20 participants. The evaluation offers insights into the feasibility and bottlenecks of using LLMs for data fact-checking tasks, potential advantages and disadvantages of using visualizations over data tables, and design recommendations for presenting data evidence. + + + + +## Break Q&A: Bodily Signals +### Empower Real-World BCIs with NIRS-X: An Adaptive Learning Framework that Harnesses Unlabeled Brain Signals +Authors: Liang Wang, Jiayan Zhang, Jinyang Liu, Devon McKeon, David Guy Brizan, Giles Blaney, Robert Jacob + +[Link](https://programs.sigchi.org/uist/2024/program/content/170939) + +Abstract: Brain-Computer Interfaces (BCIs) using functional near-infrared spectroscopy (fNIRS) hold promise for future interactive user interfaces due to their ease of deployment and declining cost. However, they typically require a separate calibration process for each user and task, which can be burdensome. Machine learning helps, but faces a data scarcity problem. Due to inherent inter-user variations in physiological data, it has been typical to create a new annotated training dataset for every new task and user. To reduce dependence on such extensive data collection and labeling, we present an adaptive learning framework, NIRS-X, to harness more easily accessible unlabeled fNIRS data. NIRS-X includes two key components: NIRSiam and NIRSformer. We use the NIRSiam algorithm to extract generalized brain activity representations from unlabeled fNIRS data obtained from previous users and tasks, and then transfer that knowledge to new users and tasks. In conjunction, we design a neural network, NIRSformer, tailored for capturing both local and global, spatial and temporal relationships in multi-channel fNIRS brain input signals. By using unlabeled data from both a previously released fNIRS2MW visual $n$-back dataset and a newly collected fNIRS2MW audio $n$-back dataset, NIRS-X demonstrates its strong adaptation capability to new users and tasks. Results show comparable or superior performance to supervised methods, making NIRS-X promising for real-world fNIRS-based BCIs. + + + +### Understanding the Effects of Restraining Finger Coactivation in Mid-Air Typing: from a Neuromechanical Perspective +Authors: Hechuan Zhang, Xuewei Liang, Ying Lei, Yanjun Chen, Zhenxuan He, Yu Zhang, Lihan Chen, Hongnan Lin, Teng Han, Feng Tian + +[Link](https://programs.sigchi.org/uist/2024/program/content/170941) + +Abstract: Typing in mid-air is often perceived as intuitive yet presents challenges due to finger coactivation, a neuromechanical phenomenon that involves involuntary finger movements stemming from the lack of physical constraints. Previous studies were used to examine and address the impacts of finger coactivation using algorithmic approaches. 
Alternatively, this paper explores the neuromechanical effects of finger coactivation on mid-air typing, aiming to deepen our understanding and provide valuable insights to improve these interactions. We utilized a wearable device that restrains finger coactivation as a prop to conduct two mid-air studies, including a rapid finger-tapping task and a ten-finger typing task. The results revealed that restraining coactivation not only reduced mispresses, the classic error regarded as harm caused by coactivation, but also, unexpectedly, reduced motor control errors and spelling errors, which are generally regarded as non-coactivated errors. Additionally, the study evaluated the neural resources involved in motor execution using functional Near Infrared Spectroscopy (fNIRS), which tracked cortical arousal during mid-air typing. The findings demonstrated decreased activation in the primary motor cortex of the left hemisphere when coactivation was restrained, suggesting a diminished motor execution load. This reduction suggests that a portion of neural resources is conserved, which also potentially aligns with perceived lower mental workload and decreased frustration levels.


### What is Affective Touch Made Of? A Soft Capacitive Sensor Array Reveals the Interplay between Shear, Normal Stress and Individuality
Authors: Devyani McLaren, Jian Gao, Xiulun Yin, Rúbia Reis Guerra, Preeti Vyas, Chrys Morton, Xi Laura Cang, Yizhong Chen, Yiyuan Sun, Ying Li, John Madden, Karon MacLean

[Link](https://programs.sigchi.org/uist/2024/program/content/171010)

Abstract: Humans physically express emotion by modulating parameters that register on mammalian skin mechanoreceptors, but are unavailable in current touch-sensing technology. Greater sensory richness combined with data on affect-expression composition is a prerequisite to estimating affect from touch, with applications including physical human-robot interaction. To examine shear alongside more easily captured normal stresses, we tailored recent capacitive technology to attain performance suitable for affective touch, creating a flexible, reconfigurable and soft 36-taxel array that detects multitouch normal and 2-dimensional shear at ranges of 1.5kPa-43kPa and ±0.3-3.8kPa respectively, wirelessly at ~43Hz (1548 taxels/s). In a deep-learning classification of 9 gestures (N=16), inclusion of shear data improved accuracy to 88%, compared to 80% with normal stress data alone, confirming shear stress's expressive centrality. Using this rich data, we analyse the interplay of sensed-touch features, gesture attributes and individual differences, propose affective-touch sensing requirements, and share technical considerations for performance and practicality.


### Exploring the Effects of Sensory Conflicts on Cognitive Fatigue in VR Remappings
HONORABLE_MENTION

Authors: Tianren Luo, Gaozhang Chen, Yijian Wen, Pengxiang Wang, yachun fan, Teng Han, Feng Tian

[Link](https://programs.sigchi.org/uist/2024/program/content/171000)

Abstract: Virtual reality (VR) is found to present significant cognitive challenges due to its immersive nature and frequent sensory conflicts. This study systematically investigates the impact of sensory conflicts induced by VR remapping techniques on cognitive fatigue, and unveils their correlation.
We utilized three remapping methods (haptic repositioning, head-turning redirection, and giant resizing) to create different types of sensory conflicts, and measured perceptual thresholds to induce various intensities of the conflicts. Through experiments involving cognitive tasks along with subjective and physiological measures, we found that all three remapping methods influenced the onset and severity of cognitive fatigue, with visual-vestibular conflict having the greatest impact. Interestingly, visual-experiential/memory conflict showed a mitigating effect on cognitive fatigue, emphasizing the role of novel sensory experiences. This study contributes to a deeper understanding of cognitive fatigue under sensory conflicts and provides insights for designing VR experiences that align better with human perceptual and cognitive capabilities. + + + + +## Break Q&A: Future Fabrics +### ScrapMap: Interactive Color Layout for Scrap Quilting +Authors: Mackenzie Leake, Ross Daly + +[Link](https://programs.sigchi.org/uist/2024/program/content/170743) + +Abstract: Scrap quilting is a popular sewing process that involves combining leftover pieces of fabric into traditional patchwork designs. Imagining the possibilities for these leftovers and arranging the fabrics in such a way that achieves visual goals, such as high contrast, can be challenging given the large number of potential fabric assignments within the quilt's design. We formulate the task of designing a scrap quilt as a graph coloring problem with domain-specific coloring and material constraints. Our interactive tool called ScrapMap helps quilters explore these potential designs given their available materials by leveraging the hierarchy of scrap quilt construction (e.g., quilt blocks and motifs) and providing user-directed automatic block coloring suggestions. Our user evaluation indicates that quilters find ScrapMap useful for helping them consider new ways to use their scraps and create visually striking quilts. + + + +### What's in a cable? Abstracting Knitting Design Elements with Blended Raster/Vector Primitives +Authors: Hannah Twigg-Smith, Yuecheng Peng, Emily Whiting, Nadya Peek + +[Link](https://programs.sigchi.org/uist/2024/program/content/170811) + +Abstract: In chart-based programming environments for machine knitting, patterns are specified at a low level by placing operations on a grid. This highly manual workflow makes it challenging to iterate on design elements such as cables, colorwork, and texture. While vector-based abstractions for knitting design elements may facilitate higher-level manipulation, they often include interdependencies which require stitch-level reconciliation. To address this, we contribute a new way of specifying knits with blended vector and raster primitives. Our abstraction supports the design of interdependent elements like colorwork and texture. We have implemented our blended raster/vector specification in a direct manipulation design tool where primitives are layered and rasterized, allowing for simulation of the resulting knit structure and generation of machine instructions. Through examples, we show how our approach enables higher-level manipulation of various knitting techniques, including intarsia colorwork, short rows, and cables. Specifically, we show how our tool supports the design of complex patterns including origami pleat patterns and capacitive sensor patches. 
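*Editor's note:* the ScrapMap entry above frames scrap-quilt design as graph coloring under material constraints. As a rough illustration of that formulation only (not the authors' implementation; the block names, fabrics, and greedy heuristic below are invented for the example), a minimal Python sketch:

```python
"""Illustrative sketch of scrap-quilt fabric assignment as constrained graph
coloring, in the spirit of the ScrapMap abstract above. All names and the
greedy strategy are hypothetical, not taken from the paper."""


def assign_fabrics(blocks, adjacency, fabric_stock):
    """Greedily assign a fabric (color) to each quilt block (node) so that
    adjacent blocks differ and no fabric is used more often than its stock.

    blocks: iterable of block ids
    adjacency: dict mapping block -> set of neighboring blocks
    fabric_stock: dict mapping fabric -> number of blocks it can still cover
    """
    remaining = dict(fabric_stock)  # material constraint: scraps run out
    assignment = {}
    # Visit the most constrained blocks (highest degree) first.
    for block in sorted(blocks, key=lambda b: -len(adjacency.get(b, ()))):
        used_nearby = {assignment[n] for n in adjacency.get(block, ()) if n in assignment}
        choices = [f for f, left in remaining.items() if left > 0 and f not in used_nearby]
        if not choices:
            raise ValueError(f"No feasible fabric for block {block}")
        # Prefer the fabric with the most stock left to keep later options open.
        fabric = max(choices, key=lambda f: remaining[f])
        assignment[block] = fabric
        remaining[fabric] -= 1
    return assignment


if __name__ == "__main__":
    blocks = ["A", "B", "C", "D"]
    adjacency = {"A": {"B", "C"}, "B": {"A", "D"}, "C": {"A", "D"}, "D": {"B", "C"}}
    fabric_stock = {"navy floral": 2, "red gingham": 2, "muslin": 1}
    print(assign_fabrics(blocks, adjacency, fabric_stock))
```

A real tool like ScrapMap layers domain-specific constraints (block hierarchy, motifs, contrast goals) and user-directed suggestions on top of such a base formulation.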
+ + + +### Embrogami: Shape-Changing Textiles with Machine Embroidery +Authors: Yu Jiang, Alice Haynes, Narjes Pourjafarian, Jan Borchers, Jürgen Steimle + +[Link](https://programs.sigchi.org/uist/2024/program/content/170971) + +Abstract: Machine embroidery is a versatile technique for creating custom and entirely fabric-based patterns on thin and conformable textile surfaces. However, existing machine-embroidered surfaces remain static, limiting the interactions they can support. We introduce Embrogami, an approach for fabricating textile structures with versatile shape-changing behaviors. Inspired by origami, we leverage machine embroidery to form finger-tip-scale mountain-and-valley structures on textiles with customized shapes, bistable or elastic behaviors, and modular composition. The structures can be actuated by the user or the system to modify the local textile surface topology, creating interactive elements like toggles and sliders or textile shape displays with an ultra-thin, flexible, and integrated form factor. We provide a dedicated software tool and report results of technical experiments to allow users to flexibly design, fabricate, and deploy customized Embrogami structures. With four application cases, we showcase Embrogami’s potential to create functional and flexible shape-changing textiles with diverse visuo-tactile feedback. + + + +### KODA: Knit-program Optimization by Dependency Analysis +Authors: Megan Hofmann + +[Link](https://programs.sigchi.org/uist/2024/program/content/170935) + +Abstract: Digital knitting machines have the capability to reliably manufacture seamless, textured, and multi-material garments, but these capabilities are obscured by limiting CAD tools. Recent innovations in computational knitting build on emerging programming infrastructure that gives full access to the machine's capabilities but requires an extensive understanding of machine operations and execution. In this paper, we contribute a critical missing piece of the knitting-machine programming pipeline--a program optimizer. Program optimization allows programmers to focus on developing novel algorithms that produce desired fabrics while deferring concerns of efficient machine operations to the optimizer. We present KODA, the Knit-program Optimization by Dependency Analysis method. KODA re-orders and reduces machine instructions to reduce knitting time, increase knitting reliability, and manage boilerplate operations that adjust the machine state. The result is a system that enables programmers to write readable and intuitive knitting algorithms while producing efficient and verified programs. + + + +### X-Hair: 3D Printing Hair-like Structures with Multi-form, Multi-property and Multi-function +Authors: Guanyun Wang, Junzhe Ji, Yunkai Xu, Lei Ren, Xiaoyang Wu, Chunyuan Zheng, Xiaojing Zhou, Xin Tang, Boyu Feng, Lingyun Sun, Ye Tao, Jiaji Li + +[Link](https://programs.sigchi.org/uist/2024/program/content/171007) + +Abstract: In this paper, we present X-Hair, a method that enables 3D-printed hair with various forms, properties, and functions. We developed a two-step suspend printing strategy to fabricate hair-like structures in different forms (e.g. fluff, bristle, barb) by adjusting parameters including Extrusion Length Ratio and Total Length. Moreover, a design tool is also established for users to customize hair-like structures with various properties (e.g. 
pointy, stiff, soft) on imported 3D models, which virtually shows the results for previewing and generates G-code files for 3D printing. We demonstrate the design space of X-Hair and evaluate their properties with different parameters. Through a series of applications with hair-like structures, we validate X-Hair's practical usage for biomimicry, decoration, heat preservation, adhesion, and haptic interaction.


### TouchpadAnyWear: Textile-Integrated Tactile Sensors for Multimodal High Spatial-Resolution Touch Inputs with Motion Artifacts Tolerance
Authors: Junyi Zhao, Pornthep Preechayasomboon, Tyler Christensen, Amirhossein H. Memar, Zhenzhen Shen, Nick Colonnese, Michael Khbeis, Mengjia Zhu

[Link](https://programs.sigchi.org/uist/2024/program/content/170873)

Abstract: This paper presents TouchpadAnyWear, a novel family of textile-integrated force sensors capable of multi-modal touch input, encompassing micro-gesture detection, two-dimensional (2D) continuous input, and force-sensitive strokes. This thin (<1.5 mm) and conformal device features high spatial resolution sensing and motion artifact tolerance through its unique capacitive sensor architecture. The sensor consists of a knitted textile compressive core, sandwiched by stretchable silver electrodes, and conductive textile shielding layers on both sides. With a high-density sensor pixel array (25 per cm²), TouchpadAnyWear can detect touch input locations and sizes with millimeter-scale spatial resolution and a wide range of force inputs (0.05 N to 20 N). The incorporation of miniature polymer domes, referred to as "poly-islands", onto the knitted textile locally stiffens the sensing areas, thereby reducing motion artifacts during deformation. These poly-islands also provide passive tactile feedback to users, allowing for eyes-free localization of the active sensing pixels. Design choices and sensor performance are evaluated using in-depth mechanical characterization. Demonstrations include an 8-by-8 grid sensor as a miniature high-resolution touchpad and a T-shaped sensor for thumb-to-finger micro-gesture input. User evaluations validate the effectiveness and usability of TouchpadAnyWear in daily interaction contexts, such as tapping, forceful pressing, swiping, 2D cursor control, and 2D stroke-based gestures. This paper further discusses potential applications and explorations for TouchpadAnyWear in wearable smart devices, gaming, and augmented reality devices.


## Break Q&A: Dynamic Objects & Materials
### MagneDot: Integrated Fabrication and Actuation Methods of Dot-Based Magnetic Shape Displays
Authors: Lingyun Sun, Yitao Fan, Boyu Feng, Yifu Zhang, Deying Pan, Yiwen Ren, Yuyang Zhang, Qi Wang, Ye Tao, Guanyun Wang

[Link](https://programs.sigchi.org/uist/2024/program/content/170860)

Abstract: This paper presents MagneDot, a novel method for making interactive magnetic shape displays through an integrated fabrication process. Magnetic soft materials can potentially create fast, responsive morphing structures for interactions. However, novice users and designers typically do not have access to sophisticated equipment and materials or cannot afford heavy labor to create interactive objects based on this material. Modified from an open-source 3D printer, the fabrication system of MagneDot integrates the processes of mold-making, pneumatic extrusion, magnetization, and actuation, using cost-effective materials only.
By providing a design tool, MagneDot allows users to generate G-codes for fabricating and actuating displays of various morphing effects. Finally, a series of design examples demonstrate the possibilities of shape displays enabled by MagneDot. + + + +### CARDinality: Interactive Card-shaped Robots with Locomotion and Haptics using Vibration +Authors: Aditya Retnanto, Emilie Faracci, Anup Sathya, Yu-Kai Hung, Ken Nakagaki + +[Link](https://programs.sigchi.org/uist/2024/program/content/170995) + +Abstract: This paper introduces a novel approach to interactive robots by leveraging the form-factor of cards to create thin robots equipped with vibrational capabilities for locomotion and haptic feedback. The system is composed of flat-shaped robots with on-device sensing and wireless control, which offer lightweight portability and scalability. This research introduces a hardware prototype to explore the possibility of ‘vibration-based omni-directional sliding locomotion’. Applications include augmented card playing, educational tools, and assistive technology, which showcase CARDinality’s versatility in tangible interaction. + + + + +### PortaChrome: A Portable Contact Light Source for Integrated Re-Programmable Multi-Color Textures +Authors: Yunyi Zhu, Cedric Honnet, Yixiao Kang, Junyi Zhu, Angelina Zheng, Kyle Heinz, Grace Tang, Luca Musk, Michael Wessely, Stefanie Mueller + +[Link](https://programs.sigchi.org/uist/2024/program/content/170742) + +Abstract: In this paper, we present PortaChrome, a portable light source that can be attached to everyday objects to reprogram the color and texture of surfaces that come in contact with them. When PortaChrome makes contact with objects previously coated with photochromic dye, the UV and RGB LEDs inside PortaChrome create multi-color textures on the objects. In contrast to prior work, which used projectors for the color-change, PortaChrome has a thin and flexible form factor, which allows the color-change process to be integrated into everyday user interaction. Because of the close distance between the light source and the photochromic object, PortaChrome creates color textures in less than 4 minutes on average, which is 8 times faster than prior work. We demonstrate PortaChrome with four application examples, including data visualizations on textiles and dynamic designs on wearables. + + + +### Augmented Object Intelligence with XR-Objects +Authors: Mustafa Doga Dogan, Eric Gonzalez, Karan Ahuja, Ruofei Du, Andrea Colaço, Johnny Lee, Mar Gonzalez-Franco, David Kim + +[Link](https://programs.sigchi.org/uist/2024/program/content/170733) + +Abstract: Seamless integration of physical objects as interactive digital entities remains a challenge for spatial computing. This paper explores Augmented Object Intelligence (AOI) in the context of XR, an interaction paradigm that aims to blur the lines between digital and physical by equipping real-world objects with the ability to interact as if they were digital, where every object has the potential to serve as a portal to digital functionalities. Our approach utilizes real-time object segmentation and classification, combined with the power of Multimodal Large Language Models (MLLMs), to facilitate these interactions without the need for object pre-registration. We implement the AOI concept in the form of XR-Objects, an open-source prototype system that provides a platform for users to engage with their physical environment in contextually relevant ways using object-based context menus. 
This system enables analog objects to not only convey information but also to initiate digital actions, such as querying for details or executing tasks. Our contributions are threefold: (1) we define the AOI concept and detail its advantages over traditional AI assistants, (2) detail the XR-Objects system’s open-source design and implementation, and (3) show its versatility through various use cases and a user study. + + + + +## Break Q&A: Prototyping +### ProtoDreamer: A Mixed-prototype Tool Combining Physical Model and Generative AI to Support Conceptual Design +Authors: Hongbo ZHANG, Pei Chen, Xuelong Xie, Chaoyi Lin, Lianyan Liu, Zhuoshu Li, Weitao You, Lingyun Sun + +[Link](https://programs.sigchi.org/uist/2024/program/content/170974) + +Abstract: Prototyping serves as a critical phase in the industrial conceptual design process, enabling exploration of the problem space and identification of solutions. Recent advancements in large-scale generative models have enabled AI to become a co-creator in this process. However, designers often consider generative AI challenging due to the need to follow computer-centered interaction rules, diverging from their familiar design materials and languages. Physical prototyping is a commonly used design method, offering unique benefits in the prototyping process, such as intuitive understanding and tangible testing. In this study, we propose ProtoDreamer, a mixed-prototype tool that synergizes generative AI with physical prototyping to support conceptual design. ProtoDreamer allows designers to construct preliminary prototypes using physical materials, while AI recognizes these forms and vocal inputs to generate diverse design alternatives. This tool empowers designers to tangibly interact with prototypes, intuitively convey design intentions to AI, and continuously draw inspiration from the generated artifacts. An evaluation study confirms ProtoDreamer’s utility and strengths in time efficiency, creativity support, defect exposure, and detailed thinking facilitation. + + + +### TorqueCapsules: Fully-Encapsulated Flywheel Actuation Modules for Designing and Prototyping Movement-Based and Kinesthetic Interaction +Authors: Willa Yunqi Yang, Yifan Zou, Jingle Huang, Raouf Abujaber, Ken Nakagaki + +[Link](https://programs.sigchi.org/uist/2024/program/content/170857) + +Abstract: Flywheels are unique, versatile actuators that store and convert kinetic energy to torque, widely utilized in aerospace, robotics, haptics, and more. However, prototyping interaction using flywheels is not trivial due to safety concerns, unintuitive operation, and implementation challenges. +We present TorqueCapsules: self-contained, fully-encapsulated flywheel actuation modules that make flywheel actuators easy to control, safe to interact with, and quick to reconfigure and customize. By fully encapsulating the actuators with a wireless microcontroller, a battery, and other components, the module can be readily attached, embedded, or stuck to everyday objects, worn on people’s bodies, or combined with other devices. With our custom GUI, both novice and expert users can easily control multiple modules to design and prototype movements and kinesthetic haptics unique to flywheel actuation. We demonstrate various applications, including actuated everyday objects, wearable haptics, and expressive robots. We also conducted workshops in which novices and experts employed TorqueCapsules, collecting qualitative feedback and further application examples.
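For readers unfamiliar with flywheel (reaction-wheel) actuation, the torque a module like TorqueCapsules exerts comes from changing the flywheel's angular velocity. The sketch below is a minimal, self-contained illustration of that relationship only; it is not code from the paper, and the mass, radius, and speed values are hypothetical example numbers.

```python
# Minimal illustration of the reaction-torque principle behind flywheel
# actuation (NOT from the TorqueCapsules paper; all numbers are hypothetical).
import math

def flywheel_inertia(mass_kg: float, radius_m: float) -> float:
    """Moment of inertia of a solid-disc flywheel: I = 0.5 * m * r^2."""
    return 0.5 * mass_kg * radius_m ** 2

def reaction_torque(inertia: float, rpm_start: float, rpm_end: float, dt_s: float) -> float:
    """Torque = I * d(omega)/dt, with omega in rad/s. While the motor changes the
    flywheel's speed, the module body feels the equal-and-opposite reaction torque."""
    d_omega = (rpm_end - rpm_start) * 2.0 * math.pi / 60.0
    return inertia * d_omega / dt_s

if __name__ == "__main__":
    inertia = flywheel_inertia(mass_kg=0.02, radius_m=0.015)          # a 20 g, 15 mm-radius disc
    torque = reaction_torque(inertia, rpm_start=0.0, rpm_end=6000.0, dt_s=0.5)
    print(f"I = {inertia:.2e} kg*m^2, reaction torque ~ {torque * 1000:.2f} mN*m")
```

In this toy calculation, a brief, large speed change yields a short torque impulse, which hints at why rapid flywheel speed changes are useful for producing perceivable movement and kinesthetic cues.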
+ + + +### AniCraft: Crafting Everyday Objects as Physical Proxies for Prototyping 3D Character Animation in Mixed Reality +Authors: Boyu Li, Linping Yuan, Zhe Yan, Qianxi Liu, Yulin Shen, Zeyu Wang + +[Link](https://programs.sigchi.org/uist/2024/program/content/170881) + +Abstract: We introduce AniCraft, a mixed reality system for prototyping 3D character animation using physical proxies crafted from everyday objects. Unlike existing methods that require specialized equipment to support the use of physical proxies, AniCraft only requires affordable markers, webcams, and easily accessible everyday objects and materials. AniCraft allows creators to prototype character animations through three key stages: selection of virtual characters, fabrication of physical proxies, and manipulation of these proxies to animate the characters. This authoring workflow is underpinned by diverse physical proxies, manipulation types, and mapping strategies, which ease the process of posing virtual characters and mapping user interactions with physical proxies to animated movements of virtual characters. We provide a range of cases and potential applications to demonstrate how diverse physical proxies can inspire user creativity. User experiments show that our system can outperform traditional animation methods for rapid prototyping. Furthermore, we provide insights into the benefits and usage patterns of different materials, which lead to design implications for future research. + + + +### Mul-O: Encouraging Olfactory Innovation in Various Scenarios Through a Task-Oriented Development Platform +Authors: Peizhong Gao, Fan Liu, Di Wen, Yuze Gao, Linxin Zhang, Chikelei Wang, Qiwei Zhang, Yu Zhang, Shao-en Ma, Qi Lu, Haipeng Mi, YINGQING XU + +[Link](https://programs.sigchi.org/uist/2024/program/content/170886) + +Abstract: Olfactory interfaces are pivotal in HCI, yet their development is hindered by limited application scenarios, stifling the discovery of new research opportunities. This challenge primarily stems from existing design tools focusing predominantly on odor display devices and the creation of standalone olfactory experiences, rather than enabling rapid adaptation to various contexts and tasks. Addressing this, we introduce Mul-O, a novel task-oriented development platform crafted to aid semi-professionals in navigating the diverse requirements of potential application scenarios and effectively prototyping ideas. +Mul-O facilitates the swift association and integration of olfactory experiences into functional designs, system integrations, and concept validations. Comprising a web UI for task-oriented development, an API server for seamless third-party integration, and wireless olfactory display hardware, Mul-O significantly enhances the ideation and prototyping process in multisensory tasks. This was verified by a 15-day workshop attended by 30 participants. The workshop produced seven innovative projects, underscoring Mul-O's efficacy in fostering olfactory innovation. + + + + +## Break Q&A: New Visualizations +### VisCourt: In-Situ Guidance for Interactive Tactic Training in Mixed Reality +Authors: Liqi Cheng, Hanze Jia, Lingyun Yu, Yihong Wu, Shuainan Ye, Dazhen Deng, Hui Zhang, Xiao Xie, Yingcai Wu + +[Link](https://programs.sigchi.org/uist/2024/program/content/170791) + +Abstract: In team sports like basketball, understanding and executing tactics, coordinated plans of movements among players, are crucial yet complex, requiring extensive practice.
These tactics require players to develop a keen sense of spatial and situational awareness. Traditional coaching methods, which mainly rely on basketball tactic boards and video instruction, often fail to bridge the gap between theoretical learning and the real-world application of tactics, due to shifts in view perspectives and a lack of direct experience with tactical scenarios. To address this challenge, we introduce VisCourt, a Mixed Reality (MR) tactic training system, in collaboration with a professional basketball team. To set up the MR training environment, we employed semi-automatic methods to simulate realistic 3D tactical scenarios and iteratively designed visual in-situ guidance. This approach enables full-body engagement in interactive training sessions on an actual basketball court and provides immediate feedback, significantly enhancing the learning experience. A user study with athletes and enthusiasts shows the effectiveness and satisfaction with VisCourt in basketball training and offers insights for the design of future SportsXR training systems. + + + +### Block and Detail: Scaffolding Sketch-to-Image Generation +Authors: Vishnu Sarukkai, Lu Yuan, Mia Tang, Maneesh Agrawala, Kayvon Fatahalian + +[Link](https://programs.sigchi.org/uist/2024/program/content/170911) + +Abstract: We introduce a novel sketch-to-image tool that aligns with the iterative refinement process of artists. Our tool lets users sketch blocking strokes to coarsely represent the placement and form of objects and detail strokes to refine their shape and silhouettes. We develop a two-pass algorithm for generating high-fidelity images from such sketches at any point in the iterative process. In the first pass we use a ControlNet to generate an image that strictly follows all the strokes (blocking and detail) and in the second pass we add variation by renoising regions surrounding blocking strokes. We also present a dataset generation scheme that, when used to train a ControlNet architecture, allows regions that do not contain strokes to be interpreted as not-yet-specified regions rather than empty space. We show that this partial-sketch-aware ControlNet can generate coherent elements from partial sketches that only contain a small number of strokes. The high-fidelity images produced by our approach serve as scaffolds that can help the user adjust the shape and proportions of objects or add additional elements to the composition. We demonstrate the effectiveness of our approach with a variety of examples and evaluative comparisons. Quantitatively, novice viewers prefer the quality of images from our algorithm over a baseline Scribble ControlNet for 82% of the pairs and found our images had less distortion in 80% of the pairs. + + + +### EVE: Enabling Anyone to Train Robots using Augmented Reality +Authors: Jun Wang, Chun-Cheng Chang, Jiafei Duan, Dieter Fox, Ranjay Krishna + +[Link](https://programs.sigchi.org/uist/2024/program/content/170803) + +Abstract: The increasing affordability of robot hardware is accelerating the integration of robots into everyday activities. However, training a robot to automate a task requires expensive trajectory data where a trained human annotator moves a physical robot to train it. Consequently, only those with access to robots produce demonstrations to train robots. In this work, we remove this restriction with EVE, an iOS app that enables everyday users to train robots using intuitive augmented reality visualizations, without needing a physical robot. 
With EVE, users can collect demonstrations by specifying waypoints with their hands, visually inspecting the environment for obstacles, modifying existing waypoints, and verifying collected trajectories. In a user study (N=14, D=30) consisting of three common tabletop tasks, EVE outperformed three state-of-the-art interfaces in success rate and was comparable to kinesthetic teaching—physically moving a physical robot—in completion time, usability, motion intent communication, enjoyment, and preference (mean of p=0.30). EVE allows users to train robots for personalized tasks, such as sorting desk supplies, organizing ingredients, or setting up board games. We conclude by enumerating limitations and design considerations for future AR-based demonstration collection systems for robotics. + + + +### avaTTAR: Table Tennis Stroke Training with On-body and Detached Visualization in Augmented Reality +Authors: Dizhi Ma, Xiyun Hu, Jingyu Shi, Mayank Patel, Rahul Jain, Ziyi Liu, Zhengzhe Zhu, Karthik Ramani + +[Link](https://programs.sigchi.org/uist/2024/program/content/170894) + +Abstract: Table tennis stroke training is a critical aspect of player development. We designed a new augmented reality (AR) system, avaTTAR, for table tennis stroke training. The system provides both “on-body” (first-person view) and “detached” (third-person view) +visual cues, enabling users to visualize target strokes and correct their attempts effectively with this dual-perspective setup. By employing a combination of pose estimation algorithms and IMU sensors, avaTTAR captures and reconstructs the 3D body pose and paddle orientation of users during practice, allowing real-time comparison with expert strokes. Through a user study, we affirm avaTTAR’s capacity to enhance player experience and training results. + + + + +## Break Q&A: Movement-based UIs +### Feminist Interaction Techniques: Social Consent Signals to Deter NCIM Screenshots +Authors: Li Qiwei, Francesca Lameiro, Shefali Patel, Cristi Isaula-Reyes, Eytan Adar, Eric Gilbert, Sarita Schoenebeck + +[Link](https://programs.sigchi.org/uist/2024/program/content/170858) + +Abstract: Non-consensual Intimate Media (NCIM) refers to the distribution of sexual or intimate content without consent. NCIM is common and causes significant emotional, financial, and reputational harm. We developed Hands-Off, an interaction technique for messaging applications that deters non-consensual screenshots. Hands-Off requires recipients to perform a hand gesture in the air, above the device, to unlock media—which makes simultaneous screenshotting difficult. A lab study shows that Hands-Off gestures are easy +to perform and reduce non-consensual screenshots by 67%. We conclude by generalizing this approach, introducing the idea of Feminist Interaction Techniques (FIT), interaction techniques that encode feminist values and speak to societal problems, and reflecting on FIT’s opportunities and limitations. + + + +### Effects of Computer Mouse Lift-off Distance Settings in Mouse Lifting Action +Authors: Munjeong Kim, Sunjun Kim + +[Link](https://programs.sigchi.org/uist/2024/program/content/170957) + +Abstract: This study investigates the effect of a computer mouse's Lift-off Distance (LoD), the height at which the mouse sensor stops tracking when lifted off the surface. Although a low LoD is generally preferred to avoid unintentional cursor movement during mouse lifting (i.e., clutching), especially in first-person shooter games, it may reduce tracking stability.
+We conducted a psychophysical experiment to measure the perceptible differences between LoD levels and quantitatively measured the unintentional cursor movement error and tracking stability at four levels of LoD while users performed mouse lifting. The results showed a trade-off between movement error and tracking stability at varying levels of LoD. Our findings offer valuable information on optimal LoD settings, which could serve as a guide for choosing a proper mouse device for enthusiastic gamers. + + + +### DisMouse: Disentangling Information from Mouse Movement Data +Authors: Guanhua Zhang, Zhiming Hu, Andreas Bulling + +[Link](https://programs.sigchi.org/uist/2024/program/content/170847) + +Abstract: Mouse movement data contain rich information about users, performed tasks, and user interfaces, but separating the respective components remains challenging and unexplored. As a first step to address this challenge, we propose DisMouse – the first method to disentangle user-specific and user-independent information and stochastic variations from mouse movement data. At the core of our method is an autoencoder trained in a semi-supervised fashion, consisting of a self-supervised denoising diffusion process and a supervised contrastive user identification module. Through evaluations on three datasets, we show that DisMouse 1) captures complementary information of mouse input, hence providing an interpretable framework for modelling mouse movements, 2) can be used to produce refined features, thus enabling various applications such as personalised and variable mouse data generation, and 3) generalises across different datasets. Taken together, our results underline the significant potential of disentangled representation learning for explainable, controllable, and generalised mouse behaviour modelling. + + + +### Wheeler: A Three-Wheeled Input Device for Usable, Efficient, and Versatile Non-Visual Interaction +HONORABLE_MENTION + +Authors: Md Touhidul Islam, Noushad Sojib, Imran Kabir, Ashiqur Rahman Amit, Mohammad Ruhul Amin, Syed Masum Billah + +[Link](https://programs.sigchi.org/uist/2024/program/content/170848) + +Abstract: Blind users rely on keyboards and assistive technologies like screen readers to interact with user interface (UI) elements. In modern applications with complex UI hierarchies, navigating to different UI elements poses a significant accessibility challenge. Users must listen to screen reader audio descriptions and press relevant keyboard keys one at a time. This paper introduces Wheeler, a novel three-wheeled, mouse-shaped stationary input device, to address this issue. Informed by participatory sessions, Wheeler enables blind users to navigate up to three hierarchical levels in an app independently using three wheels instead of navigating just one level at a time using a keyboard. The three wheels also offer versatility, allowing users to repurpose them for other tasks, such as 2D cursor manipulation. A study with 12 blind users indicates a significant reduction (40%) in navigation time compared to using a keyboard. Further, a diary study with our blind co-author highlights Wheeler's additional benefits, such as accessing UI elements with partial metadata and facilitating mixed-ability collaboration. 
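As a purely conceptual illustration of the "one wheel per hierarchy level" idea described in the Wheeler abstract above, the sketch below maps three rotary inputs onto a toy UI tree. The data structure, class names, and example menu are hypothetical and are not taken from the paper or its device firmware.

```python
# Conceptual sketch of three-wheel hierarchical navigation (hypothetical data
# structures; NOT Wheeler's actual firmware or screen-reader integration).
from dataclasses import dataclass, field
from typing import List

@dataclass
class UINode:
    name: str
    children: List["UINode"] = field(default_factory=list)

class ThreeWheelNavigator:
    """Keeps one sibling index per hierarchy level; each wheel scrubs one level."""

    def __init__(self, root: UINode):
        self.root = root
        self.indices = [0, 0, 0]  # selected sibling at depths 1..3

    def rotate(self, wheel: int, ticks: int) -> UINode:
        """wheel is 0, 1, or 2; positive ticks move forward among siblings."""
        siblings = self._siblings_at(wheel)
        if not siblings:
            raise ValueError(f"no elements at depth {wheel + 1}")
        self.indices[wheel] = (self.indices[wheel] + ticks) % len(siblings)
        for deeper in range(wheel + 1, 3):  # changing a level resets deeper selections
            self.indices[deeper] = 0
        return siblings[self.indices[wheel]]

    def _siblings_at(self, level: int) -> List[UINode]:
        node = self.root
        for depth in range(level):
            node = node.children[self.indices[depth]]
        return node.children

if __name__ == "__main__":
    app = UINode("app", [
        UINode("File", [UINode("Open"), UINode("Save")]),
        UINode("Edit", [UINode("Undo"), UINode("Redo")]),
    ])
    nav = ThreeWheelNavigator(app)
    print(nav.rotate(wheel=0, ticks=1).name)  # "Edit"
    print(nav.rotate(wheel=1, ticks=1).name)  # "Redo"
```

With a keyboard alone, a screen-reader user would typically step through each level one element at a time; the point of the three-wheel mapping sketched here is that each hierarchy level gets its own independent input channel.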
+ + + + +## Break Q&A: Sound & Music +### SonoHaptics: An Audio-Haptic Cursor for Gaze-Based Object Selection in XR +Authors: Hyunsung Cho, Naveen Sendhilnathan, Michael Nebeling, Tianyi Wang, Purnima Padmanabhan, Jonathan Browder, David Lindlbauer, Tanya Jonker, Kashyap Todi + +[Link](https://programs.sigchi.org/uist/2024/program/content/170927) + +Abstract: We introduce SonoHaptics, an audio-haptic cursor for gaze-based 3D object selection. SonoHaptics addresses challenges around providing accurate visual feedback during gaze-based selection in Extended Reality (XR), e.g., lack of world-locked displays in no- or limited-display smart glasses and visual inconsistencies. To enable users to distinguish objects without visual feedback, SonoHaptics employs the concept of cross-modal correspondence in human perception to map visual features of objects (color, size, position, material) to audio-haptic properties (pitch, amplitude, direction, timbre). We contribute data-driven models for determining cross-modal mappings of visual features to audio and haptic features, and a computational approach to automatically generate audio-haptic feedback for objects in the user's environment. SonoHaptics provides global feedback that is unique to each object in the scene, and local feedback to amplify differences between nearby objects. Our comparative evaluation shows that SonoHaptics enables accurate object identification and selection in a cluttered scene without visual feedback. + + + +### SonifyAR: Context-Aware Sound Generation in Augmented Reality +Authors: Xia Su, Jon Froehlich, Eunyee Koh, Chang Xiao + +[Link](https://programs.sigchi.org/uist/2024/program/content/170866) + +Abstract: Sound plays a crucial role in enhancing user experience and immersiveness in Augmented Reality (AR). However, current platforms lack support for AR sound authoring due to limited interaction types, challenges in collecting and specifying context information, and difficulty in acquiring matching sound assets. We present SonifyAR, an LLM-based AR sound authoring system that generates context-aware sound effects for AR experiences. SonifyAR expands the current design space of AR sound and implements a Programming by Demonstration (PbD) pipeline to automatically collect contextual information of AR events, including virtual-content-semantics and real-world context. This context information is then processed by a large language model to acquire sound effects with Recommendation, Retrieval, Generation, and Transfer methods. To evaluate the usability and performance of our system, we conducted a user study with eight participants and created five example applications, including an AR-based science experiment, and an assistive application for low-vision AR users. + + + +### Auptimize: Optimal Placement of Spatial Audio Cues for Extended Reality +Authors: Hyunsung Cho, Alexander Wang, Divya Kartik, Emily Xie, Yukang Yan, David Lindlbauer + +[Link](https://programs.sigchi.org/uist/2024/program/content/170952) + +Abstract: Spatial audio in Extended Reality (XR) provides users with better awareness of where virtual elements are placed, and efficiently guides them to events such as notifications, system alerts from different windows, or approaching avatars. Humans, however, are inaccurate in localizing sound cues, especially with multiple sources due to limitations in human auditory perception such as angular discrimination error and front-back confusion. 
This decreases the efficiency of XR interfaces because users misidentify from which XR element a sound is coming. To address this, we propose Auptimize, a novel computational approach for placing XR sound sources, which mitigates such localization errors by utilizing the ventriloquist effect. Auptimize disentangles the sound source locations from the visual elements and relocates the sound sources to optimal positions for unambiguous identification of sound cues, avoiding errors due to inter-source proximity and front-back confusion. Our evaluation shows that Auptimize decreases spatial audio-based source identification errors compared to playing sound cues at the paired visual-sound locations. We demonstrate the applicability of Auptimize for diverse spatial audio-based interactive XR scenarios. (An illustrative placement sketch appears at the end of this session listing.) + + + +### EarHover: Mid-Air Gesture Recognition for Hearables Using Sound Leakage Signals +BEST_PAPER + +Authors: Shunta Suzuki, Takashi Amesaka, Hiroki Watanabe, Buntarou Shizuki, Yuta Sugiura + +[Link](https://programs.sigchi.org/uist/2024/program/content/170787) + +Abstract: We introduce EarHover, an innovative system that enables mid-air gesture input for hearables. Mid-air gesture input, which eliminates the need to touch the device and thus helps keep both hands and the device clean, has shown strong demand in previous surveys. However, existing mid-air gesture input methods for hearables have been limited to adding cameras or infrared sensors. By focusing on the sound leakage phenomenon unique to hearables, we realized mid-air gesture recognition using a speaker and an external microphone that are highly compatible with hearables. The signal that leaks outside the device can be measured by an external microphone, which detects differences in reflection characteristics caused by the hand's speed and shape during mid-air gestures. +Among 27 types of gestures, we determined the seven most suitable gestures for EarHover in terms of signal discrimination and user acceptability. We then evaluated the gesture detection and classification performance of two prototype devices (in-ear type/open-ear type) for real-world application scenarios. + + + +### Towards Music-Aware Virtual Assistants +Authors: Alexander Wang, David Lindlbauer, Chris Donahue + +[Link](https://programs.sigchi.org/uist/2024/program/content/170955) + +Abstract: We propose a system for modifying spoken notifications in a manner that is sensitive to the music a user is listening to. Spoken notifications provide convenient access to rich information without the need for a screen. Virtual assistants see prevalent use in hands-free settings such as driving or exercising, activities where users also regularly enjoy listening to music. In such settings, virtual assistants will temporarily mute a user's music to improve intelligibility. However, users may perceive these interruptions as intrusive, negatively impacting their music-listening experience. To address this challenge, we propose the concept of music-aware virtual assistants, where speech notifications are modified to resemble a voice singing in harmony with the user's music. We contribute a system that processes user music and notification text to produce a blended mix, replacing original song lyrics with the notification content.
In a user study comparing musical assistants to standard virtual assistants, participants expressed that musical assistants fit better with music, reduced intrusiveness, and provided a more delightful listening experience overall. + + + +
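Returning to the Auptimize abstract earlier in this session: as a toy illustration only (this is not Auptimize's actual cost model or optimization), the sketch below places a sound cue for each visual element by searching a coarse azimuth grid and penalizing the two error sources the abstract names, inter-source proximity and front-back confusion, while loosely anchoring each cue to its visual element. The weights and the simple front-back mirror model are assumptions made for the example.

```python
# Toy spatial-audio placement sketch (NOT Auptimize's algorithm; the cost
# weights and the front-back mirror model are simplifying assumptions).
import itertools
from typing import List, Sequence

def angular_distance(a: float, b: float) -> float:
    """Shortest angular distance between two azimuths, in degrees."""
    d = abs(a - b) % 360.0
    return min(d, 360.0 - d)

def confusion_cost(assignment: Sequence[float], targets: Sequence[float]) -> float:
    """Penalize cues that sit close to each other or close to another cue's
    front-back mirror image (azimuth theta mirrors to 180 - theta)."""
    cost = 0.0
    for a_i, a_j in itertools.combinations(assignment, 2):
        cost += 1.0 / (1.0 + angular_distance(a_i, a_j))                    # inter-source proximity
        cost += 0.5 / (1.0 + angular_distance(a_i, (180.0 - a_j) % 360.0))  # front-back confusion
    # Keep each cue loosely anchored near its visual element (ventriloquist slack).
    cost += sum(0.01 * angular_distance(a, t) for a, t in zip(assignment, targets))
    return cost

def place_sources(targets: Sequence[float], step: int = 15) -> List[float]:
    """Exhaustive search over a coarse azimuth grid; fine for a handful of cues."""
    grid = range(0, 360, step)
    best = min(itertools.product(grid, repeat=len(targets)),
               key=lambda assignment: confusion_cost(assignment, targets))
    return [float(a) for a in best]

if __name__ == "__main__":
    visual_azimuths = [10.0, 20.0, 170.0]   # three visual elements, in degrees
    print(place_sources(visual_azimuths))   # relocated sound-cue azimuths
```

The real system models human localization error and the ventriloquist effect far more carefully; this sketch only shows the overall shape of the idea, namely search candidate placements, score their confusability, and keep each cue near its visual element.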