A quintessential feature of human intelligence is the ability to create ad hoc conventions over time to achieve shared goals efficiently. We investigate how communication strategies evolve through repeated collaboration as people coordinate on shared procedural abstractions. To this end, we first conducted an online, language-only study (N=98) to probe abstraction hierarchies. In a follow-up lab study (N=40), we examined how multimodal communication (speech and gestures) changed during physical collaboration. Pairs communicated through augmented reality, which restricted each participant’s perception of their partner to hand and voice: one participant viewed a 3D virtual tower and sent instructions to the other, who built the physical tower. Over repeated rounds, participants became faster and more accurate by establishing linguistic and gestural abstractions and by using cross-modal redundancy to emphasize changes from previous interactions. Based on these findings, we extend probabilistic models of convention formation to multimodal settings, capturing shifts in modality preferences. Our findings provide building blocks for designing convention-aware intelligent agents.
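To make the modeling extension concrete, the sketch below is our own illustration, not the paper's implementation: it couples a standard lexicon-uncertainty model of convention formation with a modality channel. A speaker softmax-chooses a (signal, modality) pair by expected informativeness minus production cost, updates beliefs after each round, and gesture cost decays with successful use, shifting modality preference. The meaning inventory, cost values, temperature, and toy comprehension rule are all illustrative assumptions.

```python
"""Minimal sketch (assumptions throughout, not the authors' model):
a lexicon-uncertainty convention model extended with a modality channel."""
import numpy as np

rng = np.random.default_rng(0)

MEANINGS = ["tower_base", "arch", "full_tower"]  # assumed abstraction levels
SIGNALS = [(m, mod) for m in MEANINGS for mod in ("speech", "gesture")]

# Belief that each (signal -> meaning) mapping is shared with the partner.
belief = {s: np.ones(len(MEANINGS)) / len(MEANINGS) for s in SIGNALS}

# Assumed production costs: gesture starts costly, speech cheap.
cost = {"speech": 0.1, "gesture": 0.5}
tau = 0.2  # softmax temperature (assumption)

def choose_signal(target_idx):
    """Pick (signal, modality) by softmax over expected utility:
    belief that the partner recovers the target, minus modality cost."""
    utils = np.array([belief[s][target_idx] - cost[s[1]] for s in SIGNALS])
    p = np.exp(utils / tau)
    p /= p.sum()
    return SIGNALS[rng.choice(len(SIGNALS), p=p)]

def update(signal, target_idx, success):
    """Bayesian-style update: successful use sharpens the belief that
    the signal denotes the target; failure flattens it."""
    like = np.full(len(MEANINGS), 0.1)
    like[target_idx] = 2.0 if success else 0.5
    post = belief[signal] * like
    belief[signal] = post / post.sum()

# Simulate repeated rounds: gestural conventions can entrench, and we
# (crudely) let practice lower gesture cost, shifting modality preference.
for _ in range(30):
    target = rng.integers(len(MEANINGS))
    sig = choose_signal(target)
    success = rng.random() < belief[sig][target] + 0.3  # toy comprehension model
    update(sig, target, success)
    if sig[1] == "gesture" and success:
        cost["gesture"] = max(0.05, cost["gesture"] * 0.9)

print("final gesture cost:", round(cost["gesture"], 3))
```

Running the loop shows the intended qualitative behavior: as gestural signals succeed, their cost falls and beliefs about them sharpen, so the speaker's choices drift toward gesture, mirroring the modality-preference shifts the abstract describes.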
@inproceedings{maeda2026multimodal,
  author    = {Maeda, Kiyosu and McCarthy, William P. and Tsai, Ching-Yi and Mu, Jeffrey and Wang, Haoliang and Hawkins, Robert and Fan, Judith E. and Abtahi, Parastoo},
  title     = {Gesturing Toward Abstraction: Multimodal Convention Formation in Collaborative Physical Tasks},
  booktitle = {Proceedings of the CHI Conference on Human Factors in Computing Systems (CHI)},
  year      = {2026},
}