@inproceedings{17957,
  author    = {Youcef Djenouri and Nassim Belmecheri and Tomasz Michalak and Jan Dubi{\'n}ski and Ahmed Belbachir and Anis Yazidi},
  title     = {Learning Graph Representation of Agent Diffusers},
  abstract  = {Diffusion-based generative models have significantly advanced text-to-image synthesis, demonstrating impressive text comprehension and zero-shot generalization. These models refine images from random noise based on textual prompts, with initial reliance on text input shifting towards enhanced visual fidelity over time. This transition suggests that static model parameters might not optimally address the distinct phases of generation. We introduce LGR-AD (Learning Graph Representation of Agent Diffusers), a novel multi-agent system designed to improve adaptability in dynamic computer vision tasks. LGR-AD models the generation process as a distributed system of interacting agents, each representing an expert sub-model. These agents dynamically adapt to varying conditions and collaborate through a graph neural network that encodes their relationships and performance metrics. Our approach employs a coordination mechanism based on top-k maximum spanning trees, optimizing the generation process. Each agent{\textquoteright}s decision-making is guided by a meta-model that minimizes a novel loss function, balancing accuracy and diversity. Theoretical analysis and extensive empirical evaluations show that LGR-AD outperforms traditional diffusion models across various benchmarks, highlighting its potential for scalable and flexible solutions in complex image generation tasks.},
  year      = {2025},
  booktitle = {AAMAS},
}