Publications
- Capability ≠ Interpretability: Human Interpretability of Vision Foundation Models. 2026. Unpublished.
@unpublished{colin2026foundations,
  title  = {Capability {$\neq$} Interpretability: Human Interpretability of Vision Foundation Models},
  author = {Colin, Julien and Goetschalckx, Lore and Oliver, Nuria and Serre, Thomas},
  year   = {2026},
  note   = {Unpublished manuscript},
}
- Does human-alignment benefit interpretability? 2026. Workshop on Representational Alignment (Re^4-Align) (ICLR). PDF ICLR
@inproceedings{colin2026benefit,
  title     = {Does human-alignment benefit interpretability?},
  author    = {Colin, Julien and Oliver, Nuria M and Serre, Thomas},
  booktitle = {Workshop on Representational Alignment (R$e^4$-Align) (ICLR)},
  venue     = {ICLR},
  year      = {2026},
}
- Choosing the right basis for interpretability: Psychophysical comparison between neuron-based and dictionary-based representations. 2026. arXiv preprint.
@unpublished{colin2026basis,
  title  = {Choosing the right basis for interpretability: Psychophysical comparison between neuron-based and dictionary-based representations},
  author = {Colin, Julien and Goetschalckx, Lore and Fel, Thomas and Boutin, Victor and Serre, Thomas and Oliver, Nuria},
  note   = {arXiv preprint arXiv:2411.03993},
  venue  = {arXiv},
  year   = {2026},
  arxiv  = {2411.03993},
}
- Mirror, mirror on the wall, who is the whitest of all? Racial biases in social media beauty filters. 2024. Social Media + Society
@article{riccio2024mirror,
  title     = {Mirror, mirror on the wall, who is the whitest of all? Racial biases in social media beauty filters},
  author    = {Riccio, Piera and Colin, Julien and Ogolla, Shirley and Oliver, Nuria},
  journal   = {Social Media + Society},
  venue     = {Sage},
  volume    = {10},
  number    = {2},
  pages     = {20563051241239295},
  year      = {2024},
  publisher = {SAGE Publications},
  doi       = {10.1177/20563051241239295},
  url       = {https://journals.sagepub.com/doi/full/10.1177/20563051241239295},
}
- Unlocking Feature Visualization for Deep Network with MAgnitude Constrained Optimization. 2023. Thirty-seventh Conference on Neural Information Processing Systems
@inproceedings{fel2023maco,
  title     = {Unlocking Feature Visualization for Deep Network with {MAgnitude} Constrained Optimization},
  author    = {Fel, Thomas and Boissin, Thibaut and Boutin, Victor and Picard, Agustin Martin and Novello, Paul and Colin, Julien and Linsley, Drew and Rousseau, Tom and Cad{\`e}ne, R{\'e}mi and Goetschalckx, Lore and others},
  booktitle = {Thirty-seventh Conference on Neural Information Processing Systems},
  venue     = {NeurIPS},
  year      = {2023},
  arxiv     = {2306.06805},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2023/hash/76d2f8e328e1081c22a77ca0fa330ca5-Abstract-Conference.html},
}
- Diffusion Models as Artists: Are we Closing the Gap between Humans and Machines? 2023. International Conference on Machine Learning
@inproceedings{boutin2023diffusion,
  title     = {Diffusion Models as Artists: Are we Closing the Gap between Humans and Machines?},
  author    = {Boutin, Victor and Fel, Thomas and Singhal, Lakshya and Mukherji, Rishav and Nagaraj, Akash and Colin, Julien and Serre, Thomas},
  booktitle = {International Conference on Machine Learning},
  venue     = {ICML},
  year      = {2023},
  arxiv     = {2301.11722},
  url       = {https://proceedings.mlr.press/v202/boutin23a.html},
}
- Craft: Concept recursive activation factorization for explainability. 2023. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition
@inproceedings{fel2023craft,
  title     = {{CRAFT}: Concept Recursive Activation {FacTorization} for Explainability},
  author    = {Fel, Thomas and Picard, Agustin and Bethune, Louis and Boissin, Thibaut and Vigouroux, David and Colin, Julien and Cad{\`e}ne, R{\'e}mi and Serre, Thomas},
  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  venue     = {CVPR},
  year      = {2023},
  arxiv     = {2211.10154},
  url       = {https://openaccess.thecvf.com/content/CVPR2023/html/Fel_CRAFT_Concept_Recursive_Activation_FacTorization_for_Explainability_CVPR_2023_paper.html},
}
- What I Cannot Predict, I Do Not Understand: A Human-Centered Evaluation Framework for Explainability Methods. 2022. Thirty-sixth Conference on Neural Information Processing Systems
@inproceedings{colin2022cannot,
  title     = {What {I} Cannot Predict, {I} Do Not Understand: A Human-Centered Evaluation Framework for Explainability Methods},
  author    = {Colin, Julien and Fel, Thomas and Cad{\`e}ne, R{\'e}mi and Serre, Thomas},
  booktitle = {Thirty-sixth Conference on Neural Information Processing Systems},
  venue     = {NeurIPS},
  year      = {2022},
  arxiv     = {2112.04417},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2022/hash/13113e938f2957891c0c5e8df811dd01-Abstract-Conference.html},
}
- A benchmark for compositional visual reasoning. 2022. Thirty-sixth Conference on Neural Information Processing Systems
@inproceedings{zerroug2022benchmark,
  title     = {A benchmark for compositional visual reasoning},
  author    = {Zerroug, Aimen and Vaishnav, Mohit and Colin, Julien and Musslick, Sebastian and Serre, Thomas},
  booktitle = {Thirty-sixth Conference on Neural Information Processing Systems},
  venue     = {NeurIPS},
  year      = {2022},
  arxiv     = {2206.05379},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2022/hash/c08ee8fe3d19521f3bfa4102898329fd-Abstract-Datasets_and_Benchmarks.html},
}
- Xplique: A Deep Learning Explainability Toolbox. 2022. Workshop on Explainable Artificial Intelligence for Computer Vision (CVPR)
@inproceedings{fel2022xplique,
  title     = {Xplique: A Deep Learning Explainability Toolbox},
  author    = {Fel, Thomas and Hervier, Lucas and Vigouroux, David and Poche, Antonin and Plakoo, Justin and Cadene, Remi and Chalvidal, Mathieu and Colin, Julien and Boissin, Thibaut and Bethune, Louis and Picard, Agustin and Nicodeme, Claire and Gardes, Laurent and Flandin, Gregory and Serre, Thomas},
  booktitle = {Workshop on Explainable Artificial Intelligence for Computer Vision (CVPR)},
  venue     = {CVPR},
  year      = {2022},
  arxiv     = {2206.04394},
}